CtrlK
BlogDocsLog inGet started
Tessl Logo

tessl/pypi-modelscope

ModelScope brings the notion of Model-as-a-Service to life with unified interfaces for state-of-the-art machine learning models.

Pending
Overview
Eval results
Files

export.mddocs/

Model Export

ModelScope's export framework converts trained models into deployable artifacts for different target platforms. Supported targets include ONNX, TensorRT, and framework-native formats such as TorchScript, SavedModel, TensorFlow Lite, and TensorFlow.js.

Capabilities

Base Exporter Class

Base class that all model exporters extend; subclasses implement the format-specific export logic.

class Exporter:
    """Common interface shared by every model exporter.

    Concrete subclasses implement the conversion logic for a specific
    framework or model family.
    """

    def __init__(self, **kwargs):
        """Create an exporter.

        Parameters:
        - **kwargs: Implementation-specific configuration options.
        """

    def export(
        self,
        model,
        output_dir: str,
        input_shape: tuple = None,
        **kwargs
    ):
        """Convert *model* and write the result under *output_dir*.

        Parameters:
        - model: The model instance to convert.
        - output_dir: Destination directory for the exported artifacts.
        - input_shape: Shape of the model's expected input, if required.
        - **kwargs: Extra options forwarded to the concrete exporter.
        """

Framework-Specific Exporters

Exporters for different deep learning frameworks.

class TorchModelExporter(Exporter):
    """Exporter for PyTorch models targeting ONNX, TorchScript and TensorRT."""

    def __init__(self, **kwargs):
        """Set up the PyTorch exporter with optional configuration."""

    def export(
        self,
        model,
        output_dir: str,
        input_shape: tuple = None,
        export_format: str = 'onnx',
        opset_version: int = 11,
        **kwargs
    ):
        """Convert a PyTorch model into the requested deployment format.

        Parameters:
        - model: PyTorch model instance to convert.
        - output_dir: Directory that receives the exported files.
        - input_shape: Shape of the example input used during export.
        - export_format: One of 'onnx', 'torchscript' or 'tensorrt'.
        - opset_version: ONNX operator-set version to target.
        """

class TfModelExporter(Exporter):
    """Exporter for TensorFlow models targeting common deployment formats."""

    def __init__(self, **kwargs):
        """Set up the TensorFlow exporter with optional configuration."""

    def export(
        self,
        model,
        output_dir: str,
        export_format: str = 'savedmodel',
        **kwargs
    ):
        """Convert a TensorFlow model into the requested deployment format.

        Parameters:
        - model: TensorFlow model instance to convert.
        - output_dir: Directory that receives the exported files.
        - export_format: One of 'savedmodel', 'tflite' or 'tfjs'.
        """

Exporter Builder

Factory function for creating exporters from configuration.

def build_exporter(cfg: dict, default_args: dict = None):
    """Instantiate an exporter described by a configuration dictionary.

    Parameters:
    - cfg: Configuration describing which exporter to build and how.
    - default_args: Optional defaults merged with *cfg* before construction.

    Returns:
    The constructed Exporter instance.
    """

Domain-Specific Exporters

Computer Vision Exporters

class CartoonTranslationExporter(Exporter):
    """Exporter dedicated to cartoon translation models."""

    def __init__(self, **kwargs):
        """Create a cartoon translation exporter."""

class FaceDetectionSCRFDExporter(Exporter):
    """Exporter dedicated to SCRFD face detection models."""

    def __init__(self, **kwargs):
        """Create an SCRFD face detection exporter."""

Multi-Modal Exporters

class StableDiffusionExporter(Exporter):
    """Exporter for Stable Diffusion models, tuned for inference deployment."""

    def __init__(self, **kwargs):
        """Create a Stable Diffusion exporter."""

Natural Language Processing Exporters

class CsanmtForTranslationExporter(Exporter):
    """Exporter dedicated to CSANMT translation models."""

    def __init__(self, **kwargs):
        """Create a CSANMT translation exporter."""

class SbertForSequenceClassificationExporter(Exporter):
    """Exporter dedicated to Sentence-BERT sequence classification models."""

    def __init__(self, **kwargs):
        """Create an SBERT sequence classification exporter."""

class SbertForZeroShotClassificationExporter(Exporter):
    """Exporter dedicated to Sentence-BERT zero-shot classification models."""

    def __init__(self, **kwargs):
        """Create an SBERT zero-shot classification exporter."""

Usage Examples

Basic Model Export

from modelscope import Model, TorchModelExporter

# Fetch a pretrained image-classification model from the hub
model = Model.from_pretrained('damo/cv_resnet50_image-classification_imagenet')

# Instantiate the PyTorch exporter
onnx_exporter = TorchModelExporter()

# Convert to ONNX, tracing with a 1x3x224x224 example input
onnx_exporter.export(
    model=model,
    output_dir='./exported_model',
    input_shape=(1, 3, 224, 224),
    export_format='onnx',
    opset_version=11
)

print("Model exported to ONNX format")

Export with Different Formats

from modelscope import Model, TorchModelExporter

model = Model.from_pretrained('model_name')
exporter = TorchModelExporter()

# One entry per target format; every export shares the same example input
export_targets = [
    {'output_dir': './onnx_export', 'export_format': 'onnx'},
    {'output_dir': './torchscript_export', 'export_format': 'torchscript'},
    # Half precision for faster inference
    {'output_dir': './tensorrt_export', 'export_format': 'tensorrt',
     'precision': 'fp16'},
]

for target in export_targets:
    exporter.export(model=model, input_shape=(1, 3, 224, 224), **target)

TensorFlow Model Export

from modelscope import Model, TfModelExporter

# Load the TensorFlow model to be converted
tf_model = Model.from_pretrained('tensorflow_model_name')

# Create TensorFlow exporter
tf_exporter = TfModelExporter()

# SavedModel: the canonical TensorFlow serving format
tf_exporter.export(
    model=tf_model,
    output_dir='./tf_savedmodel',
    export_format='savedmodel'
)

# TensorFlow Lite with graph optimization and quantization enabled
tf_exporter.export(
    model=tf_model,
    output_dir='./tf_lite',
    export_format='tflite',
    optimize=True,   # Enable optimizations
    quantize=True    # Enable quantization
)

# TensorFlow.js for in-browser inference
tf_exporter.export(
    model=tf_model,
    output_dir='./tfjs',
    export_format='tfjs'
)

Domain-Specific Export

from modelscope import Model
from modelscope.exporters import FaceDetectionSCRFDExporter, StableDiffusionExporter

# --- Face detection: SCRFD export with detection thresholds ---
face_model = Model.from_pretrained('damo/cv_ddsar_face-detection_iclr23-damofd')
face_exporter = FaceDetectionSCRFDExporter()

face_exporter.export(
    model=face_model,
    output_dir='./face_detection_export',
    input_shape=(1, 3, 640, 640),
    confidence_threshold=0.5,
    nms_threshold=0.4
)

# --- Stable Diffusion: export tuned for fast, memory-lean inference ---
sd_model = Model.from_pretrained('stable_diffusion_model')
sd_exporter = StableDiffusionExporter()

sd_exporter.export(
    model=sd_model,
    output_dir='./stable_diffusion_export',
    optimize_for_inference=True,
    enable_memory_efficient_attention=True
)

Export Configuration

from modelscope import Model, build_exporter

# Define export configuration
export_config = {
    'type': 'TorchModelExporter',
    'export_format': 'onnx',
    'opset_version': 12,
    'dynamic_axes': {
        'input': {0: 'batch_size'},
        'output': {0: 'batch_size'}
    },
    'optimize': True
}

# Build exporter from configuration
exporter = build_exporter(export_config)

# Load the model to export (the original snippet used `model` without
# ever defining it, which would raise NameError when run standalone)
model = Model.from_pretrained('model_name')

# Export model
exporter.export(
    model=model,
    output_dir='./configured_export',
    input_shape=(1, 3, 224, 224)
)

Batch Export for Multiple Models

from modelscope import Model, TorchModelExporter

models_to_export = [
    'damo/cv_resnet50_image-classification_imagenet',
    'damo/nlp_structbert_sentence-similarity_chinese',
    'damo/cv_yolox-s_detection_coco'
]

exporter = TorchModelExporter()

for model_name in models_to_export:
    print(f"Exporting {model_name}...")

    pretrained = Model.from_pretrained(model_name)

    # Vision checkpoints take image tensors; everything else is treated
    # as an NLP model taking token-id sequences
    shape = (1, 3, 224, 224) if 'cv' in model_name else (1, 512)

    output_dir = f"./exports/{model_name.replace('/', '_')}"
    exporter.export(
        model=pretrained,
        output_dir=output_dir,
        input_shape=shape,
        export_format='onnx'
    )

    print(f"Exported to {output_dir}")

Custom Export with Preprocessing

from modelscope import Model, TorchModelExporter
import torch

class ModelWithPreprocessing(torch.nn.Module):
    """Wraps a model so its preprocessing runs inside the exported graph."""

    def __init__(self, base_model, preprocessor):
        super().__init__()
        self.base_model = base_model
        self.preprocessor = preprocessor

    def forward(self, x):
        # Preprocess first, then delegate to the wrapped model
        return self.base_model(self.preprocessor(x))

# Load base model
base_model = Model.from_pretrained('model_name')

# BUG FIX: torch.nn.Sequential only accepts nn.Module instances; passing
# torch.nn.functional.normalize (a plain function) raises a TypeError.
# Wrap the functional call in a small module instead.
class _Normalize(torch.nn.Module):
    """Module wrapper around torch.nn.functional.normalize."""

    def forward(self, x):
        return torch.nn.functional.normalize(x)

preprocessor = torch.nn.Sequential(
    _Normalize(),
    # Add other preprocessing steps
)

# Combine model with preprocessing
combined_model = ModelWithPreprocessing(base_model, preprocessor)

# Export combined model
exporter = TorchModelExporter()
exporter.export(
    model=combined_model,
    output_dir='./combined_export',
    input_shape=(1, 3, 224, 224),
    export_format='onnx'
)

Export Validation and Testing

from modelscope import Model, TorchModelExporter
import numpy as np
import onnxruntime as ort
import torch  # BUG FIX: torch was used below (no_grad, randn) but never imported

# Export model
model = Model.from_pretrained('model_name')
exporter = TorchModelExporter()

output_dir = './validation_export'
exporter.export(
    model=model,
    output_dir=output_dir,
    input_shape=(1, 3, 224, 224),
    export_format='onnx'
)

# Validate exported model
def validate_export(original_model, onnx_path, test_input):
    """Return True when the ONNX model matches the original within tolerance.

    Parameters:
    - original_model: The source PyTorch model.
    - onnx_path: Path of the exported .onnx file.
    - test_input: Example input tensor fed to both models.
    """
    # Get original model output (inference mode, no gradient tracking)
    original_model.eval()
    with torch.no_grad():
        original_output = original_model(test_input)

    # Get ONNX model output
    ort_session = ort.InferenceSession(onnx_path)
    onnx_output = ort_session.run(
        None,
        {'input': test_input.numpy()}
    )[0]

    # Compare outputs element-wise
    diff = np.abs(original_output.numpy() - onnx_output)
    max_diff = np.max(diff)

    print(f"Maximum difference: {max_diff}")
    return max_diff < 1e-5  # Tolerance for numerical differences

# Create test input
test_input = torch.randn(1, 3, 224, 224)

# Validate
is_valid = validate_export(
    model,
    f'{output_dir}/model.onnx',
    test_input
)

print(f"Export validation: {'PASSED' if is_valid else 'FAILED'}")

Production Deployment Export

from modelscope import Model, TorchModelExporter

# Load production model
production_model = Model.from_pretrained('production_model_name')

# Configure for production deployment
exporter = TorchModelExporter()

# Dynamic batch dimension lets the deployed model serve any batch size
batch_dynamic_axes = {
    'input': {0: 'batch_size'},
    'output': {0: 'batch_size'}
}

# Export with graph-level and runtime optimizations enabled for serving
exporter.export(
    model=production_model,
    output_dir='./production_export',
    input_shape=(1, 3, 224, 224),
    export_format='onnx',
    opset_version=12,
    optimize=True,
    dynamic_axes=batch_dynamic_axes,
    # Production-specific optimizations
    graph_optimization_level='all',
    enable_memory_pattern=True,
    enable_cpu_mem_arena=True
)

print("Model exported and optimized for production deployment")

Install with Tessl CLI

npx tessl i tessl/pypi-modelscope

docs

datasets.md

export.md

hub.md

index.md

metrics.md

models.md

pipelines.md

preprocessors.md

training.md

utilities.md

tile.json