CtrlK
Blog · Docs · Log in · Get started
Tessl Logo

tessl/pypi-modelscope

ModelScope brings the notion of Model-as-a-Service to life with unified interfaces for state-of-the-art machine learning models.

Pending
Overview
Eval results
Files

docs/utilities.md

Utilities

ModelScope provides a comprehensive set of utility functions and classes for configuration management, logging, task constants, and various helper functions used throughout the ecosystem.

Capabilities

Task Constants

Comprehensive task type constants organized by domain.

class Tasks:
    """
    Comprehensive task type constants for different AI domains.

    Each nested class groups the string identifiers for one domain. The
    values are the task-name strings passed to ``pipeline(...)`` (see the
    usage examples below); use these constants instead of raw strings to
    avoid typos and get editor completion.
    """
    
    # Computer Vision Tasks
    class CVTasks:
        """Task-name strings for computer-vision pipelines."""
        IMAGE_CLASSIFICATION = 'image-classification'
        OBJECT_DETECTION = 'object-detection'
        SEMANTIC_SEGMENTATION = 'semantic-segmentation'
        INSTANCE_SEGMENTATION = 'instance-segmentation'
        FACE_DETECTION = 'face-detection'
        FACE_RECOGNITION = 'face-recognition'
        IMAGE_GENERATION = 'image-generation'
        IMAGE_INPAINTING = 'image-inpainting'
        IMAGE_DENOISING = 'image-denoising'
        IMAGE_SUPER_RESOLUTION = 'image-super-resolution'
        # Additional CV task constants available
    
    # Natural Language Processing Tasks
    class NLPTasks:
        """Task-name strings for natural-language-processing pipelines."""
        TEXT_CLASSIFICATION = 'text-classification'
        TOKEN_CLASSIFICATION = 'token-classification'
        TEXT_GENERATION = 'text-generation'
        TRANSLATION = 'translation'
        SUMMARIZATION = 'summarization'
        QUESTION_ANSWERING = 'question-answering'
        SENTIMENT_ANALYSIS = 'sentiment-analysis'
        NAMED_ENTITY_RECOGNITION = 'named-entity-recognition'
        TEXT_RANKING = 'text-ranking'
        FILL_MASK = 'fill-mask'
        # Additional NLP task constants available
    
    # Audio Processing Tasks
    class AudioTasks:
        """Task-name strings for audio-processing pipelines."""
        AUTOMATIC_SPEECH_RECOGNITION = 'automatic-speech-recognition'
        TEXT_TO_SPEECH = 'text-to-speech'
        AUDIO_CLASSIFICATION = 'audio-classification'
        SPEAKER_RECOGNITION = 'speaker-recognition'
        AUDIO_SEPARATION = 'audio-separation'
        # Additional audio task constants available
    
    # Multi-Modal Tasks
    class MultiModalTasks:
        """Task-name strings for vision+language (multi-modal) pipelines."""
        IMAGE_CAPTIONING = 'image-captioning'
        VISUAL_QUESTION_ANSWERING = 'visual-question-answering'
        TEXT_TO_IMAGE_SYNTHESIS = 'text-to-image-synthesis'
        IMAGE_TEXT_MATCHING = 'image-text-matching'
        # Additional multi-modal task constants available
    
    # Scientific Computing Tasks
    class ScienceTasks:
        """Task-name strings for scientific-computing pipelines."""
        PROTEIN_STRUCTURE_PREDICTION = 'protein-structure-prediction'
        MOLECULAR_PROPERTY_PREDICTION = 'molecular-property-prediction'
        # Additional science task constants available

Logging Utilities

Comprehensive logging configuration and management.

import logging


def get_logger(
    log_file: str = None,
    log_level: int = logging.INFO,
    file_mode: str = 'w',
    **kwargs
) -> logging.Logger:
    """
    Create and configure logger for ModelScope applications.

    Parameters:
    - log_file: Path to log file (optional, logs to console if None)
    - log_level: Logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
    - file_mode: File mode for log file ('w' for write, 'a' for append)
    - **kwargs: Additional logging configuration options:
      'name' (logger name, default 'modelscope'), 'format' (message
      format string), 'datefmt' (timestamp format)
    
    Returns:
    Configured logger instance
    """
    logger = logging.getLogger(kwargs.get('name', 'modelscope'))
    logger.setLevel(log_level)

    formatter = logging.Formatter(
        kwargs.get('format',
                   '%(asctime)s - %(name)s - %(levelname)s - %(message)s'),
        datefmt=kwargs.get('datefmt'))

    # Console handler is always attached; a file handler only when asked for.
    handlers = [logging.StreamHandler()]
    if log_file is not None:
        handlers.append(logging.FileHandler(log_file, file_mode))

    # Replace existing handlers so repeated get_logger() calls do not
    # duplicate every emitted record.
    logger.handlers.clear()
    for handler in handlers:
        handler.setLevel(log_level)
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    return logger

Configuration Management

Utilities for reading and managing configuration files.

def read_config(path: str, **kwargs) -> dict:
    """
    Read configuration from file (JSON, YAML, or Python).

    The format is chosen from the file extension:
    - '.json'          : parsed with the standard ``json`` module
    - '.yaml' / '.yml' : parsed with PyYAML (``yaml.safe_load``)
    - '.py'            : executed; public (non-underscore) top-level
                         names become the configuration dictionary

    Parameters:
    - path: Path to configuration file
    - **kwargs: Additional configuration loading options; 'encoding'
      sets the file encoding (default 'utf-8'), any remaining options
      are forwarded to ``json.load`` for JSON files

    Returns:
    Configuration dictionary

    Raises:
    - ValueError: if the file extension is not a supported format
    - FileNotFoundError: if ``path`` does not exist
    """
    import json
    import os

    encoding = kwargs.pop('encoding', 'utf-8')
    ext = os.path.splitext(path)[1].lower()

    if ext == '.json':
        with open(path, encoding=encoding) as f:
            return json.load(f, **kwargs)

    if ext in ('.yaml', '.yml'):
        import yaml  # optional dependency, only needed for YAML configs
        with open(path, encoding=encoding) as f:
            # safe_load never instantiates arbitrary objects from the file
            return yaml.safe_load(f)

    if ext == '.py':
        with open(path, encoding=encoding) as f:
            source = f.read()
        namespace = {}
        # NOTE: executes the config file — only load trusted Python configs.
        exec(compile(source, path, 'exec'), namespace)
        return {
            name: value
            for name, value in namespace.items()
            if not name.startswith('_')
        }

    raise ValueError(f"Unsupported configuration format: {path!r}")

Hub Utilities

Utility functions for interacting with ModelScope Hub.

def create_model_if_not_exist(
    model_name: str,
    model_type: str = 'model',
    **kwargs
):
    """
    Create model repository on ModelScope Hub if it doesn't exist.

    NOTE(review): signature stub only — the implementation is not shown
    here, so the exact Hub API calls and return value cannot be
    documented from this file.

    Parameters:
    - model_name: Name of the model repository
    - model_type: Type of repository ('model', 'dataset')
    - **kwargs: Additional repository creation options (the usage example
      below passes 'visibility' and 'license'; presumably forwarded to the
      Hub API — verify against the implementation)
    """

Usage Examples

Using Task Constants

from modelscope import Tasks, pipeline

# Build pipelines from the task-constant namespaces rather than raw strings.

# Computer vision
image_classifier = pipeline(Tasks.CVTasks.IMAGE_CLASSIFICATION, model='model_name')
object_detector = pipeline(Tasks.CVTasks.OBJECT_DETECTION, model='detection_model')

# Natural language processing
sentence_classifier = pipeline(Tasks.NLPTasks.TEXT_CLASSIFICATION, model='nlp_model')
text_generator = pipeline(Tasks.NLPTasks.TEXT_GENERATION, model='generation_model')

# Speech
speech_recognizer = pipeline(Tasks.AudioTasks.AUTOMATIC_SPEECH_RECOGNITION, model='speech_model')
speech_synthesizer = pipeline(Tasks.AudioTasks.TEXT_TO_SPEECH, model='tts_model')

# Vision + language
caption_generator = pipeline(Tasks.MultiModalTasks.IMAGE_CAPTIONING, model='caption_model')
vqa_pipeline = pipeline(Tasks.MultiModalTasks.VISUAL_QUESTION_ANSWERING, model='vqa_model')

# Enumerate every public CV task constant
cv_tasks = [name for name in dir(Tasks.CVTasks) if not name.startswith('_')]
print(f"Available CV tasks: {cv_tasks}")

Logger Configuration

from modelscope import get_logger
import logging

# Console-only logger with default settings
console_logger = get_logger()
console_logger.info("Basic logging message")

# Append-mode logger that also writes to a file
debug_logger = get_logger(
    log_file='modelscope.log',
    file_mode='a',
    log_level=logging.DEBUG,
)

# One message per severity level
for emit, text in [
    (debug_logger.debug, "Debug message"),
    (debug_logger.info, "Info message"),
    (debug_logger.warning, "Warning message"),
    (debug_logger.error, "Error message"),
]:
    emit(text)

# Logger with an explicit message format and timestamp layout
formatted_logger = get_logger(
    log_file='custom.log',
    log_level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
)

formatted_logger.info("Custom formatted log message")

Configuration File Management

from modelscope import read_config

# Every supported configuration format loads through the same call;
# the file extension selects the parser.
config_from_json = read_config('config.json')
print(f"JSON config: {config_from_json}")

config_from_yaml = read_config('config.yaml')
print(f"YAML config: {config_from_yaml}")

config_from_py = read_config('config.py')
print(f"Python config: {config_from_py}")

# Feed a loaded configuration straight into the training arguments
cfg = read_config('training_config.json')

from modelscope import EpochBasedTrainer, TrainingArgs

training_args = TrainingArgs(
    output_dir=cfg['output_dir'],
    max_epochs=cfg['max_epochs'],
    learning_rate=cfg['learning_rate'],
    train_batch_size=cfg['batch_size']
)

Hub Utilities Usage

from modelscope import create_model_if_not_exist, HubApi

# Create model repository if it doesn't exist
create_model_if_not_exist(
    model_name='my-custom-model',
    model_type='model',
    visibility=1,  # Public repository
    license='Apache-2.0'
)

# Use with Hub API
api = HubApi()
api.login('your_token')

# Check if model exists before creating.
try:
    model_info = api.get_model_info('my-custom-model')
    print(f"Model exists: {model_info['name']}")
# Bare `except:` would also swallow SystemExit/KeyboardInterrupt; catch
# Exception instead (narrow further to the Hub's not-found error if the
# API exposes one).
except Exception:
    # Model doesn't exist, create it
    create_model_if_not_exist('my-custom-model')
    print("Model repository created")

Device and Hardware Utilities

from modelscope.utils.device import get_device, is_gpu_available

# Let ModelScope pick the best available device automatically
device = get_device()
print(f"Selected device: {device}")

# Override explicitly based on GPU availability
if not is_gpu_available():
    print("Using CPU for computation")
    device = 'cpu'
else:
    print("GPU is available for acceleration")
    device = 'cuda'

# Load the model onto the chosen device
from modelscope import Model
model = Model.from_pretrained('model_name', device=device)

File Utilities

from modelscope.utils.file_utils import get_cache_dir, makedirs

# Where ModelScope caches downloaded artifacts
cache_dir = get_cache_dir()
print(f"Cache directory: {cache_dir}")

# Create nested output directories without failing when they already exist
output_dir = './output/experiments/run_1'
makedirs(output_dir, exist_ok=True)
print(f"Created directory: {output_dir}")

# Path inspection helpers
from modelscope.utils.file_utils import is_local_path, get_file_extension

local_file = '/local/path/to/file.json'
remote_file = 'https://remote.com/file.json'

print(f"Is local path: {is_local_path(local_file)}")  # True
print(f"Is local path: {is_local_path(remote_file)}")  # False

print(f"File extension: {get_file_extension(local_file)}")  # .json

Import Utilities

from modelscope.utils.import_utils import (
    is_torch_available,
    is_transformers_available,
    is_opencv_available
)

# Probe optional dependencies before touching the features that need them
if is_torch_available():
    print("PyTorch is available")
    from modelscope import TorchModel
    # Use PyTorch-specific functionality

if is_transformers_available():
    print("Transformers library is available")
    # Use HuggingFace transformers integration

if is_opencv_available():
    print("OpenCV is available")
    # Use computer vision functionality

def get_model_class():
    """Return TorchModel when PyTorch is installed, otherwise the base Model."""
    if not is_torch_available():
        from modelscope import Model
        return Model
    from modelscope import TorchModel
    return TorchModel

ModelClass = get_model_class()
model = ModelClass.from_pretrained('model_name')

Configuration Templates

from modelscope import read_config, get_logger

# Create configuration template
def create_training_config(
    model_name: str,
    output_dir: str,
    max_epochs: int = 10,
    learning_rate: float = 1e-5
) -> dict:
    """
    Build a standardized training configuration dictionary.

    Parameters:
    - model_name: Model identifier to train
    - output_dir: Directory for checkpoints and logs
    - max_epochs: Number of training epochs (default 10)
    - learning_rate: Optimizer learning rate (default 1e-5)

    Returns:
    Dict with 'model', 'training', and 'data' sections.
    """
    model_section = {
        'name': model_name,
        'type': 'classification'
    }
    # Fixed defaults: batch size 32, per-epoch eval/save, log every 100 steps.
    training_section = {
        'output_dir': output_dir,
        'max_epochs': max_epochs,
        'learning_rate': learning_rate,
        'batch_size': 32,
        'eval_strategy': 'epoch',
        'save_strategy': 'epoch',
        'logging_steps': 100
    }
    data_section = {
        'train_file': 'train.json',
        'eval_file': 'eval.json',
        'test_file': 'test.json'
    }
    return {
        'model': model_section,
        'training': training_section,
        'data': data_section
    }

# Build a configuration from the template helper
config = create_training_config(
    model_name='damo/nlp_structbert_base_chinese',
    output_dir='./output',
    max_epochs=5,
    learning_rate=2e-5
)

# Persist the configuration as pretty-printed JSON
import json
with open('training_config.json', 'w') as config_file:
    json.dump(config, config_file, indent=2)

# Round-trip: load it back and log which model it targets
loaded_config = read_config('training_config.json')
logger = get_logger('training.log')
logger.info(f"Training configuration loaded: {loaded_config['model']['name']}")

Utility Function Composition

from modelscope import get_logger, read_config, Tasks

def setup_experiment(config_path: str, log_file: str = None):
    """
    Set up complete experiment environment.

    Parameters:
    - config_path: Path to experiment configuration
    - log_file: Optional log file path

    Returns:
    Tuple of (config, logger, task_type)
    """
    experiment_config = read_config(config_path)

    # Log level comes from the config when present, defaulting to INFO.
    experiment_logger = get_logger(
        log_file=log_file,
        log_level=experiment_config.get('log_level', 'INFO')
    )

    # Fall back to text classification when the config omits a task.
    task_type = experiment_config.get('task_type', Tasks.NLPTasks.TEXT_CLASSIFICATION)

    experiment_logger.info(f"Experiment setup complete")
    experiment_logger.info(f"Task type: {task_type}")
    experiment_logger.info(f"Configuration loaded from: {config_path}")

    return experiment_config, experiment_logger, task_type

# Bootstrap an experiment by composing the utilities above
experiment_config, experiment_logger, experiment_task = setup_experiment(
    'experiment_config.json',
    'experiment.log'
)

# Run the experiment with the prepared settings
from modelscope import pipeline
pipe = pipeline(experiment_task, model=experiment_config['model_name'])

Install with Tessl CLI

npx tessl i tessl/pypi-modelscope

docs

datasets.md

export.md

hub.md

index.md

metrics.md

models.md

pipelines.md

preprocessors.md

training.md

utilities.md

tile.json