
tessl/pypi-google-cloud-aiplatform

Comprehensive Python client library for Google Cloud Vertex AI, offering machine learning tools, generative AI models, and MLOps capabilities


Experiment Tracking

Comprehensive experiment management, metrics logging, and artifact tracking, with integrations for popular ML frameworks. Vertex AI Experiments provides centralized tracking for model development and comparison.

Capabilities

Experiment Management

Create and manage experiments for organizing related model training runs and comparisons.

class Experiment:
    @classmethod
    def create(
        cls,
        experiment_id: str,
        display_name: Optional[str] = None,
        description: Optional[str] = None,
        labels: Optional[Dict[str, str]] = None,
        **kwargs
    ) -> 'Experiment': ...

    @classmethod
    def get(cls, experiment_id: str, **kwargs) -> 'Experiment': ...

    @classmethod
    def list(cls, **kwargs) -> List['Experiment']: ...

    def get_data_frame(self, **kwargs) -> 'pandas.DataFrame': ...

    @property
    def resource_name(self) -> str: ...
    @property
    def experiment_id(self) -> str: ...
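
A minimal sketch of the experiment lifecycle based on the stubs above; the project, location, and the 'pricing-models' ID are placeholder values:

import google.cloud.aiplatform as aiplatform

aiplatform.init(project='my-project', location='us-central1')

# Create a new experiment (placeholder ID and descriptions).
experiment = aiplatform.Experiment.create(
    experiment_id='pricing-models',
    display_name='Pricing Models',
    description='Compare pricing model variants',
)

# Retrieve it later, or enumerate all experiments in the project.
same_experiment = aiplatform.Experiment.get('pricing-models')
for exp in aiplatform.Experiment.list():
    print(exp.experiment_id, exp.resource_name)

# Summarize all runs in the experiment as a pandas DataFrame.
df = experiment.get_data_frame()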

Experiment Runs

Individual training runs within an experiment, each with comprehensive metadata and artifact tracking.

class ExperimentRun:
    @classmethod
    def create(
        cls,
        run_id: str,
        experiment: Union[str, Experiment],
        display_name: Optional[str] = None,
        **kwargs
    ) -> 'ExperimentRun': ...

    def log_params(self, params: Dict[str, Union[str, int, float]]) -> None: ...
    def log_metrics(self, metrics: Dict[str, Union[int, float]]) -> None: ...
    def log_classification_metrics(
        self,
        labels: List[str],
        matrix: List[List[int]],
        fpr: Optional[List[float]] = None,
        tpr: Optional[List[float]] = None,
        threshold: Optional[List[float]] = None,
        display_name: Optional[str] = None
    ) -> None: ...

    def log_model(
        self,
        model: Any,
        artifact_id: Optional[str] = None,
        uri: Optional[str] = None,
        input_example: Optional[Any] = None,
        display_name: Optional[str] = None,
        metadata: Optional[Dict] = None,
        **kwargs
    ) -> None: ...

    def log_time_series_metrics(
        self,
        metrics: Dict[str, List[Union[int, float]]],
        step: Optional[List[int]] = None,
        wall_time: Optional[List[Union[int, float]]] = None
    ) -> None: ...

    def end_run(self, state: Optional[str] = None) -> None: ...

    @property
    def resource_name(self) -> str: ...
    @property
    def run_id(self) -> str: ...
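
The run-level logging methods not exercised in the usage examples below can be sketched like this, continuing the placeholder experiment from the sketch above; all metric values and curve points are illustrative:

import google.cloud.aiplatform as aiplatform

run = aiplatform.ExperimentRun.create(
    run_id='run-042',                  # placeholder run ID
    experiment='pricing-models',
)

# Confusion matrix with optional ROC curve points.
run.log_classification_metrics(
    labels=['negative', 'positive'],
    matrix=[[48, 2], [5, 45]],         # rows: actual, columns: predicted
    fpr=[0.0, 0.1, 1.0],
    tpr=[0.0, 0.8, 1.0],
    threshold=[1.0, 0.5, 0.0],
    display_name='eval-confusion-matrix',
)

# One value per step for each named series.
run.log_time_series_metrics(
    metrics={'train_loss': [0.9, 0.5, 0.3]},
    step=[1, 2, 3],
)

run.end_run()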

Global Experiment Functions

Convenience functions for experiment tracking without explicit experiment/run management.

def init(
    project: str,
    location: str,
    experiment: Optional[str] = None,
    experiment_description: Optional[str] = None,
    staging_bucket: Optional[str] = None,
    credentials: Optional[auth_credentials.Credentials] = None,
    encryption_spec_key_name: Optional[str] = None,
    **kwargs
) -> None: ...

def start_run(
    run: str,
    resume: bool = False,
    experiment: Optional[str] = None,
    **kwargs
) -> ExperimentRun: ...

def end_run(state: Optional[str] = None) -> None: ...

def log_params(params: Dict[str, Union[str, int, float]]) -> None: ...

def log_metrics(metrics: Dict[str, Union[int, float]]) -> None: ...

def log_classification_metrics(
    labels: List[str],
    matrix: List[List[int]],
    fpr: Optional[List[float]] = None,
    tpr: Optional[List[float]] = None,
    threshold: Optional[List[float]] = None,
    display_name: Optional[str] = None
) -> None: ...

def log_model(
    model: Any,
    artifact_id: Optional[str] = None,
    uri: Optional[str] = None,
    input_example: Optional[Any] = None,
    display_name: Optional[str] = None,
    metadata: Optional[Dict] = None,
    **kwargs
) -> None: ...

def log_time_series_metrics(
    metrics: Dict[str, List[Union[int, float]]],
    step: Optional[List[int]] = None,
    wall_time: Optional[List[Union[int, float]]] = None
) -> None: ...

def get_experiment_df(experiment: Optional[str] = None) -> 'pandas.DataFrame': ...

def get_pipeline_df(pipeline: str) -> 'pandas.DataFrame': ...

def autolog(framework: Optional[str] = None, **kwargs) -> None: ...

def start_execution(
    schema_title: str,
    display_name: Optional[str] = None,
    **kwargs
) -> 'Execution': ...

def log(data: Dict[str, Any]) -> None: ...
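
The start_execution helper pairs naturally with a with-block; treating the returned Execution as a context manager follows the Vertex ML Metadata examples, though the stub above only promises an Execution return value. The schema title is a standard system schema and the display name is a placeholder:

import google.cloud.aiplatform as aiplatform

with aiplatform.start_execution(
    schema_title='system.ContainerExecution',   # standard system schema
    display_name='preprocess-step',             # placeholder name
) as execution:
    # Work done inside the block is recorded against this execution.
    print(execution.execution_id)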

Metadata Resources

Structured metadata resources for tracking ML artifacts and their relationships.

class Artifact:
    @classmethod
    def create(
        cls,
        schema_title: str,
        uri: Optional[str] = None,
        display_name: Optional[str] = None,
        description: Optional[str] = None,
        metadata: Optional[Dict] = None,
        **kwargs
    ) -> 'Artifact': ...

    def log_params(self, params: Dict[str, Union[str, int, float]]) -> None: ...
    def log_metrics(self, metrics: Dict[str, Union[int, float]]) -> None: ...

    @property
    def resource_name(self) -> str: ...
    @property
    def artifact_id(self) -> str: ...
    @property
    def uri(self) -> Optional[str]: ...
    @property
    def metadata(self) -> Dict: ...

class Execution:
    @classmethod
    def create(
        cls,
        schema_title: str,
        display_name: Optional[str] = None,
        description: Optional[str] = None,
        metadata: Optional[Dict] = None,
        **kwargs
    ) -> 'Execution': ...

    def assign_input_artifacts(self, artifacts: List[Artifact]) -> None: ...
    def assign_output_artifacts(self, artifacts: List[Artifact]) -> None: ...

    @property
    def resource_name(self) -> str: ...
    @property
    def execution_id(self) -> str: ...

class Context:
    @classmethod
    def create(
        cls,
        schema_title: str,
        display_name: Optional[str] = None,
        description: Optional[str] = None,
        metadata: Optional[Dict] = None,
        **kwargs
    ) -> 'Context': ...

    def add_artifacts_and_executions(
        self,
        artifact_resource_names: Optional[List[str]] = None,
        execution_resource_names: Optional[List[str]] = None
    ) -> None: ...

    @property
    def resource_name(self) -> str: ...
    @property
    def context_id(self) -> str: ...
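
A sketch of lineage tracking built only from the stubs above; the schema titles are standard Vertex ML Metadata system schemas, and the URIs and display names are placeholders:

import google.cloud.aiplatform as aiplatform

# Register the input dataset and output model as artifacts.
dataset = aiplatform.Artifact.create(
    schema_title='system.Dataset',
    uri='gs://my-bucket/data/train.csv',    # placeholder GCS path
    display_name='training-data',
)
model_artifact = aiplatform.Artifact.create(
    schema_title='system.Model',
    uri='gs://my-bucket/models/v1',         # placeholder GCS path
    display_name='trained-model',
)

# Record the training step that connects them.
execution = aiplatform.Execution.create(
    schema_title='system.ContainerExecution',
    display_name='training-step',
)
execution.assign_input_artifacts([dataset])
execution.assign_output_artifacts([model_artifact])

# Group the related artifacts and executions under a context.
context = aiplatform.Context.create(
    schema_title='system.ExperimentRun',
    display_name='lineage-context',
)
context.add_artifacts_and_executions(
    artifact_resource_names=[dataset.resource_name, model_artifact.resource_name],
    execution_resource_names=[execution.resource_name],
)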

Model Saving

Comprehensive model artifact management with framework integration.

def save_model(
    model: Any,
    artifact_id: str,
    uri: Optional[str] = None,
    input_example: Optional[Any] = None,
    display_name: Optional[str] = None,
    metadata: Optional[Dict] = None,
    **kwargs
) -> Artifact: ...

def get_experiment_model(
    artifact_id: str,
    experiment: Optional[str] = None,
    **kwargs
) -> Any: ...
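
A sketch of the save/retrieve round trip per the stubs above; the scikit-learn model, artifact ID, and example input are assumptions for illustration:

import numpy as np
import google.cloud.aiplatform as aiplatform
from sklearn.linear_model import LinearRegression

# Train a small placeholder model.
X, y = np.random.rand(20, 2), np.random.rand(20)
model = LinearRegression().fit(X, y)

# Save it as an experiment model artifact (placeholder artifact ID).
artifact = aiplatform.save_model(
    model=model,
    artifact_id='linreg-v1',
    input_example=X[:1],
)

# Retrieve the saved model later by its artifact ID.
restored = aiplatform.get_experiment_model('linreg-v1')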

Usage Examples

Basic experiment tracking:

import google.cloud.aiplatform as aiplatform

# Initialize with experiment
aiplatform.init(
    project='my-project',
    location='us-central1',
    experiment='customer-segmentation',
    experiment_description='Customer segmentation model experiments'
)

# Start a run
aiplatform.start_run('run-001', resume=False)

# Log parameters
aiplatform.log_params({
    'learning_rate': 0.01,
    'batch_size': 32,
    'epochs': 100,
    'model_type': 'xgboost'
})

# Train your model (train_model is a placeholder for your own training code)
model = train_model(...)

# Log metrics
aiplatform.log_metrics({
    'accuracy': 0.95,
    'precision': 0.92,
    'recall': 0.89,
    'f1_score': 0.90
})

# Log model
aiplatform.log_model(model, artifact_id='customer-segmentation-v1')

# End run
aiplatform.end_run()

Advanced experiment management:

# Create explicit experiment
experiment = aiplatform.Experiment.create(
    experiment_id='hyperparameter-tuning',
    display_name='Hyperparameter Tuning Experiment',
    description='Testing different hyperparameter combinations'
)

# Create multiple runs
for lr in [0.001, 0.01, 0.1]:
    run = aiplatform.ExperimentRun.create(
        run_id=f'lr-{lr}',
        experiment=experiment
    )
    
    run.log_params({'learning_rate': lr})
    
    # Train and evaluate the model (train_and_evaluate is a placeholder)
    accuracy = train_and_evaluate(lr)
    
    run.log_metrics({'accuracy': accuracy})
    run.end_run()

# Get experiment results as DataFrame
df = experiment.get_data_frame()
print(df[['run_name', 'param.learning_rate', 'metric.accuracy']])

Framework integration with autolog:

# Enable automatic logging for supported frameworks
aiplatform.autolog(framework='tensorflow')

# Or let autolog detect installed frameworks automatically
aiplatform.autolog()

import tensorflow as tf

# Your training code - metrics and parameters are logged automatically.
# X_train, y_train, X_val, y_val are assumed to be defined.
model = tf.keras.Sequential([...])
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(X_train, y_train, validation_data=(X_val, y_val))

Together, these APIs provide full lifecycle management for ML experiments, with automatic logging for popular frameworks and detailed metadata tracking.

Install with Tessl CLI

npx tessl i tessl/pypi-google-cloud-aiplatform
