Google Cloud Vision AI API client library for building and deploying Vertex AI Vision applications
—
Core type definitions, enums, and data structures shared across all Google Cloud Vision AI services. These types provide the foundation for video analytics, streaming, asset management, and application configuration.
class OperationMetadata:
    """Metadata for long-running operations."""

    create_time: Timestamp  # Operation creation time
    end_time: Timestamp  # Operation completion time
    target: str  # Target resource for operation
    verb: str  # Operation verb (create, update, delete, etc.)
    status_message: str  # Human-readable status message
    requested_cancellation: bool  # Whether cancellation was requested
    api_version: str  # API version used for operation


class FieldMask:
    """Specifies fields to update in update operations."""

    paths: List[str]  # List of field paths to update


class Timestamp:
    """Timestamp representation."""

    seconds: int  # Seconds since Unix epoch
    nanos: int  # Nanoseconds component (0-999,999,999)


class Duration:
    """Duration representation."""

    seconds: int  # Duration in seconds
    nanos: int  # Nanoseconds component


class Empty:
    """Empty response type."""

    pass


class GcsSource:
    """Google Cloud Storage source specification."""

    uris: List[str]  # List of GCS URIs
class StringArray:
    """Array of string values for search criteria."""

    txt_values: List[str]  # String values


class IntArray:
    """Array of integer values for search criteria."""

    int_values: List[int]  # Integer values


class FloatArray:
    """Array of float values for search criteria."""

    float_values: List[float]  # Float values


class MachineSpec:
    """Machine specification for deployment."""

    machine_type: str  # Machine type (e.g., "n1-standard-4")


class DedicatedResources:
    """Dedicated resource allocation."""

    machine_spec: MachineSpec  # Machine specification
    min_replica_count: int  # Minimum number of replicas
    max_replica_count: int  # Maximum number of replicas
    autoscaling_metric_specs: List[AutoscalingMetricSpec]  # Autoscaling configuration


class AutoscalingMetricSpec:
    """Autoscaling metric specification."""

    metric_name: str  # Name of metric to track
    # Union field oneof target:
    target: int  # Target value for metric
class AcceleratorType(Enum):
    """Hardware accelerator types."""

    ACCELERATOR_TYPE_UNSPECIFIED = 0
    NVIDIA_TESLA_K80 = 1  # NVIDIA Tesla K80
    NVIDIA_TESLA_P4 = 2  # NVIDIA Tesla P4
    NVIDIA_TESLA_P100 = 3  # NVIDIA Tesla P100
    NVIDIA_TESLA_V100 = 4  # NVIDIA Tesla V100
    NVIDIA_TESLA_T4 = 5  # NVIDIA Tesla T4
    TPU_V2 = 6  # Cloud TPU v2
    TPU_V3 = 7  # Cloud TPU v3


class VertexAutoMLVisionConfig:
    """Vertex AutoML Vision configuration."""

    confidence_threshold: float  # Confidence threshold for predictions
    max_predictions: int  # Maximum number of predictions


class VertexAutoMLVideoConfig:
    """Vertex AutoML Video configuration."""

    confidence_threshold: float  # Confidence threshold for predictions
    blocked_labels: List[str]  # Labels to block from predictions
    max_predictions: int  # Maximum number of predictions


class VertexCustomConfig:
    """Custom Vertex AI model configuration."""

    machine_spec: MachineSpec  # Machine specification
    dedicated_resources: DedicatedResources  # Resource allocation
    post_processing_cloud_function: str  # Post-processing function
class GeneralObjectDetectionConfig:
    """General object detection configuration."""

    pass  # Configuration for general object detection


class PersonBlurConfig:
    """Person blurring configuration."""

    person_blur_type: PersonBlurType  # Type of blurring to apply
    faces_only: bool  # Whether to blur faces only


class PersonBlurType(Enum):
    """Types of person blurring."""

    PERSON_BLUR_TYPE_UNSPECIFIED = 0
    FULL_BODY = 1  # Blur entire person
    FACE_ONLY = 2  # Blur face only


class OccupancyCountConfig:
    """Occupancy counting configuration."""

    enable_people_counting: bool  # Enable people counting
    enable_dwelling_time_tracking: bool  # Enable dwelling time tracking


class PersonVehicleDetectionConfig:
    """Person and vehicle detection configuration."""

    enable_people_counting: bool  # Enable people counting
    enable_vehicle_counting: bool  # Enable vehicle counting


class PersonalProtectiveEquipmentDetectionConfig:
    """PPE detection configuration."""

    enable_face_coverage_detection: bool  # Detect face coverage
    enable_head_coverage_detection: bool  # Detect head coverage
    enable_hands_coverage_detection: bool  # Detect hand coverage


class MediaWarehouseConfig:
    """Media warehouse output configuration."""

    corpus: str  # Target corpus for output
    region: str  # Region for storage
    ttl: Duration  # Time-to-live for stored data
class GcsOutputConfig:
    """Google Cloud Storage output configuration."""

    bucket: str  # Target GCS bucket
    reporting_enabled: bool  # Enable output reporting


class BigQueryConfig:
    """BigQuery output configuration."""

    table: str  # Target BigQuery table
    cloud_function_mapping: Dict[str, str]  # Function mappings
    create_default_table_if_not_exists: bool  # Create table if missing


class NormalizedVertex:
    """Normalized coordinate point (0.0 to 1.0)."""

    x: float  # X coordinate (0.0 to 1.0)
    y: float  # Y coordinate (0.0 to 1.0)
class NormalizedPolygon:
    """Polygon with normalized coordinates."""

    normalized_vertices: List[NormalizedVertex]  # Polygon vertices


class NormalizedPolyline:
    """Polyline with normalized coordinates."""

    normalized_vertices: List[NormalizedVertex]  # Polyline vertices


class GeoCoordinate:
    """Geographic coordinate."""

    latitude: float  # Latitude in degrees
    longitude: float  # Longitude in degrees


class ClassificationPredictionResult:
    """Classification prediction results."""

    classifications: List[Classification]  # Classification results


class Classification:
    """Individual classification result."""

    score: float  # Confidence score
    class_name: str  # Predicted class name
class ObjectDetectionPredictionResult:
    """Object detection prediction results."""

    identified_boxes: List[IdentifiedBox]  # Detected objects


class IdentifiedBox:
    """Detected object with bounding box."""

    entity: Entity  # Detected entity information
    normalized_bounding_box: NormalizedBoundingBox  # Bounding box coordinates
    confidence_score: float  # Detection confidence


class Entity:
    """Detected entity information."""

    entity_id: str  # Entity identifier
    label_string: str  # Human-readable label


class NormalizedBoundingBox:
    """Normalized bounding box coordinates."""

    xmin: float  # Left edge (0.0 to 1.0)
    ymin: float  # Top edge (0.0 to 1.0)
    xmax: float  # Right edge (0.0 to 1.0)
    ymax: float  # Bottom edge (0.0 to 1.0)
class VideoActionRecognitionPredictionResult:
    """Video action recognition results."""

    actions: List[ActionRecognition]  # Recognized actions


class ActionRecognition:
    """Individual action recognition result."""

    action_name: str  # Name of recognized action
    confidence: float  # Recognition confidence
    timespan: TimeSpan  # Time span of action


class TimeSpan:
    """Time span specification."""

    start_time_offset: Duration  # Start time offset
    end_time_offset: Duration  # End time offset


class VideoObjectTrackingPredictionResult:
    """Video object tracking results."""

    objects: List[ObjectTracking]  # Tracked objects


class ObjectTracking:
    """Individual object tracking result."""

    entity: Entity  # Tracked entity
    confidence: float  # Tracking confidence
    track_id: int  # Unique track identifier
    normalized_bounding_box: NormalizedBoundingBox  # Object location


class OccupancyCountingPredictionResult:
    """Occupancy counting results."""

    current_count: int  # Current occupancy count
    dwell_time_info: List[DwellTimeInfo]  # Dwelling time information


class DwellTimeInfo:
    """Dwelling time information."""

    track_id: str  # Track identifier
    zone_id: str  # Zone identifier
    dwell_start_time: Timestamp  # Dwell start time
    dwell_end_time: Timestamp  # Dwell end time
class PersonalProtectiveEquipmentDetectionOutput:
    """PPE detection results."""

    current_time: Timestamp  # Detection timestamp
    detected_persons: List[PPEIdentifiedBox]  # Detected persons with PPE


class PPEIdentifiedBox:
    """Person detection with PPE information."""

    box_id: int  # Box identifier
    normalized_bounding_box: NormalizedBoundingBox  # Person location
    confidence_score: float  # Detection confidence
    ppe_entity: List[PPEEntity]  # PPE entities detected


class PPEEntity:
    """PPE entity information."""

    ppe_label_id: int  # PPE label identifier
    ppe_label_string: str  # PPE label description
    ppe_supercategory_label_string: str  # PPE category
    ppe_confidence_score: float  # PPE detection confidence


class StreamAnnotation:
    """Individual stream annotation."""

    id: str  # Annotation identifier
    source_stream: str  # Source stream
    type: StreamAnnotationType  # Annotation type
    # Union field oneof annotation:
    active_zone_counting_annotation: ActiveZoneCountingAnnotation  # Zone counting
    crossing_line_counting_annotation: CrossingLineCountingAnnotation  # Line crossing
    object_detection_annotation: ObjectDetectionStreamAnnotation  # Object detection
    object_tracking_annotation: ObjectTrackingStreamAnnotation  # Object tracking
class StreamAnnotationType(Enum):
    """Types of stream annotations."""

    STREAM_ANNOTATION_TYPE_UNSPECIFIED = 0
    ACTIVE_ZONE_COUNTING = 1  # Active zone counting
    CROSSING_LINE_COUNTING = 2  # Crossing line counting
    OBJECT_DETECTION = 3  # Object detection
    OBJECT_TRACKING = 4  # Object tracking


class ActiveZoneCountingAnnotation:
    """Active zone counting annotation."""

    counting_line_annotations: List[NormalizedPolyline]  # Counting lines


class CrossingLineCountingAnnotation:
    """Crossing line counting annotation."""

    counting_line_annotations: List[NormalizedPolyline]  # Counting lines


class ObjectDetectionStreamAnnotation:
    """Object detection stream annotation."""

    bounding_box: NormalizedBoundingBox  # Detection bounding box


class ObjectTrackingStreamAnnotation:
    """Object tracking stream annotation."""

    track_id: str  # Tracking identifier
    bounding_box: NormalizedBoundingBox  # Tracking bounding box


class AppPlatformEventBody:
    """Event body for app platform notifications."""

    event_message: str  # Event message content
    event_id: str  # Event identifier
class AppPlatformMetadata:
    """Metadata for app platform operations."""

    application: str  # Application resource path
    instance_id: str  # Instance identifier
    node: str  # Processing node
    processor: str  # Processor resource path


class AppPlatformCloudFunctionRequest:
    """Request for app platform cloud function."""

    annotations: List[AppPlatformEventBody]  # Event annotations
    application_metadata: AppPlatformMetadata  # Application metadata


class AppPlatformCloudFunctionResponse:
    """Response from app platform cloud function."""

    annotations: List[StreamAnnotation]  # Processed annotations
    events: List[AppPlatformEventBody]  # Generated events


class DataSchema:
    """Schema definition for structured data."""

    key: str  # Schema key identifier
    # Union field oneof schema_details:
    list_config: ListConfig  # List configuration
class ListConfig:
    """Configuration for list-type data."""

    pass  # List configuration options


class FacetGroup:
    """Faceted search group."""

    facet_id: str  # Facet identifier
    facet_values: List[FacetValue]  # Facet values


class FacetValue:
    """Individual facet value."""

    value: str  # Facet value
    selected: bool  # Whether value is selected


class FacetProperty:
    """Facet property definition."""

    fixed_range_bucket_spec: FixedRangeBucketSpec  # Fixed range buckets
    custom_range_bucket_spec: CustomRangeBucketSpec  # Custom range buckets
    datetime_bucket_spec: DateTimeBucketSpec  # DateTime buckets
    mapped_fields: List[str]  # Mapped field names


class FixedRangeBucketSpec:
    """Fixed range bucket specification."""

    bucket_start: float  # Bucket start value
    bucket_granularity: float  # Bucket size
    bucket_count: int  # Number of buckets


class CustomRangeBucketSpec:
    """Custom range bucket specification."""

    endpoints: List[float]  # Custom bucket endpoints


class DateTimeBucketSpec:
    """DateTime bucket specification."""

    granularity: DateTimeBucketSpecGranularity  # Time granularity
class DateTimeBucketSpecGranularity(Enum):
    """DateTime bucket granularities."""

    GRANULARITY_UNSPECIFIED = 0
    YEAR = 1  # Yearly buckets
    MONTH = 2  # Monthly buckets
    DAY = 3  # Daily buckets
    HOUR = 4  # Hourly buckets


class DateTimeRange:
    """Date and time range specification."""

    start_datetime: Timestamp  # Range start time
    end_datetime: Timestamp  # Range end time


class FloatRange:
    """Floating point number range."""

    start: float  # Range start value
    end: float  # Range end value


class IntRange:
    """Integer number range."""

    start: int  # Range start value
    end: int  # Range end value


class BooleanCriteria:
    """Boolean search criteria."""

    value: bool  # Boolean value to match


class FeatureCriteria:
    """Feature-based search criteria."""

    # Union field oneof feature:
    image_query: ImageQuery  # Image-based query
    text_query: str  # Text-based query


class ImageQuery:
    """Image-based search query."""

    # Union field oneof image:
    input_image: bytes  # Raw image data
    asset: str  # Asset containing reference image


class InputEdge:
    """Input edge connecting processing nodes."""

    parent_node: str  # Parent node name
    parent_output: str  # Parent output name
    child_input: str  # Child input name
class GraphInputChannelSpec:
    """Input channel specification for processing graph."""

    name: str  # Channel name
    data_type: DataType  # Channel data type
    accepted_data_type_ids: List[str]  # Accepted data type IDs
    required: bool  # Whether input is required
    # NOTE(review): AttributeValue is declared elsewhere in the package — confirm.
    default_value: AttributeValue  # Default value if not provided


class GraphOutputChannelSpec:
    """Output channel specification for processing graph."""

    name: str  # Channel name
    data_type: DataType  # Channel data type


class InstanceResourceInputBindingSpec:
    """Input binding specification for instances."""

    config_type_url: str  # Configuration type URL
    resource_type_url: str  # Resource type URL


class InstanceResourceOutputBindingSpec:
    """Output binding specification for instances."""

    config_type_url: str  # Configuration type URL
    resource_type_url: str  # Resource type URL


class DataType(Enum):
    """Data types for processing channels."""

    DATA_TYPE_UNSPECIFIED = 0
    VIDEO = 1  # Video data
    PROTO = 2  # Protocol buffer data
    IMAGE = 3  # Image data


class CustomProcessorSourceInfo:
    """Source information for custom processors."""

    # Union field oneof artifact_path:
    vertex_model: str  # Vertex AI model resource
    source_type: CustomProcessorSourceInfoSourceType  # Source type


class CustomProcessorSourceInfoSourceType(Enum):
    """Source types for custom processors."""

    SOURCE_TYPE_UNSPECIFIED = 0
    VERTEX_AUTOML = 1  # Vertex AutoML source
    VERTEX_CUSTOM = 2  # Vertex Custom source
    GENERAL_PROCESSOR = 3  # General processor source
class ApplicationRuntimeInfo:
"""Runtime information for applications."""
deploy_time: Timestamp # Deployment time
# Union field oneof runtime_info:
global_output_resources: List[OutputResourceBinding] # Global output resources
class ApplicationEventDeliveryConfig:
"""Event delivery configuration for applications."""
channel: str # Delivery channel
minimal_delivery_interval: Duration # Minimum delivery intervalfrom google.cloud import visionai_v1
# Example: Creating temporal partition for annotation
temporal_partition = visionai_v1.Partition(
temporal_partition=visionai_v1.TemporalPartition(
start_time=visionai_v1.Timestamp(seconds=1725926400),
end_time=visionai_v1.Timestamp(seconds=1725930000)
)
)
# Example: Creating search criteria with ranges
search_criteria = [
visionai_v1.Criteria(
date_time_range_criteria=visionai_v1.DateTimeRangeCriteria(
date_time_ranges=[
visionai_v1.DateTimeRange(
start_datetime=visionai_v1.Timestamp(seconds=1725926400),
end_datetime=visionai_v1.Timestamp(seconds=1725930000)
)
]
)
),
visionai_v1.Criteria(
float_range_criteria=visionai_v1.FloatRangeCriteria(
float_ranges=[
visionai_v1.FloatRange(start=0.7, end=1.0) # High confidence only
]
)
)
]
# Example: Creating normalized bounding box
bounding_box = visionai_v1.NormalizedBoundingBox(
xmin=0.1, # 10% from left
ymin=0.2, # 20% from top
xmax=0.9, # 90% from left
ymax=0.8 # 80% from top
)
# Example: Creating update mask
update_mask = visionai_v1.FieldMask(
paths=["display_name", "description", "labels"]
)class HealthCheckRequest:
"""Request for health check operation."""
cluster: str # Cluster resource path to check
class HealthCheckResponse:
"""Response containing health check results."""
cluster_info: ClusterInfo # Detailed cluster health information
class ClusterInfo:
"""Cluster health and status information."""
cluster_name: str # Name of the cluster
cluster_id: str # Unique cluster identifier
# Additional health status details and metrics

These types form the foundation for all operations across the Google Cloud Vision AI package, providing consistent interfaces for video analytics, asset management, streaming operations, and application configuration.
Install with Tessl CLI
npx tessl i tessl/pypi-google-cloud-visionai