Comprehensive Python client library for Google Cloud Vertex AI, offering machine learning tools, generative AI models, and MLOps capabilities
—
Modern generative AI capabilities providing streamlined access to Google's most advanced AI models including Gemini for multimodal generation, PaLM for text, and Imagen for image generation. The vertexai package offers simplified APIs designed for rapid development of AI-powered applications.
Advanced multimodal AI models supporting text, images, video, audio, and function calling with sophisticated reasoning capabilities.
class GenerativeModel:
    """Multimodal generative model client (Gemini family).

    Supports text/image/video/audio inputs, streaming generation, token
    counting, chat sessions, and tool/function calling.  All methods are
    declared as stubs (`...`); the runtime implementation lives in the SDK.
    """

    def __init__(
        self,
        model_name: str,
        generation_config: Optional[GenerationConfig] = None,
        safety_settings: Optional[SafetySettingsType] = None,
        tools: Optional[List[Tool]] = None,
        tool_config: Optional[ToolConfig] = None,
        system_instruction: Optional[ContentsType] = None,
        labels: Optional[Dict[str, str]] = None
    ): ...

    # Single-shot generation; returns a stream of chunks when stream=True.
    def generate_content(
        self,
        contents: ContentsType,
        generation_config: Optional[GenerationConfigType] = None,
        safety_settings: Optional[SafetySettingsType] = None,
        tools: Optional[List[Tool]] = None,
        tool_config: Optional[ToolConfig] = None,
        labels: Optional[Dict[str, str]] = None,
        stream: bool = False
    ) -> GenerationResponse: ...

    # Count tokens for the given contents (tools included in the count).
    def count_tokens(
        self,
        contents: ContentsType,
        tools: Optional[List[Tool]] = None
    ) -> CountTokensResponse: ...

    # Open a stateful multi-turn chat session, optionally seeded with history.
    def start_chat(
        self,
        history: Optional[List[Content]] = None,
        response_validation: bool = True
    ) -> ChatSession: ...

    # Alternate constructor: bind the model to server-side cached context.
    @classmethod
    def from_cached_content(
        cls,
        cached_content: CachedContent,
        generation_config: Optional[GenerationConfig] = None,
        safety_settings: Optional[SafetySettingsType] = None,
        tools: Optional[List[Tool]] = None,
        tool_config: Optional[ToolConfig] = None
    ) -> 'GenerativeModel': ...

# Basic text generation:
# Example: basic text generation with a Gemini model.
from vertexai.generative_models import GenerativeModel

model = GenerativeModel('gemini-1.5-pro')
response = model.generate_content('Explain quantum computing in simple terms')
print(response.text)

# Multimodal generation with images:
# Example: multimodal generation mixing a text prompt with an image.
from vertexai.generative_models import GenerativeModel, Image

model = GenerativeModel('gemini-1.5-pro')
image = Image.load_from_file('photo.jpg')
response = model.generate_content(['What do you see in this image?', image])
print(response.text)

# Streaming responses:
# Example: streaming generation — iterate chunks as they arrive.
model = GenerativeModel('gemini-1.5-pro')
stream = model.generate_content('Write a story about space exploration', stream=True)
for chunk in stream:
    print(chunk.text, end='')

# Stateful multi-turn conversations with conversation history management
# and streaming support.
class ChatSession:
    """Stateful multi-turn conversation bound to a GenerativeModel.

    Tracks message history and supports synchronous, asynchronous, and
    streaming message exchange.  Stub declarations only.
    """

    def __init__(
        self,
        model: GenerativeModel,
        history: Optional[List[Content]] = None,
        response_validation: bool = True
    ): ...

    # Send one user message; returns the model reply (or a stream of
    # chunks when stream=True).
    def send_message(
        self,
        content: ContentsType,
        generation_config: Optional[GenerationConfigType] = None,
        safety_settings: Optional[SafetySettingsType] = None,
        tools: Optional[List[Tool]] = None,
        labels: Optional[Dict[str, str]] = None,
        stream: bool = False
    ) -> GenerationResponse: ...

    # Asynchronous counterpart of send_message.
    def send_message_async(
        self,
        content: ContentsType,
        generation_config: Optional[GenerationConfigType] = None,
        safety_settings: Optional[SafetySettingsType] = None,
        tools: Optional[List[Tool]] = None,
        labels: Optional[Dict[str, str]] = None,
        stream: bool = False
    ) -> Awaitable[GenerationResponse]: ...

    # Full conversation transcript so far.
    @property
    def history(self) -> List[Content]: ...

# Basic chat conversation:
# Example: basic multi-turn chat with history inspection.
model = GenerativeModel('gemini-1.5-pro')
chat = model.start_chat()
response = chat.send_message('Hello! How are you?')
print(response.text)
response = chat.send_message('Can you help me with Python programming?')
print(response.text)
# View conversation history
print(f"Total messages: {len(chat.history)}")

# Rich content representation supporting text, images, video, audio,
# and structured data.
class Content:
    """A single conversation message: an ordered list of parts plus a role."""
    def __init__(self, parts: List[Part], role: str = 'user'): ...
    # Ordered message payload (text, media, function calls/responses).
    @property
    def parts(self) -> List[Part]: ...
    # Author of the message (defaults to 'user' in __init__).
    @property
    def role(self) -> str: ...
    # Text of the message.  NOTE(review): presumably aggregates the text
    # parts — exact behavior not visible in this stub; confirm against SDK.
    @property
    def text(self) -> str: ...
    # Dict round-tripping for serialization.
    @classmethod
    def from_dict(cls, content_dict: Dict[str, Any]) -> 'Content': ...
    def to_dict(self) -> Dict[str, Any]: ...
class Part:
    """One unit of a Content message.

    Accessors are Optional, suggesting each Part holds one of: text,
    inline bytes, a file reference, a function call, or a function
    response — NOTE(review): mutual exclusivity not visible in this stub.
    """
    # Plain-text payload, if this part is text.
    @property
    def text(self) -> Optional[str]: ...
    # Raw inline bytes (e.g. an image), if present.
    @property
    def inline_data(self) -> Optional[Blob]: ...
    # Reference to externally stored data, if present.
    @property
    def file_data(self) -> Optional[FileData]: ...
    # Model-requested function invocation, if present.
    @property
    def function_call(self) -> Optional[FunctionCall]: ...
    # Result returned to the model for a prior function call, if present.
    @property
    def function_response(self) -> Optional[FunctionResponse]: ...
    # Factory constructors, one per part flavor.
    @staticmethod
    def from_text(text: str) -> 'Part': ...
    @staticmethod
    def from_data(data: bytes, mime_type: str) -> 'Part': ...
    @staticmethod
    def from_uri(uri: str, mime_type: str) -> 'Part': ...
    @staticmethod
    def from_image(image: Image) -> 'Part': ...
    @staticmethod
    def from_function_response(name: str, response: Any) -> 'Part': ...
class Image:
    """Wrapper around raw image bytes for multimodal input."""

    def __init__(self, data: bytes): ...

    # Raw image bytes.
    @property
    def data(self) -> bytes: ...

    # Load image bytes from a local file path.
    @staticmethod
    def load_from_file(location: str) -> 'Image': ...

    # Wrap already-loaded bytes.
    @staticmethod
    def from_bytes(data: bytes) -> 'Image': ...

# Fine-grained control over model behavior and response characteristics.
class GenerationConfig:
    """Sampling and output controls for a generation request.

    All fields are optional; unset fields fall back to model defaults.
    """

    def __init__(
        self,
        temperature: Optional[float] = None,       # sampling temperature
        top_p: Optional[float] = None,             # nucleus sampling mass
        top_k: Optional[int] = None,               # top-k sampling cutoff
        candidate_count: Optional[int] = None,     # completions to return
        max_output_tokens: Optional[int] = None,   # cap on generated tokens
        stop_sequences: Optional[List[str]] = None,
        presence_penalty: Optional[float] = None,
        frequency_penalty: Optional[float] = None,
        response_mime_type: Optional[str] = None,  # e.g. 'application/json'
        response_schema: Optional[Dict[str, Any]] = None,  # schema for structured output
        seed: Optional[int] = None,
        logprobs: Optional[int] = None,
        response_logprobs: Optional[bool] = None
    ): ...

    # Dict round-tripping for serialization.
    @classmethod
    def from_dict(cls, generation_config_dict: Dict[str, Any]) -> 'GenerationConfig': ...

    def to_dict(self) -> Dict[str, Any]: ...

# Controlled generation:
# Example: controlled generation via an explicit GenerationConfig.
from vertexai.generative_models import GenerativeModel, GenerationConfig

config = GenerationConfig(
    temperature=0.7,
    top_p=0.8,
    max_output_tokens=1000,
    stop_sequences=['END']
)
model = GenerativeModel('gemini-1.5-pro', generation_config=config)
response = model.generate_content('Write a short story')

# Structured JSON output:
# Example: constrain the response to JSON matching a schema.
config = GenerationConfig(
    response_mime_type='application/json',
    response_schema={
        'type': 'object',
        'properties': {
            'name': {'type': 'string'},
            'age': {'type': 'integer'},
            'skills': {'type': 'array', 'items': {'type': 'string'}}
        }
    }
)
response = model.generate_content('Create a character profile', generation_config=config)

# Comprehensive content filtering and safety controls for responsible AI deployment.
class SafetySetting:
    """Pairs a harm category with the blocking threshold (and optional
    method) used to filter generated content."""
    def __init__(
        self,
        category: HarmCategory,
        threshold: HarmBlockThreshold,
        method: Optional[HarmBlockMethod] = None
    ): ...
    # Dict round-tripping for serialization.
    @classmethod
    def from_dict(cls, safety_setting_dict: Dict[str, Any]) -> 'SafetySetting': ...
    def to_dict(self) -> Dict[str, Any]: ...
# Enums for safety configuration
class HarmCategory(Enum):
    """Harm categories a SafetySetting can target (wire values 0-10)."""
    HARM_CATEGORY_UNSPECIFIED = 0
    HARM_CATEGORY_DEROGATORY = 1
    HARM_CATEGORY_TOXICITY = 2
    HARM_CATEGORY_VIOLENCE = 3
    HARM_CATEGORY_SEXUAL = 4
    HARM_CATEGORY_MEDICAL = 5
    HARM_CATEGORY_DANGEROUS = 6
    HARM_CATEGORY_HARASSMENT = 7
    HARM_CATEGORY_HATE_SPEECH = 8
    HARM_CATEGORY_SEXUALLY_EXPLICIT = 9
    HARM_CATEGORY_DANGEROUS_CONTENT = 10
class HarmBlockThreshold(Enum):
    """Severity cutoff at which content in a category is blocked."""
    HARM_BLOCK_THRESHOLD_UNSPECIFIED = 0
    BLOCK_LOW_AND_ABOVE = 1
    BLOCK_MEDIUM_AND_ABOVE = 2
    BLOCK_ONLY_HIGH = 3
    BLOCK_NONE = 4

# Enable models to call external functions and APIs for enhanced
# capabilities and real-time data access.
class Tool:
    """A capability exposed to the model: function declarations or a
    retrieval source (Vertex AI Search / Google Search)."""
    def __init__(self, function_declarations: List[FunctionDeclaration]): ...
    # Alternate constructors, one per tool flavor.
    @classmethod
    def from_function_declarations(cls, function_declarations: List[FunctionDeclaration]) -> 'Tool': ...
    @classmethod
    def from_retrieval(cls, retrieval: Retrieval) -> 'Tool': ...
    @classmethod
    def from_google_search_retrieval(cls, google_search_retrieval: GoogleSearchRetrieval) -> 'Tool': ...
class FunctionDeclaration:
    """Schema describing a callable function for tool use."""
    def __init__(
        self,
        name: str,
        description: str,
        parameters: Optional[Dict[str, Any]] = None,
        response: Optional[Dict[str, Any]] = None
    ): ...
    # Build a declaration by introspecting a Python callable.
    # NOTE(review): exact introspection rules (signature/docstring use)
    # are not visible in this stub; confirm against the SDK docs.
    @classmethod
    def from_func(cls, func: Callable) -> 'FunctionDeclaration': ...
    def to_dict(self) -> Dict[str, Any]: ...
class FunctionCall:
    """A model-requested function invocation: name plus arguments."""

    # Name of the declared function to invoke.
    @property
    def name(self) -> str: ...

    # Arguments supplied by the model, keyed by parameter name.
    @property
    def args(self) -> Dict[str, Any]: ...

    def to_dict(self) -> Dict[str, Any]: ...

# Define and use functions:
from vertexai.generative_models import GenerativeModel, Tool, FunctionDeclaration
# Define a function
def get_weather(location: str) -> str:
    """Get current weather for a location (example stub: fixed report)."""
    return f"Weather in {location}: Sunny, 25°C"
# Create function declaration
weather_func = FunctionDeclaration.from_func(get_weather)
tool = Tool([weather_func])
# Use with model
model = GenerativeModel('gemini-1.5-pro', tools=[tool])
response = model.generate_content('What is the weather like in Paris?')
# Check for function calls in response
for candidate in response.candidates:
    for part in candidate.content.parts:
        if part.function_call:
            print(f"Function called: {part.function_call.name}")
            print(f"Arguments: {part.function_call.args}")

# Specialized text generation models optimized for various language tasks
# with fine-tuning capabilities.
class TextGenerationModel:
    """PaLM-style text generation model with tuning support.

    Stub declarations only; bodies (`...`) are implemented in the SDK.
    """
    # Load a published model by name.  NOTE(review): valid model names
    # are not visible here; confirm against the SDK docs.
    @classmethod
    def from_pretrained(cls, model_name: str) -> 'TextGenerationModel': ...
    # Single-shot prediction with sampling controls.
    def predict(
        self,
        prompt: str,
        max_output_tokens: int = 128,
        temperature: Optional[float] = None,
        top_k: Optional[int] = None,
        top_p: Optional[float] = None,
        stop_sequences: Optional[List[str]] = None,
        candidate_count: Optional[int] = None,
        grounding_source: Optional[GroundingSource] = None,
        logprobs: Optional[int] = None,
        presence_penalty: Optional[float] = None,
        frequency_penalty: Optional[float] = None,
        seed: Optional[int] = None
    ) -> MultiCandidateTextGenerationResponse: ...
    # Streaming variant; yields partial responses as they arrive.
    def predict_streaming(self, prompt: str, **kwargs) -> Iterator[TextGenerationResponse]: ...
    # Launch a supervised tuning job from input/output text pairs.
    def tune_model(self, training_data: List[InputOutputTextPair], **kwargs) -> LanguageModelTuningJob: ...
class ChatModel:
    """PaLM-style chat model; start_chat opens a stateful session."""

    @classmethod
    def from_pretrained(cls, model_name: str) -> 'ChatModel': ...

    # Open a chat session seeded with context, few-shot examples,
    # sampling parameters, and optional prior message history.
    def start_chat(
        self,
        context: Optional[str] = None,
        examples: Optional[List[InputOutputTextPair]] = None,
        max_output_tokens: Optional[int] = None,
        temperature: Optional[float] = None,
        top_k: Optional[int] = None,
        top_p: Optional[float] = None,
        message_history: Optional[List[ChatMessage]] = None,
        stop_sequences: Optional[List[str]] = None
    ) -> ChatSession: ...

# Comprehensive response objects with detailed metadata and safety information.
class GenerationResponse:
    """Top-level response from generate_content / send_message."""
    # All generated candidates.
    @property
    def candidates(self) -> List[Candidate]: ...
    # Convenience accessor for response text.  NOTE(review): presumably
    # the first candidate's text — confirm against the SDK.
    @property
    def text(self) -> str: ...
    # Feedback about the prompt (e.g. block reasons), when present.
    @property
    def prompt_feedback(self) -> Optional[PromptFeedback]: ...
    # Token usage accounting, when present.
    @property
    def usage_metadata(self) -> Optional[UsageMetadata]: ...
    # Dict round-tripping for serialization.
    @classmethod
    def from_dict(cls, response_dict: Dict[str, Any]) -> 'GenerationResponse': ...
    def to_dict(self) -> Dict[str, Any]: ...
class Candidate:
    """One generated completion, with safety and citation metadata."""

    # The generated message content.
    @property
    def content(self) -> Content: ...

    # Why generation stopped for this candidate.
    @property
    def finish_reason(self) -> FinishReason: ...

    # Optional human-readable detail accompanying finish_reason.
    @property
    def finish_message(self) -> Optional[str]: ...

    # Per-harm-category safety ratings for this candidate.
    @property
    def safety_ratings(self) -> List[SafetyRating]: ...

    # Source citations, when present.
    @property
    def citation_metadata(self) -> Optional[CitationMetadata]: ...

    # Convenience accessor for the candidate's text.
    @property
    def text(self) -> str: ...

    # Function calls requested by the model in this candidate.
    @property
    def function_calls(self) -> List[FunctionCall]: ...

    # Dict round-tripping for serialization.
    @classmethod
    def from_dict(cls, candidate_dict: Dict[str, Any]) -> 'Candidate': ...

    def to_dict(self) -> Dict[str, Any]: ...


class ResponseValidationError(Exception):
    """Raised when response validation fails."""
    pass


class ResponseBlockedError(Exception):
    """Raised when response is blocked by safety filters."""
    pass

# Common error scenarios:
Connect models to external knowledge sources for factual accuracy and real-time information.
class grounding:
    """Namespace grouping grounding/retrieval configuration types."""

    class GoogleSearchRetrieval:
        # disable_attribution: suppress source attribution in results.
        def __init__(self, disable_attribution: bool = False): ...

    class VertexAISearch:
        # Points at a Vertex AI Search datastore within a project.
        def __init__(self, datastore: str, project: str): ...

    class Retrieval:
        # Wraps a VertexAISearch source for use as a Tool.
        # String annotation: sibling nested-class names are not in scope
        # inside a nested class body (class scopes do not nest).
        def __init__(self, source: 'grounding.VertexAISearch', disable_attribution: bool = False): ...

# Optimize costs and latency by caching frequently used context.
class CachedContent:
    """Server-side cached context reused across requests to reduce
    cost and latency."""

    # Create a cache entry for a model with the given contents and TTL.
    @classmethod
    def create(
        cls,
        model_name: str,
        contents: ContentsType,
        ttl: Optional[datetime.timedelta] = None,
        display_name: Optional[str] = None
    ) -> 'CachedContent': ...

    # Replace the cache entry's time-to-live.
    def update(self, ttl: datetime.timedelta) -> None: ...

    # Remove the cache entry.
    def delete(self) -> None: ...

# This comprehensive API enables building sophisticated AI applications with
# Google's most advanced generative models, supporting everything from simple
# text generation to complex multimodal applications with function calling
# and external knowledge integration.
Install with Tessl CLI
npx tessl i tessl/pypi-google-cloud-aiplatform