An integration package connecting Google's genai package and LangChain
—
Content safety controls and configuration options for responsible AI deployment. Provides comprehensive filtering capabilities, error handling, and system configuration for all Google Generative AI components.
Categories of potentially harmful content that can be filtered by Google's safety systems.
# From google.ai.generativelanguage_v1beta.HarmCategory
HarmCategory.HARM_CATEGORY_HARASSMENT
HarmCategory.HARM_CATEGORY_HATE_SPEECH
HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT
HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT
HarmCategory.HARM_CATEGORY_CIVIC_INTEGRITY

Categories:
HARM_CATEGORY_HARASSMENT: Content that harasses, intimidates, bullies, or abuses
HARM_CATEGORY_HATE_SPEECH: Content that promotes hatred against individuals or groups
HARM_CATEGORY_SEXUALLY_EXPLICIT: Content containing explicit sexual material
HARM_CATEGORY_DANGEROUS_CONTENT: Content that could facilitate harm to individuals or groups
HARM_CATEGORY_CIVIC_INTEGRITY: Content that could undermine civic processes or institutions

Threshold levels for content filtering, determining when to block potentially harmful content.
# From google.ai.generativelanguage_v1beta.SafetySetting.HarmBlockThreshold
HarmBlockThreshold.BLOCK_NONE
HarmBlockThreshold.BLOCK_ONLY_HIGH
HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE
HarmBlockThreshold.BLOCK_LOW_AND_ABOVE

Thresholds:
BLOCK_NONE: Don't block any content based on this category
BLOCK_ONLY_HIGH: Block only high-probability harmful content
BLOCK_MEDIUM_AND_ABOVE: Block medium and high-probability harmful content (recommended)
BLOCK_LOW_AND_ABOVE: Block low, medium, and high-probability harmful content (most restrictive)

Generation modality options for multimodal capabilities.
# From google.ai.generativelanguage_v1beta.GenerationConfig.Modality
Modality.TEXT
Modality.IMAGE
Modality.AUDIO

Modalities:
TEXT: Text-only generation
IMAGE: Image generation capabilities
AUDIO: Audio generation capabilities

Exception raised when trying to access non-existent vector store resources. This is the only exception class exported by the package.
class DoesNotExistsException(Exception):
    def __init__(self, *, corpus_id: str, document_id: Optional[str] = None)

Parameters:
corpus_id (str): The corpus ID that doesn't exist
document_id (Optional[str]): The document ID that doesn't exist (if applicable)

Type definition for safety setting configuration.
SafetySettingDict = TypedDict('SafetySettingDict', {
'category': HarmCategory,
'threshold': HarmBlockThreshold
})

from langchain_google_genai import (
ChatGoogleGenerativeAI,
HarmCategory,
HarmBlockThreshold
)
# Configure comprehensive safety settings
safety_settings = {
HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
}
# Initialize model with safety settings
llm = ChatGoogleGenerativeAI(
model="gemini-2.5-pro",
safety_settings=safety_settings
)
try:
response = llm.invoke("Generate helpful and safe content about AI")
print(response.content)
except Exception as e:
    print(f"Generation error: {e}")

# Strict safety settings (most restrictive)
strict_settings = {
HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
}
strict_llm = ChatGoogleGenerativeAI(
model="gemini-2.5-pro",
safety_settings=strict_settings
)
# Permissive settings (less restrictive)
permissive_settings = {
HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_ONLY_HIGH,
HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_ONLY_HIGH,
HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_ONLY_HIGH,
HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
}
permissive_llm = ChatGoogleGenerativeAI(
model="gemini-2.5-pro",
safety_settings=permissive_settings
)

from langchain_google_genai import Modality
# Configure for multimodal output
multimodal_llm = ChatGoogleGenerativeAI(
model="gemini-2.0-flash",
response_modalities=[Modality.TEXT, Modality.IMAGE],
safety_settings={
HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
}
)
response = multimodal_llm.invoke("Create a diagram explaining photosynthesis")
print("Response includes:", response.response_metadata.get("modalities", ["text"]))

from langchain_google_genai import ChatGoogleGenerativeAI
def safe_generate(prompt: str, max_retries: int = 3):
    """Generate content with comprehensive error handling.

    Builds a safety-configured model, then retries transient API/rate-limit
    errors up to ``max_retries`` times. Safety blocks and other errors are
    reported immediately without retrying.

    Args:
        prompt: The text prompt to send to the model.
        max_retries: Maximum number of invocation attempts (should be >= 1).

    Returns:
        dict: ``{"success": True, "content": ..., "attempt": n}`` on success,
        otherwise ``{"success": False, "error": ..., "details": ...}``.
    """
    llm = ChatGoogleGenerativeAI(
        model="gemini-2.5-pro",
        safety_settings={
            HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
            HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
        }
    )
    for attempt in range(max_retries):
        try:
            response = llm.invoke(prompt)
            return {"success": True, "content": response.content, "attempt": attempt + 1}
        except Exception as e:
            # NOTE(review): message sniffing is fragile — prefer catching the
            # client library's specific exception types once confirmed.
            message = str(e).lower()
            if "safety" in message:
                # Safety blocks are not transient; retrying won't help.
                return {
                    "success": False,
                    "error": "Content blocked by safety filters",
                    "details": str(e)
                }
            elif "api" in message or "rate" in message:
                # Transient API/rate errors: retry until attempts exhausted.
                if attempt < max_retries - 1:
                    print(f"Attempt {attempt + 1} failed, retrying...")
                    continue
                else:
                    return {
                        "success": False,
                        "error": "API error after retries",
                        "details": str(e)
                    }
            else:
                return {
                    "success": False,
                    "error": "Generation error",
                    "details": str(e)
                }
    # Fix: previously fell through and implicitly returned None when
    # max_retries < 1; always return a structured result.
    return {
        "success": False,
        "error": "No generation attempts were made",
        "details": f"max_retries={max_retries}"
    }
# Use safe generation
result = safe_generate("Explain quantum computing")
if result["success"]:
print(f"Generated content: {result['content']}")
else:
    print(f"Generation failed: {result['error']}")

from langchain_google_genai import GoogleVectorStore, DoesNotExistsException
def safe_vector_operations():
    """Demonstrate safe vector store operations."""
    # Connect to the corpus, creating it when it does not exist yet.
    try:
        store = GoogleVectorStore(corpus_id="my-corpus")
        print("Connected to existing corpus")
    except DoesNotExistsException as e:
        print(f"Corpus doesn't exist: {e}")
        store = GoogleVectorStore.create_corpus(
            corpus_id="my-corpus",
            display_name="My Knowledge Base",
        )
        print("Created new corpus")

    # Add content to a specific document, creating it on demand.
    try:
        store.add_texts(
            ["Sample content"],
            document_id="specific-doc",
        )
    except DoesNotExistsException as e:
        print(f"Document doesn't exist: {e}")
        document_store = GoogleVectorStore.create_document(
            corpus_id="my-corpus",
            document_id="specific-doc",
            display_name="My Document",
        )
        document_store.add_texts(["Sample content"])
        print("Created document and added content")
safe_vector_operations()

def validate_safety_settings(settings: Dict[HarmCategory, HarmBlockThreshold]) -> bool:
    """Validate safety settings configuration."""
    required_categories = {
        HarmCategory.HARM_CATEGORY_HARASSMENT,
        HarmCategory.HARM_CATEGORY_HATE_SPEECH,
        HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
    }
    # Validation fails when any required category is left unconfigured.
    missing_categories = required_categories.difference(settings)
    if missing_categories:
        print(f"Warning: Missing safety settings for: {missing_categories}")
        return False
    # Flag (but still accept) categories whose filtering is disabled.
    permissive_settings = [
        pair for pair in settings.items()
        if pair[1] == HarmBlockThreshold.BLOCK_NONE
    ]
    if permissive_settings:
        print(f"Warning: Permissive settings detected: {permissive_settings}")
    return True
# Validate settings before use
safety_config = {
HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
}
if validate_safety_settings(safety_config):
llm = ChatGoogleGenerativeAI(
model="gemini-2.5-pro",
safety_settings=safety_config
)

import os
def get_safety_config():
    """Get safety configuration based on environment."""
    # DEPLOYMENT_ENV defaults to "production" when unset.
    if os.getenv("DEPLOYMENT_ENV", "production") == "development":
        # Development: relaxed harassment/hate-speech thresholds.
        return {
            HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_ONLY_HIGH,
            HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_ONLY_HIGH,
            HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
        }
    # Production (and any other environment): strict thresholds everywhere.
    return {
        HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
        HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
        HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
        HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
    }
# Use environment-based configuration
llm = ChatGoogleGenerativeAI(
    model="gemini-2.5-pro",
    safety_settings=get_safety_config()
)

def monitor_safety_blocks(llm, prompts: List[str]):
    """Monitor safety blocking rates for a set of prompts.

    Args:
        llm: A chat model exposing ``invoke(prompt)`` that returns an object
            with a ``content`` attribute.
        prompts: Prompts to send; each is truncated to 50 chars in results.

    Returns:
        list[dict]: One record per prompt with ``"status"`` of ``"success"``,
        ``"blocked"``, or ``"error"``.
    """
    total_prompts = len(prompts)
    blocked_prompts = 0
    successful_prompts = 0
    results = []
    for prompt in prompts:
        try:
            response = llm.invoke(prompt)
            successful_prompts += 1
            results.append({
                "prompt": prompt[:50] + "...",
                "status": "success",
                "response_length": len(response.content)
            })
        except Exception as e:
            # Heuristic: the client surfaces safety blocks as exceptions whose
            # message mentions "safety" — TODO confirm against library types.
            if "safety" in str(e).lower():
                blocked_prompts += 1
                results.append({
                    "prompt": prompt[:50] + "...",
                    "status": "blocked",
                    "reason": "safety filter"
                })
            else:
                results.append({
                    "prompt": prompt[:50] + "...",
                    "status": "error",
                    "reason": str(e)
                })
    # Print statistics
    print("Safety Monitoring Results:")
    print(f"Total prompts: {total_prompts}")
    # Fix: guard against ZeroDivisionError when prompts is empty.
    if total_prompts:
        print(f"Successful: {successful_prompts} ({successful_prompts/total_prompts:.1%})")
        print(f"Blocked: {blocked_prompts} ({blocked_prompts/total_prompts:.1%})")
    return results
# Example monitoring
test_prompts = [
"Explain machine learning",
"Write a story about robots",
"Describe cooking techniques",
]
llm = ChatGoogleGenerativeAI(
model="gemini-2.5-pro",
safety_settings={
HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
}
)
monitoring_results = monitor_safety_blocks(llm, test_prompts)

Recommended: BLOCK_MEDIUM_AND_ABOVE for most categories.

Install with Tessl CLI
npx tessl i tessl/pypi-langchain-google-genai