EdgeDB Python driver providing both blocking IO and asyncio implementations for connecting to and interacting with EdgeDB databases.
—
EdgeDB AI capabilities for retrieval-augmented generation (RAG) queries, natural language database interactions, and embeddings generation using EdgeDB's integrated AI features.
Factory functions for creating AI-enabled EdgeDB clients that can process natural language queries using RAG (Retrieval-Augmented Generation).
def create_ai(client: Client, **kwargs) -> EdgeDBAI:
    """
    Create a synchronous EdgeDB AI client.

    Parameters:
    - client: EdgeDB Client instance (synchronous, blocking I/O)
    - **kwargs: AIOptions arguments (model, prompt)

    Returns:
    EdgeDBAI instance for AI-powered RAG operations
    """
def create_async_ai(client: AsyncIOClient, **kwargs) -> AsyncEdgeDBAI:
    """
    Create an asynchronous EdgeDB AI client.

    Parameters:
    - client: AsyncIOClient instance (asyncio-based)
    - **kwargs: AIOptions arguments (model, prompt)

    Returns:
    AsyncEdgeDBAI instance for async AI RAG operations
    """

Main classes for AI-powered database interactions using RAG and embeddings.
class EdgeDBAI:
    """
    Synchronous EdgeDB AI client.

    Provides RAG (Retrieval-Augmented Generation) query capabilities
    and AI-powered database interactions using EdgeDB's integrated AI features.
    """

    def query_rag(
        self,
        message: str,
        context: Optional[QueryContext] = None
    ) -> str:
        """
        Execute RAG query using natural language message.

        Parameters:
        - message: Natural language query or message
        - context: Query context for AI processing (optional)

        Returns:
        RAG response string based on AI interpretation
        """

    def stream_rag(
        self,
        message: str,
        context: Optional[QueryContext] = None
    ) -> Iterator[str]:
        """
        Execute streaming RAG query using natural language message.

        Parameters:
        - message: Natural language query or message
        - context: Query context for AI processing (optional)

        Returns:
        Iterator yielding streaming RAG response chunks
        """

    def generate_embeddings(
        self,
        *inputs: str,
        model: str
    ) -> List[float]:
        """
        Generate embeddings for input texts.

        Parameters:
        - *inputs: Text inputs to generate embeddings for (variadic)
        - model: Embedding model identifier (keyword-only)

        Returns:
        List of floating-point embedding values
        """

    def with_config(self, **kwargs) -> EdgeDBAI:
        """
        Create new AI client with modified configuration.

        Parameters:
        - **kwargs: AIOptions parameters to override

        Returns:
        New EdgeDBAI instance with updated configuration
        """

    def with_context(self, **kwargs) -> EdgeDBAI:
        """
        Create new AI client with modified context.

        Parameters:
        - **kwargs: QueryContext parameters to override

        Returns:
        New EdgeDBAI instance with updated context
        """
class AsyncEdgeDBAI:
    """
    Asynchronous EdgeDB AI client.

    Async version of EdgeDBAI with identical method signatures
    but async/await support for RAG and embedding operations.
    """

    async def query_rag(
        self,
        message: str,
        context: Optional[QueryContext] = None
    ) -> str:
        """Async version of EdgeDBAI.query_rag()."""

    async def stream_rag(
        self,
        message: str,
        context: Optional[QueryContext] = None
    ) -> AsyncIterator[str]:
        """Async version of EdgeDBAI.stream_rag()."""

    async def generate_embeddings(
        self,
        *inputs: str,
        model: str
    ) -> List[float]:
        """Async version of EdgeDBAI.generate_embeddings()."""

    # Note: the two builder methods below are plain (non-async) calls.
    def with_config(self, **kwargs) -> AsyncEdgeDBAI:
        """Async version of EdgeDBAI.with_config()."""

    def with_context(self, **kwargs) -> AsyncEdgeDBAI:
        """Async version of EdgeDBAI.with_context()."""

Configuration classes and types for AI query processing and RAG operations.
class AIOptions:
    """
    AI query configuration options.

    Controls AI model selection and prompt configuration for RAG operations.
    """

    def __init__(self, model: str, prompt: Optional[Prompt] = None):
        """
        Create AI options.

        Parameters:
        - model: AI model identifier
        - prompt: Custom prompt configuration (optional)
        """

    def derive(self, kwargs: Dict[str, Any]) -> AIOptions:
        """
        Create derived AIOptions with modified parameters.

        Note: accepts a plain dict positionally (not **kwargs).

        Parameters:
        - kwargs: Parameters to override

        Returns:
        New AIOptions instance with updated parameters
        """
class QueryContext:
    """
    AI query context for RAG processing.

    Provides context information for AI query processing including
    variables, globals, and processing constraints.
    """

    def __init__(
        self,
        query: str = "",
        variables: Optional[Dict[str, Any]] = None,
        globals: Optional[Dict[str, Any]] = None,
        max_object_count: Optional[int] = None
    ):
        """
        Create query context.

        Parameters:
        - query: Query string context
        - variables: Query variables for processing
        - globals: Global variables for context (parameter name shadows the
          builtin; kept as-is for interface compatibility)
        - max_object_count: Maximum number of objects to process
        """

    def derive(self, kwargs: Dict[str, Any]) -> QueryContext:
        """
        Create derived QueryContext with modified parameters.

        Note: accepts a plain dict positionally (not **kwargs).

        Parameters:
        - kwargs: Parameters to override

        Returns:
        New QueryContext instance with updated parameters
        """
class Prompt:
    """
    AI prompt configuration.

    Manages prompts and templates for AI RAG query generation.

    NOTE(review): the three fields look like alternative ways to select a
    prompt (by name, by id, or fully custom) -- confirm against EdgeDB docs.
    """

    name: Optional[str]
    """Named prompt template identifier."""

    id: Optional[str]
    """Prompt identifier."""

    custom: Optional[List[Custom]]
    """Custom prompt conversation history."""
class ChatParticipantRole(Enum):
    """
    Chat participant roles for conversation context.

    Defines roles in AI chat conversations for RAG processing.
    """

    SYSTEM = "system"        # System instructions and context
    USER = "user"            # User queries and requests
    ASSISTANT = "assistant"  # AI assistant responses
    TOOL = "tool"            # Tool/function call results
class Custom(TypedDict):
    """
    Custom chat message for prompt configuration.

    Used in Prompt.custom for conversation history.
    """

    role: ChatParticipantRole
    content: str

import edgedb
from edgedb.ai import create_ai

# Create a regular (blocking) client
client = edgedb.create_client()

# Wrap it in an AI client with RAG capabilities
ai = create_ai(client, model="gpt-4")

# Natural language RAG queries
response = ai.query_rag("What users have registered in the last month?")
print(f"RAG Response: {response}")

# Get information about database structure
schema_info = ai.query_rag("Describe the schema and main object types")
print(f"Schema Info: {schema_info}")

# Ask about data patterns
patterns = ai.query_rag("What are the most common user activity patterns?")
print(f"Patterns: {patterns}")

import edgedb
from edgedb.ai import create_ai

client = edgedb.create_client()
ai = create_ai(client, model="gpt-4")

# Stream the RAG response for long-running queries
message = "Analyze user engagement trends and provide detailed insights"
print("RAG Response (streaming):")
# stream_rag() yields response chunks as they arrive
for chunk in ai.stream_rag(message):
    print(chunk, end="", flush=True)
print()  # New line after streaming

import asyncio
import edgedb
from edgedb.ai import create_async_ai

async def main():
    # Create async client
    client = edgedb.create_async_client()

    # Create async AI client
    ai = create_async_ai(client, model="gpt-4")

    # Natural language RAG queries with async/await
    response = await ai.query_rag("Find patterns in user behavior data")
    print(f"Async RAG Response: {response}")

    # Streaming async RAG: iterate chunks with `async for`
    print("Streaming RAG response:")
    async for chunk in ai.stream_rag("Provide detailed analysis of recent data"):
        print(chunk, end="", flush=True)
    print()

    # Release pooled connections before the event loop shuts down
    await client.aclose()
asyncio.run(main())

import edgedb
from edgedb.ai import create_ai, QueryContext

client = edgedb.create_client()
ai = create_ai(client, model="gpt-4")

# Create custom context for RAG queries
context = QueryContext(
    query="user_analysis",
    variables={"timeframe": "last_30_days"},
    globals={"app_name": "my_app"},
    max_object_count=1000
)

# Query with custom context
response = ai.query_rag(
    "Analyze user engagement within the specified timeframe",
    context=context
)
print(f"Contextual RAG Response: {response}")

# with_context() returns a new client; the original `ai` is unchanged
ai_with_context = ai.with_context(
    variables={"timeframe": "last_7_days"},
    max_object_count=500
)
weekly_response = ai_with_context.query_rag("Show recent user activity")
print(f"Weekly Response: {weekly_response}")

import edgedb
from edgedb.ai import create_ai

client = edgedb.create_client()
ai = create_ai(client, model="gpt-4")

# Generate embeddings for text inputs
texts = [
    "User authentication and security",
    "Database query optimization",
    "Real-time data synchronization"
]
# inputs are passed variadically; model is keyword-only
embeddings = ai.generate_embeddings(*texts, model="text-embedding-ada-002")
print(f"Generated {len(embeddings)} embedding dimensions")
print(f"First few values: {embeddings[:5]}")

# Use embeddings for similarity search or clustering
# (Integration with vector database operations)

import edgedb
from edgedb.ai import create_ai, AIOptions, Prompt, ChatParticipantRole

client = edgedb.create_client()

# Create AI client with specific configuration
ai_options = AIOptions(
    model="gpt-4",
    prompt=Prompt(
        name="database_analyst",
        custom=[
            {
                "role": ChatParticipantRole.SYSTEM,
                "content": "You are a database analyst specializing in EdgeDB schemas and query optimization."
            },
            {
                "role": ChatParticipantRole.USER,
                "content": "Focus on providing actionable insights about data patterns."
            }
        ]
    )
)
# NOTE(review): expanding __dict__ assumes AIOptions stores its options as
# plain instance attributes -- confirm before relying on this pattern.
ai = create_ai(client, **ai_options.__dict__)

# Use configured AI client
analysis = ai.query_rag("What optimization opportunities exist in this database?")
print(f"Expert Analysis: {analysis}")

# with_config() derives a variant client using a different model
ai_variant = ai.with_config(model="gpt-3.5-turbo")
quick_summary = ai_variant.query_rag("Provide a brief database summary")
print(f"Quick Summary: {quick_summary}")

import edgedb
from edgedb.ai import create_ai
from edgedb import EdgeDBError

client = edgedb.create_client()
ai = create_ai(client, model="gpt-4")

def safe_rag_query(ai_client, message, max_retries=3):
    """Execute RAG query with error handling and retries.

    EdgeDBError is re-raised after the final attempt; any other exception
    is converted into an error string on the final attempt instead.
    """
    for attempt in range(max_retries):
        try:
            response = ai_client.query_rag(message)
            return response
        except EdgeDBError as e:
            print(f"EdgeDB error on attempt {attempt + 1}: {e}")
            if attempt == max_retries - 1:
                raise
        except Exception as e:
            print(f"AI error on attempt {attempt + 1}: {e}")
            if attempt == max_retries - 1:
                return f"Unable to process query after {max_retries} attempts: {e}"
    return None

# Safe RAG query execution
result = safe_rag_query(ai, "Analyze database performance metrics")
if result:
    print(f"RAG Result: {result}")
else:
    print("Query failed after all retries")

import edgedb
from edgedb.ai import create_ai

client = edgedb.create_client()
ai = create_ai(client, model="gpt-4")

def hybrid_analysis(natural_query: str, fallback_edgeql: str = None):
    """Combine RAG with traditional EdgeQL for comprehensive analysis.

    Returns a dict with "insights" (RAG text, or None on failure) and
    "data" (EdgeQL result set, or None when no query was given or it failed).
    """
    # Get RAG insights (best-effort: failure is reported, not raised)
    try:
        rag_insights = ai.query_rag(f"Provide insights: {natural_query}")
        print(f"AI Insights: {rag_insights}")
    except Exception as e:
        print(f"RAG query failed: {e}")
        rag_insights = None

    # Execute traditional query if provided
    if fallback_edgeql:
        try:
            data = client.query(fallback_edgeql)
            print(f"Raw data: {len(data)} records")
            return {"insights": rag_insights, "data": data}
        except Exception as e:
            print(f"EdgeQL query failed: {e}")
    # Reached when no fallback query was supplied or it failed
    return {"insights": rag_insights, "data": None}

# Combined analysis approach
result = hybrid_analysis(
    natural_query="User engagement trends over time",
    fallback_edgeql="SELECT User { name, created_at, last_login } ORDER BY .created_at DESC"
)
if result["insights"]:
    print("Analysis completed with AI insights")
if result["data"]:
    print(f"Supporting data: {len(result['data'])} records")

Install with Tessl CLI
npx tessl i tessl/pypi-edgedb