```bash
tessl install https://github.com/microsoft/agent-skills --skill foundry-sdk-python
```
Build AI applications using the Azure AI Projects Python SDK (azure-ai-projects). Use when working with Foundry project clients, creating versioned agents with PromptAgentDefinition, running evaluations, managing connections/deployments/datasets/indexes, or using OpenAI-compatible clients. This is the high-level Foundry SDK; for low-level agent operations, use the azure-ai-agents-python skill.
Build AI applications on Azure AI Foundry using the azure-ai-projects SDK.
```bash
pip install azure-ai-projects azure-identity
```

```bash
AZURE_AI_PROJECT_ENDPOINT="https://<resource>.services.ai.azure.com/api/projects/<project>"
AZURE_AI_MODEL_DEPLOYMENT_NAME="gpt-4o-mini"
```

```python
import os

from azure.identity import DefaultAzureCredential
from azure.ai.projects import AIProjectClient

credential = DefaultAzureCredential()
client = AIProjectClient(
    endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"],
    credential=credential,
)
```

| Operation | Access | Purpose |
|---|---|---|
| client.agents | .agents.* | Agent CRUD, versions, threads, runs |
| client.connections | .connections.* | List/get project connections |
| client.deployments | .deployments.* | List model deployments |
| client.datasets | .datasets.* | Dataset management |
| client.indexes | .indexes.* | Index management |
| client.evaluations | .evaluations.* | Run evaluations |
| client.red_teams | .red_teams.* | Red team operations |
```python
from azure.ai.projects import AIProjectClient

client = AIProjectClient(
    endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"],
    credential=DefaultAzureCredential(),
)

# Use Foundry-native operations
agent = client.agents.create_agent(
    model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"],
    name="my-agent",
    instructions="You are helpful.",
)
```

```python
# Get OpenAI-compatible client from project
openai_client = client.get_openai_client()

# Use standard OpenAI API
response = openai_client.chat.completions.create(
    model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"],
    messages=[{"role": "user", "content": "Hello!"}],
)
```

```python
agent = client.agents.create_agent(
    model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"],
    name="my-agent",
    instructions="You are a helpful assistant.",
)
```

```python
from azure.ai.agents import CodeInterpreterTool, FileSearchTool

agent = client.agents.create_agent(
    model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"],
    name="tool-agent",
    instructions="You can execute code and search files.",
    tools=[CodeInterpreterTool(), FileSearchTool()],
)
```

```python
from azure.ai.projects.models import PromptAgentDefinition

# Create a versioned agent
agent_version = client.agents.create_version(
    agent_name="customer-support-agent",
    definition=PromptAgentDefinition(
        model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"],
        instructions="You are a customer support specialist.",
        tools=[],  # Add tools as needed
    ),
    version_label="v1.0",
)
```

See references/agents.md for detailed agent patterns.
| Tool | Class | Use Case |
|---|---|---|
| Code Interpreter | CodeInterpreterTool | Execute Python, generate files |
| File Search | FileSearchTool | RAG over uploaded documents |
| Bing Grounding | BingGroundingTool | Web search (requires connection) |
| Azure AI Search | AzureAISearchTool | Search your indexes |
| Function Calling | FunctionTool | Call your Python functions |
| OpenAPI | OpenApiTool | Call REST APIs |
| MCP | McpTool | Model Context Protocol servers |
| Memory Search | MemorySearchTool | Search agent memory stores |
| SharePoint | SharepointGroundingTool | Search SharePoint content |
See references/tools.md for all tool patterns.
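Of the tools above, only CodeInterpreterTool, FileSearchTool, and MemorySearchTool appear in the examples in this overview. As a rough sketch of function calling, mirroring the tools=[...] pattern used earlier (the FunctionTool constructor shape and the get_weather helper are assumptions based on azure-ai-agents function-calling samples; confirm the exact registration pattern in references/tools.md):

```python
from azure.ai.agents import FunctionTool  # import path assumed, matching the tool imports above

# Hypothetical function the agent is allowed to call
def get_weather(city: str) -> str:
    """Return a canned weather report for the given city."""
    return f"It is sunny in {city}."

# Register the function on the agent; reuses the client from the setup section.
# FunctionTool(functions={...}) is an assumption and may differ by SDK version.
agent = client.agents.create_agent(
    model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"],
    name="function-agent",
    instructions="Use get_weather to answer weather questions.",
    tools=[FunctionTool(functions={get_weather})],
)
```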
```python
# 1. Create thread
thread = client.agents.threads.create()

# 2. Add message
client.agents.messages.create(
    thread_id=thread.id,
    role="user",
    content="What's the weather like?",
)

# 3. Create and process run
run = client.agents.runs.create_and_process(
    thread_id=thread.id,
    agent_id=agent.id,
)

# 4. Get response
if run.status == "completed":
    messages = client.agents.messages.list(thread_id=thread.id)
    for msg in messages:
        if msg.role == "assistant":
            print(msg.content[0].text.value)
```

```python
# List all connections
connections = client.connections.list()
for conn in connections:
    print(f"{conn.name}: {conn.connection_type}")

# Get specific connection
connection = client.connections.get(connection_name="my-search-connection")
```

See references/connections.md for connection patterns.
```python
# List available model deployments
deployments = client.deployments.list()
for deployment in deployments:
    print(f"{deployment.name}: {deployment.model}")
```

See references/deployments.md for deployment patterns.

```python
# List datasets
datasets = client.datasets.list()

# List indexes
indexes = client.indexes.list()
```

See references/datasets-indexes.md for data operations.
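Listing is the only dataset operation shown above. As a hedged sketch of getting data into a project, a file upload looks roughly like this (the upload_file call follows the azure-ai-projects dataset samples; the dataset name, version, and path are placeholders, and references/datasets-indexes.md is the authoritative reference):

```python
# Upload a local file as a new dataset version (name, version, and path are illustrative)
dataset = client.datasets.upload_file(
    name="my-eval-data",              # hypothetical dataset name
    version="1",
    file_path="./data/eval_input.jsonl",
)
print(dataset)
```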
```python
# Using OpenAI client for evals
openai_client = client.get_openai_client()

# Create evaluation with built-in evaluators
eval_run = openai_client.evals.runs.create(
    eval_id="my-eval",
    name="quality-check",
    data_source={
        "type": "custom",
        "item_references": [{"item_id": "test-1"}],
    },
    testing_criteria=[
        {"type": "fluency"},
        {"type": "task_adherence"},
    ],
)
```

See references/evaluation.md for evaluation patterns.
```python
from azure.ai.projects.aio import AIProjectClient
# The async client needs the async credential from azure.identity.aio
from azure.identity.aio import DefaultAzureCredential

async with AIProjectClient(
    endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"],
    credential=DefaultAzureCredential(),
) as client:
    agent = await client.agents.create_agent(...)
    # ... async operations
```

See references/async-patterns.md for async patterns.
```python
# Create memory store for agent
memory_store = client.agents.create_memory_store(
    name="conversation-memory",
)

# Attach to agent for persistent memory
# (assumes MemorySearchTool has been imported; see the tools table above)
agent = client.agents.create_agent(
    model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"],
    name="memory-agent",
    tools=[MemorySearchTool()],
    tool_resources={"memory": {"store_ids": [memory_store.id]}},
)
```

Best practices:

- Manage the client lifetime with a context manager: `async with AIProjectClient(...) as client:`
- Delete agents you no longer need: `client.agents.delete_agent(agent.id)`
- Use `create_and_process` for simple runs, streaming for real-time UX

| Feature | azure-ai-projects | azure-ai-agents |
|---|---|---|
| Level | High-level (Foundry) | Low-level (Agents) |
| Client | AIProjectClient | AgentsClient |
| Versioning | create_version() | Not available |
| Connections | Yes | No |
| Deployments | Yes | No |
| Datasets/Indexes | Yes | No |
| Evaluation | Via OpenAI client | No |
| When to use | Full Foundry integration | Standalone agent apps |
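To make the comparison concrete, here is a minimal sketch of the low-level client from the azure-ai-agents package; the endpoint reuse and create_agent arguments mirror the examples above, and the azure-ai-agents-python skill is the authoritative reference for these patterns.

```python
import os

from azure.identity import DefaultAzureCredential
from azure.ai.agents import AgentsClient  # low-level client: no connections, deployments, or versioning

# Standalone agents client pointed at the same project endpoint
agents_client = AgentsClient(
    endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"],
    credential=DefaultAzureCredential(),
)

# Agent CRUD sits directly on the client instead of under client.agents
agent = agents_client.create_agent(
    model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"],
    name="standalone-agent",
    instructions="You are a helpful assistant.",
)
```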