Expert guidance for Anthropic Claude API development including Messages API, tool use, prompt engineering, and building production applications with Claude models.
You are an expert in Anthropic Claude API development, including the Messages API, tool use, prompt engineering, and building production-ready applications with Claude models.
import os

from anthropic import Anthropic

# Always use environment variables for API keys.
# Keep keys in .env files (never commit them); use python-dotenv for
# local development.
client = Anthropic(api_key=os.environ.get("ANTHROPIC_API_KEY"))

# The client also reads ANTHROPIC_API_KEY from the environment by default:
client = Anthropic()

# Basic message: a system prompt plus a single user turn.
message = client.messages.create(
    model="claude-sonnet-4-20250514",
    max_tokens=1024,
    system="You are a helpful assistant.",
    messages=[
        {"role": "user", "content": "Hello, Claude!"}
    ]
)
# Response content is a list of blocks; the first holds the text.
print(message.content[0].text)

# Streaming: print tokens as they arrive.
with client.messages.stream(
    model="claude-sonnet-4-20250514",
    max_tokens=1024,
    messages=[{"role": "user", "content": "Write a story"}]
) as stream:
    for text in stream.text_stream:
        print(text, end="", flush=True)

# Model selection guide:
# - claude-opus-4-20250514: complex reasoning and analysis
# - claude-sonnet-4-20250514: balanced performance and cost
# - claude-3-5-haiku-20241022: fast, efficient responses

# Tool definitions describe each tool's input with JSON Schema.
tools = [
    {
        "name": "get_weather",
        "description": "Get the current weather in a given location",
        "input_schema": {
            "type": "object",
            "properties": {
                "location": {
                    "type": "string",
                    "description": "The city and state, e.g., San Francisco, CA"
                },
                "unit": {
                    "type": "string",
                    "enum": ["celsius", "fahrenheit"],
                    "description": "The unit of temperature"
                }
            },
            "required": ["location"]
        }
    }
]

response = client.messages.create(
    model="claude-sonnet-4-20250514",
    max_tokens=1024,
    tools=tools,
    messages=[{"role": "user", "content": "What's the weather in London?"}]
)

import json  # used to serialize tool results below
def process_tool_use(response, messages, tools):
    """Handle one tool-use round trip with Claude.

    If ``response`` stopped because Claude requested a tool, execute the
    tool, append the assistant turn and the tool result to ``messages``,
    and request a final response.  Otherwise return ``response`` unchanged.

    Args:
        response: Message object returned by ``client.messages.create``.
        messages: Running conversation history (mutated in place).
        tools: Tool definitions passed back on the follow-up request.

    Returns:
        The final Message object — either the follow-up response that
        incorporates the tool result, or the original ``response``.
    """
    if response.stop_reason != "tool_use":
        return response

    # Find the first tool_use content block.  (A response may contain
    # several; this helper handles one per round trip.)
    tool_use_block = next(
        (block for block in response.content if block.type == "tool_use"),
        None,
    )
    if tool_use_block is None:
        # Defensive: stop_reason said tool_use but no block was present.
        return response

    # Execute the tool with the model-supplied input.
    tool_result = execute_tool(tool_use_block.name, tool_use_block.input)

    # Continue the conversation: echo the assistant turn, then supply the
    # tool result keyed by tool_use_id.
    messages.append({"role": "assistant", "content": response.content})
    messages.append({
        "role": "user",
        "content": [{
            "type": "tool_result",
            "tool_use_id": tool_use_block.id,
            "content": json.dumps(tool_result)
        }]
    })

    # Get the final response that incorporates the tool result.
    return client.messages.create(
        model="claude-sonnet-4-20250514",
        max_tokens=1024,
        tools=tools,
        messages=messages
    )
import base64

# Vision — image referenced by URL.
message = client.messages.create(
    model="claude-sonnet-4-20250514",
    max_tokens=1024,
    messages=[{
        "role": "user",
        "content": [
            {
                "type": "image",
                "source": {
                    "type": "url",
                    "url": "https://example.com/image.jpg"
                }
            },
            {
                "type": "text",
                "text": "Describe this image in detail."
            }
        ]
    }]
)

# Vision — image sent as base64-encoded data from a local file.
with open("image.png", "rb") as f:
    image_data = base64.standard_b64encode(f.read()).decode("utf-8")

message = client.messages.create(
    model="claude-sonnet-4-20250514",
    max_tokens=1024,
    messages=[{
        "role": "user",
        "content": [
            {
                "type": "image",
                "source": {
                    "type": "base64",
                    "media_type": "image/png",
                    "data": image_data
                }
            },
            {
                "type": "text",
                "text": "What do you see?"
            }
        ]
    }]
)

# System prompts: use XML tags to separate guidelines from output format.
system_prompt = """You are a technical documentation writer.
<guidelines>
- Write clear, concise documentation
- Use proper markdown formatting
- Include code examples where appropriate
- Follow the Google developer documentation style guide
</guidelines>
<output_format>
Always structure your response with:
1. Overview
2. Prerequisites
3. Step-by-step instructions
4. Examples
5. Troubleshooting
</output_format>
"""

from anthropic import RateLimitError, APIError
import time


def call_with_retry(func, max_retries=3, base_delay=1):
    """Call ``func`` with retries and exponential backoff.

    Args:
        func: Zero-argument callable that performs the API request.
        max_retries: Maximum number of attempts before giving up.
        base_delay: Initial backoff delay in seconds; doubles per attempt
            for rate-limit errors.

    Returns:
        Whatever ``func`` returns on success.

    Raises:
        RateLimitError: If still rate limited on the final attempt.
        APIError: If the final attempt fails with an API error.
    """
    for attempt in range(max_retries):
        try:
            return func()
        except RateLimitError:
            # Fix: re-raise on the last attempt instead of sleeping and
            # falling through to a generic Exception that discards the
            # original error.
            if attempt == max_retries - 1:
                raise
            delay = base_delay * (2 ** attempt)
            print(f"Rate limited. Retrying in {delay}s...")
            time.sleep(delay)
        except APIError:
            if attempt == max_retries - 1:
                raise
            time.sleep(base_delay)
    raise Exception("Max retries exceeded")  # unreachable safety net

# Error handling cheat sheet:
# - RateLimitError: implement exponential backoff
# - APIError: check API status, retry with backoff
# - AuthenticationError: verify the API key
# - BadRequestError: validate input parameters
# Prompt caching: mark large, reused context as ephemeral so repeated
# requests reuse the cached prefix instead of reprocessing it.
response = client.messages.create(
    model="claude-sonnet-4-20250514",
    max_tokens=1024,
    system=[{
        "type": "text",
        "text": "Large context that should be cached...",
        "cache_control": {"type": "ephemeral"}
    }],
    messages=[{"role": "user", "content": "Question about the context"}]
)
# Batches API: submit non-time-sensitive requests for asynchronous
# processing.  Each request carries a custom_id so results can be
# matched back to their inputs.
batch = client.messages.batches.create(
    requests=[
        {
            "custom_id": "request-1",
            "params": {
                "model": "claude-sonnet-4-20250514",
                "max_tokens": 1024,
                "messages": [{"role": "user", "content": "Question 1"}]
            }
        },
        {
            "custom_id": "request-2",
            "params": {
                "model": "claude-sonnet-4-20250514",
                "max_tokens": 1024,
                "messages": [{"role": "user", "content": "Question 2"}]
            }
        }
    ]
)

# Always set appropriate max_tokens limits on batch requests.
If you maintain this skill, you can claim it as your own. Once claimed, you can manage eval scenarios, bundle related skills, attach documentation or rules, and ensure cross-agent compatibility.