Low-level, data-driven core of boto3, providing foundational AWS service access.
—
Quality
Pending
Does it follow best practices?
Impact
Pending
No eval scenarios have been run
Built-in stubbing capabilities for testing AWS service interactions with mock responses, error simulation, and comprehensive parameter validation. The testing framework allows you to write unit tests without making actual AWS API calls.
from botocore.stub import Stubber, ANY
from botocore.exceptions import (
StubResponseError,
StubAssertionError,
UnStubbedResponseError
)

Primary class for stubbing AWS service client responses in tests.
class Stubber:
    """Stub out responses on a botocore client so tests never hit AWS."""

    def __init__(self, client: BaseClient):
        """
        Initialize stubber for AWS service client.

        Args:
            client: AWS service client to stub
        """

    def activate(self) -> None:
        """
        Activate response stubbing on the client.

        Registers event handlers to intercept API calls.
        """

    def deactivate(self) -> None:
        """
        Deactivate response stubbing on the client.

        Unregisters event handlers and restores normal operation.
        """

    def add_response(
        self,
        method: str,
        service_response: dict,
        expected_params: dict = None
    ) -> None:
        """
        Add mock response for service method.

        Queued responses are consumed in the order added, one per client
        call (see the multi-operation example further down this page).

        Args:
            method: Client method name to stub (e.g. 'list_buckets')
            service_response: Response data to return
            expected_params: Expected parameters for validation; when given,
                a call with different parameters fails. None skips validation.
        """

    def add_client_error(
        self,
        method: str,
        service_error_code: str = '',
        service_message: str = '',
        http_status_code: int = 400,
        service_error_meta: dict = None,
        expected_params: dict = None,
        response_meta: dict = None,
        modeled_fields: dict = None
    ) -> None:
        """
        Add ClientError response for service method.

        Args:
            method: Client method name to stub
            service_error_code: AWS error code (e.g., 'NoSuchBucket')
            service_message: Human-readable error message
            http_status_code: HTTP status code for error
            service_error_meta: Additional error metadata, merged into the
                response's 'Error' mapping
            expected_params: Expected parameters for validation
            response_meta: Additional response metadata, merged into
                'ResponseMetadata'
            modeled_fields: Validated fields based on error shape
        """

    def assert_no_pending_responses(self) -> None:
        """
        Assert that all stubbed responses were consumed.

        Raises AssertionError if unused responses remain.
        """

    def __enter__(self) -> 'Stubber':
        """Context manager entry - activates stubber."""

    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
"""Context manager exit - deactivates stubber."""Special constants and utilities for flexible parameter validation.
# Module-level sentinel exported by botocore.stub.
ANY: object
"""
Wildcard constant that matches any parameter value.
Use for parameters with unpredictable values like timestamps or UUIDs.
"""Specific exceptions for stubbing-related errors.
class StubResponseError(BotoCoreError):
    """Base exception for stubber response errors."""
    pass


class StubAssertionError(StubResponseError, AssertionError):
    """Parameter validation failed during stubbed call."""
    pass


class UnStubbedResponseError(StubResponseError):
    """API call made without corresponding stubbed response."""
    pass

Basic example of stubbing a successful service response.
# Basic example: queue one stubbed response, make the call, compare.
import datetime

import botocore.session
from botocore.stub import Stubber

# Create client and stubber
session = botocore.session.get_session()
s3_client = session.create_client('s3', region_name='us-east-1')
stubber = Stubber(s3_client)

# Define expected response (datetime import above is required for
# CreationDate; the original snippet omitted it and raised NameError).
response = {
    'Buckets': [
        {
            'Name': 'test-bucket',
            'CreationDate': datetime.datetime(2020, 1, 1)
        }
    ],
    'ResponseMetadata': {
        'RequestId': 'abc123',
        'HTTPStatusCode': 200
    }
}

# Add stubbed response
stubber.add_response('list_buckets', response)

# Activate stubber and make call
stubber.activate()
result = s3_client.list_buckets()
stubber.deactivate()
assert result == response

Using stubber as context manager for automatic activation/deactivation.
# Context-manager example: activation on entry, deactivation on exit.
import botocore.session
from botocore.stub import Stubber

session = botocore.session.get_session()
ec2_client = session.create_client('ec2', region_name='us-west-2')

# DescribeInstances returns instances grouped under 'Reservations'; the
# stubber validates stubbed responses against the service model, so a
# top-level 'Instances' key would be rejected as invalid.
response = {
    'Reservations': [
        {
            'Instances': [
                {
                    'InstanceId': 'i-1234567890abcdef0',
                    'State': {'Name': 'running'},
                    'InstanceType': 't2.micro'
                }
            ]
        }
    ]
}

# Entering the with-block activates the stubber; exiting deactivates it.
with Stubber(ec2_client) as stubber:
    stubber.add_response('describe_instances', response)
    result = ec2_client.describe_instances()
    assert result == response

Validate that methods are called with expected parameters.
import botocore.session
from botocore.stub import Stubber
session = botocore.session.get_session()
s3_client = session.create_client('s3', region_name='us-east-1')
response = {
'Contents': [
{
'Key': 'test-file.txt',
'Size': 1024,
'LastModified': datetime.datetime(2020, 1, 1)
}
]
}
expected_params = {
'Bucket': 'my-test-bucket',
'Prefix': 'uploads/',
'MaxKeys': 100
}
with Stubber(s3_client) as stubber:
stubber.add_response('list_objects_v2', response, expected_params)
# This call matches expected parameters
result = s3_client.list_objects_v2(
Bucket='my-test-bucket',
Prefix='uploads/',
MaxKeys=100
    )

Ignore specific parameter values that are unpredictable.
# Wildcard example: ANY matches any value for a single expected parameter.
import botocore.session
from botocore.stub import Stubber, ANY

session = botocore.session.get_session()
dynamodb_client = session.create_client('dynamodb', region_name='us-east-1')

response = {
    'Item': {
        'id': {'S': 'test-id'},
        'name': {'S': 'Test Item'}
    }
}

# Use ANY for unpredictable parameters; the other keys are still
# validated exactly.
expected_params = {
    'TableName': 'my-table',
    'Key': {'id': {'S': 'test-id'}},
    'ConsistentRead': ANY  # Don't care about this parameter
}

with Stubber(dynamodb_client) as stubber:
    stubber.add_response('get_item', response, expected_params)
    # ConsistentRead can be any value
    result = dynamodb_client.get_item(
        TableName='my-table',
        Key={'id': {'S': 'test-id'}},
        ConsistentRead=True  # or False, doesn't matter
    )

Simulate AWS service errors for error handling tests.
# Error-simulation example: the stubbed call raises ClientError instead
# of returning a response.
import botocore.session
from botocore.stub import Stubber
from botocore.exceptions import ClientError
import pytest

session = botocore.session.get_session()
s3_client = session.create_client('s3', region_name='us-east-1')

with Stubber(s3_client) as stubber:
    # Add error response
    stubber.add_client_error(
        'get_object',
        service_error_code='NoSuchKey',
        service_message='The specified key does not exist.',
        http_status_code=404
    )
    # Test error handling: the call raises ClientError carrying the
    # stubbed code and status.
    with pytest.raises(ClientError) as exc_info:
        s3_client.get_object(Bucket='test-bucket', Key='nonexistent.txt')
    error = exc_info.value
    assert error.response['Error']['Code'] == 'NoSuchKey'
    assert error.response['ResponseMetadata']['HTTPStatusCode'] == 404

Add additional error metadata and response fields.
# Error-metadata example: attach extra fields to a stubbed ClientError.
import botocore.session
import pytest
from botocore.stub import Stubber
from botocore.exceptions import ClientError

# pytest import above is required for pytest.raises below; the original
# snippet omitted it and raised NameError.
session = botocore.session.get_session()
s3_client = session.create_client('s3', region_name='us-east-1')

with Stubber(s3_client) as stubber:
    stubber.add_client_error(
        'restore_object',
        service_error_code='InvalidObjectState',
        service_message='Object is in invalid state',
        http_status_code=403,
        # Extra keys merged into the response's 'Error' mapping.
        service_error_meta={
            'StorageClass': 'GLACIER',
            'ActualObjectState': 'Archived'
        },
        # Extra keys merged into 'ResponseMetadata'.
        response_meta={
            'RequestId': 'error-123',
            'HostId': 'host-error-456'
        }
    )
    with pytest.raises(ClientError) as exc_info:
        s3_client.restore_object(
            Bucket='test-bucket',
            Key='archived-file.txt',
            RestoreRequest={'Days': 7}
        )
    error = exc_info.value
    # service_error_meta fields appear under 'Error'.
    assert error.response['Error']['StorageClass'] == 'GLACIER'
    assert error.response['ResponseMetadata']['RequestId'] == 'error-123'

Complete example using pytest for AWS service testing.
# Full pytest integration: client and stubber provided as fixtures.
import pytest
import botocore.session
from botocore.stub import Stubber
from botocore.exceptions import ClientError


@pytest.fixture
def s3_client():
    """Create S3 client for testing."""
    session = botocore.session.get_session()
    return session.create_client('s3', region_name='us-east-1')


@pytest.fixture
def s3_stubber(s3_client):
    """Create activated stubber for S3 client."""
    # Yielding inside the with-block keeps the stubber active for the
    # whole test and deactivates it automatically afterwards.
    with Stubber(s3_client) as stubber:
        yield stubber


def test_successful_bucket_creation(s3_client, s3_stubber):
    """Test successful bucket creation."""
    expected_params = {'Bucket': 'test-bucket'}
    response = {
        'Location': '/test-bucket',
        'ResponseMetadata': {'HTTPStatusCode': 200}
    }
    s3_stubber.add_response('create_bucket', response, expected_params)
    result = s3_client.create_bucket(Bucket='test-bucket')
    assert result['Location'] == '/test-bucket'


def test_bucket_already_exists_error(s3_client, s3_stubber):
    """Test bucket creation when bucket already exists."""
    s3_stubber.add_client_error(
        'create_bucket',
        service_error_code='BucketAlreadyExists',
        service_message='The requested bucket name is not available.',
        http_status_code=409
    )
    with pytest.raises(ClientError) as exc_info:
        s3_client.create_bucket(Bucket='existing-bucket')
    assert exc_info.value.response['Error']['Code'] == 'BucketAlreadyExists'


def test_multiple_operations(s3_client, s3_stubber):
    """Test multiple stubbed operations in sequence."""
    # Stubbed responses are consumed in the order they are added, so the
    # client calls below must happen in the same order.
    # First operation: create bucket
    s3_stubber.add_response(
        'create_bucket',
        {'Location': '/test-bucket'},
        {'Bucket': 'test-bucket'}
    )
    # Second operation: put object
    s3_stubber.add_response(
        'put_object',
        {'ETag': '"abc123"'},
        {
            'Bucket': 'test-bucket',
            'Key': 'test-file.txt',
            'Body': b'Hello, World!'
        }
    )
    # Third operation: list objects
    s3_stubber.add_response(
        'list_objects_v2',
        {
            'Contents': [
                {'Key': 'test-file.txt', 'Size': 13}
            ]
        },
        {'Bucket': 'test-bucket'}
    )
    # Execute operations in order
    s3_client.create_bucket(Bucket='test-bucket')
    s3_client.put_object(
        Bucket='test-bucket',
        Key='test-file.txt',
        Body=b'Hello, World!'
    )
    result = s3_client.list_objects_v2(Bucket='test-bucket')
    assert len(result['Contents']) == 1
    assert result['Contents'][0]['Key'] == 'test-file.txt'

Using stubber with Python's built-in unittest framework.
import unittest
import botocore.session
from botocore.stub import Stubber
from botocore.exceptions import ClientError
class TestS3Operations(unittest.TestCase):
def setUp(self):
"""Set up test fixtures before each test method."""
session = botocore.session.get_session()
self.s3_client = session.create_client('s3', region_name='us-east-1')
self.stubber = Stubber(self.s3_client)
def tearDown(self):
"""Clean up after each test method."""
# Ensure stubber is deactivated
try:
self.stubber.deactivate()
except:
pass # Already deactivated
def test_upload_file_success(self):
"""Test successful file upload."""
expected_params = {
'Bucket': 'uploads',
'Key': 'document.pdf',
'Body': unittest.mock.ANY # File content can vary
}
response = {
'ETag': '"d41d8cd98f00b204e9800998ecf8427e"',
'ResponseMetadata': {'HTTPStatusCode': 200}
}
self.stubber.add_response('put_object', response, expected_params)
self.stubber.activate()
result = self.s3_client.put_object(
Bucket='uploads',
Key='document.pdf',
Body=b'PDF content here'
)
self.assertEqual(result['ResponseMetadata']['HTTPStatusCode'], 200)
self.stubber.assert_no_pending_responses()
def test_file_not_found_error(self):
"""Test handling of file not found error."""
self.stubber.add_client_error(
'get_object',
service_error_code='NoSuchKey',
service_message='The specified key does not exist.'
)
self.stubber.activate()
with self.assertRaises(ClientError) as context:
self.s3_client.get_object(Bucket='test', Key='missing.txt')
self.assertEqual(
context.exception.response['Error']['Code'],
'NoSuchKey'
)
if __name__ == '__main__':
    unittest.main()

Test workflows involving multiple AWS services.
# Multi-service workflow: one Stubber per client, all active together.
import botocore.session
from botocore.stub import Stubber, ANY
import pytest

# ANY import above is required by the expected_params below; the original
# snippet imported only Stubber and raised NameError.


class AWSWorkflowTest:
    """Test complex workflow across multiple AWS services."""

    def setup_method(self):
        """Set up clients and stubbers for each test."""
        session = botocore.session.get_session()
        # Create clients
        self.s3_client = session.create_client('s3', region_name='us-east-1')
        self.lambda_client = session.create_client('lambda', region_name='us-east-1')
        self.sns_client = session.create_client('sns', region_name='us-east-1')
        # Create stubbers (a Stubber wraps exactly one client)
        self.s3_stubber = Stubber(self.s3_client)
        self.lambda_stubber = Stubber(self.lambda_client)
        self.sns_stubber = Stubber(self.sns_client)

    def test_file_processing_workflow(self):
        """Test complete file processing workflow."""
        # 1. Upload file to S3
        self.s3_stubber.add_response(
            'put_object',
            {
                'ETag': '"abc123"',
                'ResponseMetadata': {'HTTPStatusCode': 200}
            },
            {
                'Bucket': 'processing-bucket',
                'Key': 'input/data.csv',
                'Body': ANY
            }
        )
        # 2. Invoke Lambda function
        self.lambda_stubber.add_response(
            'invoke',
            {
                'StatusCode': 200,
                'Payload': b'{"status": "success", "records_processed": 1000}'
            },
            {
                'FunctionName': 'process-data-function',
                'Payload': ANY
            }
        )
        # 3. Send notification
        self.sns_stubber.add_response(
            'publish',
            {
                'MessageId': 'msg-123',
                'ResponseMetadata': {'HTTPStatusCode': 200}
            },
            {
                'TopicArn': 'arn:aws:sns:us-east-1:123456789012:processing-complete',
                'Message': ANY
            }
        )
        # Activate all stubbers for the duration of the workflow
        with self.s3_stubber, self.lambda_stubber, self.sns_stubber:
            # Execute workflow
            self.s3_client.put_object(
                Bucket='processing-bucket',
                Key='input/data.csv',
                Body=b'name,age\nJohn,30\nJane,25'
            )
            lambda_response = self.lambda_client.invoke(
                FunctionName='process-data-function',
                Payload='{"source": "s3://processing-bucket/input/data.csv"}'
            )
            self.sns_client.publish(
                TopicArn='arn:aws:sns:us-east-1:123456789012:processing-complete',
                Message='Data processing completed successfully'
            )
        # Verify Lambda response
        assert lambda_response['StatusCode'] == 200

Test paginated operations with multiple page responses.
# Pagination example: each page is a separate queued response consumed
# by one client call, in order.
import botocore.session
from botocore.stub import Stubber


def test_paginated_list_objects():
    """Test paginated S3 list operations."""
    session = botocore.session.get_session()
    s3_client = session.create_client('s3', region_name='us-east-1')
    with Stubber(s3_client) as stubber:
        # First page: truncated result carrying a continuation token.
        stubber.add_response(
            'list_objects_v2',
            {
                'Contents': [
                    {'Key': f'file-{i}.txt', 'Size': 100}
                    for i in range(1000)
                ],
                'IsTruncated': True,
                'NextContinuationToken': 'token-123'
            },
            {'Bucket': 'large-bucket', 'MaxKeys': 1000}
        )
        # Second page: expects the token from page one; final page.
        stubber.add_response(
            'list_objects_v2',
            {
                'Contents': [
                    {'Key': f'file-{i}.txt', 'Size': 100}
                    for i in range(1000, 1500)
                ],
                'IsTruncated': False
            },
            {
                'Bucket': 'large-bucket',
                'MaxKeys': 1000,
                'ContinuationToken': 'token-123'
            }
        )
        # Test pagination manually
        response1 = s3_client.list_objects_v2(
            Bucket='large-bucket',
            MaxKeys=1000
        )
        assert len(response1['Contents']) == 1000
        assert response1['IsTruncated'] is True
        response2 = s3_client.list_objects_v2(
            Bucket='large-bucket',
            MaxKeys=1000,
            ContinuationToken=response1['NextContinuationToken']
        )
        assert len(response2['Contents']) == 500
        assert response2['IsTruncated'] is False

Structure your tests for maintainability and clarity.
# Test-organization example: group related tests in a class and share
# helpers for stub data.
import botocore.session
from botocore.stub import Stubber
import pytest


class TestDataProcessing:
    """Group related tests in classes."""

    @pytest.fixture(autouse=True)
    def setup_clients(self):
        """Automatically set up clients for all tests."""
        session = botocore.session.get_session()
        self.s3_client = session.create_client('s3', region_name='us-east-1')
        self.dynamodb_client = session.create_client('dynamodb', region_name='us-east-1')

    def test_valid_data_processing(self):
        """Test processing with valid data."""
        with Stubber(self.s3_client) as s3_stubber:
            # Add S3 stubs for valid data scenario
            s3_stubber.add_response('get_object', self._valid_data_response())
            # Test implementation here
            pass

    def test_invalid_data_handling(self):
        """Test processing with invalid data."""
        with Stubber(self.s3_client) as s3_stubber:
            # Add S3 stubs for invalid data scenario
            s3_stubber.add_client_error(
                'get_object',
                service_error_code='NoSuchKey',
                service_message='File not found'
            )
            # Test error handling here
            pass

    def _valid_data_response(self):
        """Helper method for consistent test data."""
        return {
            'Body': MockStreamingBody(b'valid,csv,data'),
            'ContentLength': 15,
            'ResponseMetadata': {'HTTPStatusCode': 200}
        }


class MockStreamingBody:
    """Mock streaming body for S3 responses."""

    def __init__(self, content):
        self._content = content

    def read(self, amt=None):
        # amt accepted for interface compatibility; full content is returned.
        return self._content

    def close(self):
        pass

Create reusable stub data for consistent testing.
# test_data.py - Centralized test data
import datetime


class S3TestData:
    """Centralized S3 test response data."""

    @staticmethod
    def list_buckets_response():
        # datetime import above is required for CreationDate; the original
        # snippet omitted it and raised NameError.
        return {
            'Buckets': [
                {'Name': 'bucket-1', 'CreationDate': datetime.datetime(2020, 1, 1)},
                {'Name': 'bucket-2', 'CreationDate': datetime.datetime(2020, 2, 1)}
            ],
            'ResponseMetadata': {'HTTPStatusCode': 200}
        }

    @staticmethod
    def empty_bucket_response():
        return {
            'Contents': [],
            'ResponseMetadata': {'HTTPStatusCode': 200}
        }

    @staticmethod
    def access_denied_error():
        # Keyword arguments suitable for Stubber.add_client_error(**...).
        return {
            'service_error_code': 'AccessDenied',
            'service_message': 'Access Denied',
            'http_status_code': 403
        }


# test_s3_operations.py - Using centralized data
from botocore.stub import Stubber

from test_data import S3TestData


def test_bucket_listing(s3_client):
    """Test using centralized test data."""
    with Stubber(s3_client) as stubber:
        stubber.add_response('list_buckets', S3TestData.list_buckets_response())
        result = s3_client.list_buckets()
        assert len(result['Buckets']) == 2

Comprehensive error condition testing.
# Parametrized error-scenario tests; Stubber import below was missing
# from the original snippet and raised NameError.
import pytest
from botocore.exceptions import ClientError
from botocore.stub import Stubber


class TestErrorScenarios:
    """Test various AWS error conditions."""

    @pytest.mark.parametrize("error_code,http_status,expected_action", [
        ('NoSuchBucket', 404, 'create_bucket'),
        ('AccessDenied', 403, 'check_permissions'),
        ('InternalError', 500, 'retry_operation'),
    ])
    def test_error_handling_strategies(self, s3_client, error_code, http_status, expected_action):
        """Test different error handling strategies."""
        with Stubber(s3_client) as stubber:
            stubber.add_client_error(
                'get_object',
                service_error_code=error_code,
                http_status_code=http_status
            )
            with pytest.raises(ClientError) as exc_info:
                s3_client.get_object(Bucket='test-bucket', Key='test-key')
            error = exc_info.value
            assert error.response['Error']['Code'] == error_code
            assert error.response['ResponseMetadata']['HTTPStatusCode'] == http_status
            # Test appropriate error handling strategy
            # Implementation would call appropriate handler based on expected_action

Ensure thorough test validation and cleanup.
def test_complete_workflow_with_validation():
    """Test with comprehensive validation and cleanup."""
    # NOTE(review): this snippet assumes botocore.session, Stubber, and
    # MockStreamingBody (defined in an earlier example on this page) are
    # already imported - confirm before copying it standalone.
    session = botocore.session.get_session()
    s3_client = session.create_client('s3', region_name='us-east-1')
    stubber = Stubber(s3_client)
    try:
        # Add multiple responses (consumed in order, one per call)
        stubber.add_response('create_bucket', {'Location': '/test-bucket'})
        stubber.add_response('put_object', {'ETag': '"abc123"'})
        stubber.add_response('get_object', {'Body': MockStreamingBody(b'test')})
        stubber.activate()
        # Execute operations
        s3_client.create_bucket(Bucket='test-bucket')
        s3_client.put_object(Bucket='test-bucket', Key='test.txt', Body=b'test')
        result = s3_client.get_object(Bucket='test-bucket', Key='test.txt')
        # Validate results
        assert result['Body'].read() == b'test'
        # Ensure all stubs were used
        stubber.assert_no_pending_responses()
    finally:
        # Always clean up
        stubber.deactivate()

The testing framework provides comprehensive support for testing AWS service interactions without making actual API calls, enabling fast, reliable unit tests that validate both successful operations and error conditions.
Install with Tessl CLI
npx tessl i tessl/pypi-botocore