Microsoft Azure AI Content Safety client library for Python providing text and image content analysis APIs with harm category detection and blocklist management
npx @tessl/cli install tessl/pypi-azure-ai-contentsafety@1.0.0

A comprehensive Python client library for Azure AI Content Safety services that enables developers to detect harmful user-generated and AI-generated content in applications and services. It provides both text and image analysis APIs that classify content across four harm categories (sexual, violence, hate, and self-harm) at multiple severity levels, includes text blocklist management for screening custom terms, and supports both Azure Key Credential and Microsoft Entra ID authentication.
pip install azure-ai-contentsafety

Synchronous clients:
from azure.ai.contentsafety import ContentSafetyClient, BlocklistClient

Asynchronous clients:
from azure.ai.contentsafety.aio import ContentSafetyClient, BlocklistClient

Authentication:
from azure.core.credentials import AzureKeyCredential
from azure.ai.contentsafety import ContentSafetyClient
# Using API key authentication
client = ContentSafetyClient(
    endpoint="https://your-resource.cognitiveservices.azure.com",
    credential=AzureKeyCredential("your-api-key")
)

from azure.ai.contentsafety import ContentSafetyClient
from azure.ai.contentsafety.models import AnalyzeTextOptions
from azure.core.credentials import AzureKeyCredential
# Initialize the client
client = ContentSafetyClient(
    endpoint="https://your-resource.cognitiveservices.azure.com",
    credential=AzureKeyCredential("your-api-key")
)
# Analyze text content
request = AnalyzeTextOptions(text="Some text to analyze")
response = client.analyze_text(request)
# Check results
for result in response.categories_analysis:
    print(f"Category: {result.category}, Severity: {result.severity}")
# Close the client
client.close()

The Azure AI Content Safety client library is organized around two main client classes: ContentSafetyClient for analyzing text and image content, and BlocklistClient for creating and managing custom text blocklists. Both clients support synchronous and asynchronous usage, authentication with either an API key (AzureKeyCredential) or Microsoft Entra ID (TokenCredential), and use as context managers.
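For example, a minimal sketch of the Microsoft Entra ID path, assuming the separate azure-identity package is installed (DefaultAzureCredential comes from that package, not from this library) and using a placeholder endpoint:

from azure.identity import DefaultAzureCredential
from azure.ai.contentsafety import ContentSafetyClient
from azure.ai.contentsafety.models import AnalyzeTextOptions

# Entra ID (token) authentication; the client also works as a context manager,
# so close() is called automatically on exit
with ContentSafetyClient(
    endpoint="https://your-resource.cognitiveservices.azure.com",
    credential=DefaultAzureCredential()
) as client:
    response = client.analyze_text(AnalyzeTextOptions(text="Some text to analyze"))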
Analyze text and image content for harmful material across four categories (hate, self-harm, sexual, violence) with configurable severity levels and custom blocklist integration.
def analyze_text(self, options: AnalyzeTextOptions, **kwargs) -> AnalyzeTextResult: ...
def analyze_image(self, options: AnalyzeImageOptions, **kwargs) -> AnalyzeImageResult: ...

Create and manage custom text blocklists to screen for domain-specific prohibited terms, with support for adding, updating, removing, and querying blocklist items.
def create_or_update_text_blocklist(self, blocklist_name: str, options: TextBlocklist, **kwargs) -> TextBlocklist: ...
def add_or_update_blocklist_items(self, blocklist_name: str, options: AddOrUpdateTextBlocklistItemsOptions, **kwargs) -> AddOrUpdateTextBlocklistItemsResult: ...
def list_text_blocklists(self, **kwargs) -> Iterable[TextBlocklist]: ...
def delete_text_blocklist(self, blocklist_name: str, **kwargs) -> None: ...

class ContentSafetyClient:
    def __init__(
        self,
        endpoint: str,
        credential: Union[AzureKeyCredential, TokenCredential],
        **kwargs
    ): ...
    def close(self) -> None: ...
    def __enter__(self) -> "ContentSafetyClient": ...
    def __exit__(self, *exc_details: Any) -> None: ...
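To illustrate the image path, a minimal sketch assuming the models module also exposes an ImageData type with a content field and that AnalyzeImageResult exposes categories_analysis like its text counterpart (neither is shown above); sample.jpg is a placeholder path:

from azure.ai.contentsafety import ContentSafetyClient
from azure.ai.contentsafety.models import AnalyzeImageOptions, ImageData
from azure.core.credentials import AzureKeyCredential

client = ContentSafetyClient(
    endpoint="https://your-resource.cognitiveservices.azure.com",
    credential=AzureKeyCredential("your-api-key")
)

# Read the image bytes and analyze them across the four image categories
with open("sample.jpg", "rb") as f:
    request = AnalyzeImageOptions(image=ImageData(content=f.read()))
response = client.analyze_image(request)

for result in response.categories_analysis:
    print(f"Category: {result.category}, Severity: {result.severity}")

client.close()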
class BlocklistClient:
    def __init__(
        self,
        endpoint: str,
        credential: Union[AzureKeyCredential, TokenCredential],
        **kwargs
    ): ...
    def close(self) -> None: ...
    def __enter__(self) -> "BlocklistClient": ...
    def __exit__(self, *exc_details: Any) -> None: ...
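To make the blocklist workflow concrete, a sketch under the assumption that the models module also provides TextBlocklistItem and that AnalyzeTextOptions accepts a blocklist_names parameter (neither appears in the signatures above); the blocklist name and term are placeholders:

from azure.ai.contentsafety import BlocklistClient, ContentSafetyClient
from azure.ai.contentsafety.models import (
    AddOrUpdateTextBlocklistItemsOptions,
    AnalyzeTextOptions,
    TextBlocklist,
    TextBlocklistItem,
)
from azure.core.credentials import AzureKeyCredential

endpoint = "https://your-resource.cognitiveservices.azure.com"
credential = AzureKeyCredential("your-api-key")
blocklist_name = "product-terms"

# Create (or update) a blocklist, then add custom terms to it
blocklist_client = BlocklistClient(endpoint=endpoint, credential=credential)
blocklist_client.create_or_update_text_blocklist(
    blocklist_name=blocklist_name,
    options=TextBlocklist(blocklist_name=blocklist_name, description="Prohibited product terms"),
)
blocklist_client.add_or_update_blocklist_items(
    blocklist_name=blocklist_name,
    options=AddOrUpdateTextBlocklistItemsOptions(
        blocklist_items=[TextBlocklistItem(text="forbidden-term")]
    ),
)

# Screen text against the custom blocklist during analysis
content_client = ContentSafetyClient(endpoint=endpoint, credential=credential)
response = content_client.analyze_text(
    AnalyzeTextOptions(text="Some text to analyze", blocklist_names=[blocklist_name])
)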
# Content categories
class TextCategory(str, Enum):
    HATE: str
    SELF_HARM: str
    SEXUAL: str
    VIOLENCE: str

class ImageCategory(str, Enum):
    HATE: str
    SELF_HARM: str
    SEXUAL: str
    VIOLENCE: str

# Output severity levels
class AnalyzeTextOutputType(str, Enum):
    FOUR_SEVERITY_LEVELS: str   # 0, 2, 4, 6
    EIGHT_SEVERITY_LEVELS: str  # 0, 1, 2, 3, 4, 5, 6, 7

class AnalyzeImageOutputType(str, Enum):
    FOUR_SEVERITY_LEVELS: str  # 0, 2, 4, 6
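As a sketch of selecting an output type, assuming AnalyzeTextOptions accepts categories and output_type parameters (not shown in the options above):

from azure.ai.contentsafety import ContentSafetyClient
from azure.ai.contentsafety.models import AnalyzeTextOptions, AnalyzeTextOutputType, TextCategory
from azure.core.credentials import AzureKeyCredential

client = ContentSafetyClient(
    endpoint="https://your-resource.cognitiveservices.azure.com",
    credential=AzureKeyCredential("your-api-key")
)

# Request the finer-grained 0-7 severity scale instead of the default 0/2/4/6,
# limiting analysis to two text categories
request = AnalyzeTextOptions(
    text="Some text to analyze",
    categories=[TextCategory.HATE, TextCategory.VIOLENCE],
    output_type=AnalyzeTextOutputType.EIGHT_SEVERITY_LEVELS,
)
response = client.analyze_text(request)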