Python Client SDK for the Mistral AI API with chat completions, embeddings, fine-tuning, and agent capabilities.
—
Moderate content and classify text using Mistral's safety and classification models. These capabilities help ensure content safety and enable automated text categorization.
Analyze content for safety and policy compliance.
def moderate(
inputs: List[Union[str, Dict]],
model: Optional[str] = None,
**kwargs
) -> ModerationResponse:
"""
Moderate content for safety compliance.
Parameters:
- inputs: List of text strings or message objects to moderate
- model: Optional moderation model identifier
Returns:
ModerationResponse with safety analysis results
"""Classify text into predefined categories or custom classes.
def classify(
    inputs: List[str],
    model: str,
    **kwargs
) -> ClassificationResponse:
    """
    Classify text inputs.

    Parameters:
    - inputs: List of text strings to classify
    - model: Classification model identifier

    Returns:
        ClassificationResponse with classification results
    """

from mistralai import Mistral
# Example: moderate a batch of plain-text inputs with the classifiers API.
client = Mistral(api_key="your-api-key")

# Moderate text content
texts_to_moderate = [
    "This is a normal message about cooking recipes.",
    "Let me share some travel tips for your vacation.",
    "Here's how to build a simple web application."
]

response = client.classifiers.moderate(
    inputs=texts_to_moderate
)

# Check moderation results
for i, result in enumerate(response.results):
    print(f"Text {i + 1}:")
    print(f" Safe: {not result.flagged}")
    if result.flagged:
        # NOTE(review): indentation was lost in extraction; the category and
        # text lines presumably belong inside the flagged branch — confirm
        # against the upstream SDK documentation.
        print(f" Flagged categories: {[cat.category for cat in result.categories if cat.flagged]}")
        print(f" Text: {texts_to_moderate[i][:50]}...")
    print()

from mistralai.models import UserMessage, AssistantMessage
# Example: moderate conversation messages (role/content dicts) instead of
# plain strings. Assumes `client` was created as in the previous example.
messages = [
    {"role": "user", "content": "How do I bake a chocolate cake?"},
    {"role": "assistant", "content": "Here's a simple chocolate cake recipe..."},
    {"role": "user", "content": "Can you suggest healthy cooking alternatives?"}
]

response = client.classifiers.moderate(inputs=messages)

# Process results — results are expected to align one-to-one with the inputs.
for i, result in enumerate(response.results):
    message = messages[i]
    print(f"Message from {message['role']}:")
    print(f" Content safe: {not result.flagged}")
    if result.flagged:
        print(f" Issues found: {[cat.category for cat in result.categories if cat.flagged]}")

# Classify text into categories
# Example: classify a batch of texts with a classification model.
# Assumes `client` was created as in the moderation example above.
texts = [
    "The stock market showed significant gains today with tech shares leading.",
    "Scientists discovered a new species of butterfly in the Amazon rainforest.",
    "The latest smartphone model features improved camera and battery life.",
    "Local community center offers free coding classes for children."
]

response = client.classifiers.classify(
    inputs=texts,
    model="classification-model"
)

# Review classifications — one prediction per input text, in order.
for i, prediction in enumerate(response.predictions):
    print(f"Text {i + 1}: {texts[i][:50]}...")
    print(f" Category: {prediction.category}")
    print(f" Confidence: {prediction.confidence:.3f}")
print()class ChatModerationRequest:
inputs: List[Union[str, Dict]]
model: Optional[str]
class ModerationResponse:
id: str
model: str
results: List[ModerationObject]
class ModerationObject:
flagged: bool
categories: List[ModerationCategory]
class ModerationCategory:
category: str
flagged: bool
score: floatclass ClassificationRequest:
inputs: List[str]
model: str
class ClassificationResponse:
id: str
object: str
model: str
predictions: List[Prediction]
class Prediction:
category: str
confidence: float
class ClassificationTargetResult:
category: str
score: floatCommon moderation categories include:
Install with Tessl CLI
npx tessl i tessl/pypi-mistralai