tessl install tessl/golang-github-com-azure-azure-sdk-for-go-sdk-ai-azopenai@0.9.0

Azure OpenAI extensions module for Go providing models and convenience functions to simplify integration with Azure OpenAI features.
Azure OpenAI's content filtering evaluates both input prompts and generated completions for potentially harmful content. The filtering system detects multiple categories of harmful content and provides severity ratings.
func ExtractContentFilterError(err error, contentFilterErr **ContentFilterError) bool

Checks if an error contains content filtering information. If content was filtered, it assigns the filtering details to *contentFilterErr, similar to errors.As().
Parameters:
err: The error to check
contentFilterErr: Pointer to pointer where content filter error will be stored
Returns: true if content filtering information was found, false otherwise
import (
"context"
"fmt"
"github.com/Azure/azure-sdk-for-go/sdk/ai/azopenai"
"github.com/openai/openai-go/v3"
)
resp, err := client.Chat.Completions.New(context.TODO(), chatParams)
// Check for content filtering errors
var contentFilterErr *azopenai.ContentFilterError
if azopenai.ExtractContentFilterError(err, &contentFilterErr) {
// Content was filtered
if contentFilterErr.Hate != nil && *contentFilterErr.Hate.Filtered {
fmt.Printf("Hate content detected with severity: %s\n",
*contentFilterErr.Hate.Severity)
}
if contentFilterErr.Violence != nil && *contentFilterErr.Violence.Filtered {
fmt.Printf("Violent content detected with severity: %s\n",
*contentFilterErr.Violence.Severity)
}
if contentFilterErr.Sexual != nil && *contentFilterErr.Sexual.Filtered {
fmt.Printf("Sexual content detected with severity: %s\n",
*contentFilterErr.Sexual.Severity)
}
if contentFilterErr.SelfHarm != nil && *contentFilterErr.SelfHarm.Filtered {
fmt.Printf("Self-harm content detected with severity: %s\n",
*contentFilterErr.SelfHarm.Severity)
}
return
}
// If no filtering error, check response-level content filtering
if err == nil {
azureCompletion := azopenai.ChatCompletion(*resp)
promptFilters, err := azureCompletion.PromptFilterResults()
// Process prompt filtering results...
for _, choice := range resp.Choices {
azureChoice := azopenai.ChatCompletionChoice(choice)
choiceFilters, err := azureChoice.ContentFilterResults()
// Process choice filtering results...
}
}

type ContentFilterError struct {
OpenAIError *openai.Error
ContentFilterResultDetailsForPrompt
}
func (c *ContentFilterError) Error() string
func (c *ContentFilterError) Unwrap() error
func (c *ContentFilterError) NonRetriable()

The ContentFilterError type contains detailed information about why content was filtered. It embeds ContentFilterResultDetailsForPrompt, providing direct access to all filtering details.
Methods:
Error(): Returns error message string (implements error interface)
Unwrap(): Returns the underlying OpenAI error
NonRetriable(): Marker method indicating the request failure is terminal

type ChatCompletion openai.ChatCompletion
func (c ChatCompletion) PromptFilterResults() ([]ContentFilterResultsForPrompt, error)

Extract content filtering results for input prompts from a chat completion response:
azureCompletion := azopenai.ChatCompletion(*resp)
promptFilters, err := azureCompletion.PromptFilterResults()
if err == nil {
for _, filter := range promptFilters {
fmt.Printf("Prompt %d filtering:\n", *filter.PromptIndex)
if filter.ContentFilterResults != nil {
// Check specific categories
if filter.ContentFilterResults.Hate != nil {
fmt.Printf(" Hate: filtered=%v, severity=%s\n",
*filter.ContentFilterResults.Hate.Filtered,
*filter.ContentFilterResults.Hate.Severity)
}
}
}
}

type ContentFilterResultsForPrompt struct {
ContentFilterResults *ContentFilterResultDetailsForPrompt // REQUIRED
PromptIndex *int32 // REQUIRED
}
type ContentFilterResultDetailsForPrompt struct {
CustomBlocklists *ContentFilterDetailedResults
Error *Error
Hate *ContentFilterResult
IndirectAttack *ContentFilterDetectionResult
Jailbreak *ContentFilterDetectionResult
Profanity *ContentFilterDetectionResult
SelfHarm *ContentFilterResult
Sexual *ContentFilterResult
Violence *ContentFilterResult
}

Prompt Filtering Categories:
type ChatCompletionChoice openai.ChatCompletionChoice
func (c ChatCompletionChoice) ContentFilterResults() (*ContentFilterResultsForChoice, error)

Extract content filtering results for generated completions:
for _, choice := range resp.Choices {
azureChoice := azopenai.ChatCompletionChoice(choice)
filters, err := azureChoice.ContentFilterResults()
if err == nil && filters != nil {
if filters.Hate != nil && *filters.Hate.Filtered {
fmt.Printf("Generated content contained hate speech\n")
}
if filters.ProtectedMaterialCode != nil && *filters.ProtectedMaterialCode.Detected {
fmt.Printf("Protected code material detected\n")
}
}
}

type ContentFilterResultsForChoice struct {
CustomBlocklists *ContentFilterDetailedResults
Error *Error
Hate *ContentFilterResult
Profanity *ContentFilterDetectionResult
ProtectedMaterialCode *ContentFilterCitedDetectionResult
ProtectedMaterialText *ContentFilterDetectionResult
SelfHarm *ContentFilterResult
Sexual *ContentFilterResult
UngroundedMaterial *ContentFilterCompletionTextSpanResult
Violence *ContentFilterResult
}

Completion Filtering Categories:
For categories with severity ratings (Hate, Sexual, Violence, SelfHarm):
type ContentFilterResult struct {
Filtered *bool // REQUIRED; Whether content was filtered
Severity *ContentFilterSeverity // REQUIRED; Severity rating
}

For binary detection categories (Profanity, Jailbreak, IndirectAttack, ProtectedMaterialText):
type ContentFilterDetectionResult struct {
Detected *bool // REQUIRED; Whether detection occurred
Filtered *bool // REQUIRED; Whether content was filtered
}

For detections with citation information (ProtectedMaterialCode):
type ContentFilterCitedDetectionResult struct {
Detected *bool // REQUIRED; Whether detection occurred
Filtered *bool // REQUIRED; Whether content was filtered
License *string // License description
URL *string // Internet location associated with detection
}

For detections with location information (UngroundedMaterial):
type ContentFilterCompletionTextSpanResult struct {
Details []ContentFilterCompletionTextSpan // REQUIRED
Detected *bool // REQUIRED
Filtered *bool // REQUIRED
}
type ContentFilterCompletionTextSpan struct {
CompletionEndOffset *int32 // REQUIRED; Offset of first excluded UTF32 code point
CompletionStartOffset *int32 // REQUIRED; Offset of UTF32 code point beginning span
}

type ContentFilterSeverity string
const (
ContentFilterSeveritySafe ContentFilterSeverity = "safe"
ContentFilterSeverityLow ContentFilterSeverity = "low"
ContentFilterSeverityMedium ContentFilterSeverity = "medium"
ContentFilterSeverityHigh ContentFilterSeverity = "high"
)
func PossibleContentFilterSeverityValues() []ContentFilterSeverity

Severity Descriptions:
Safe: Content may be related to sensitive categories but used in general, journalistic, scientific, medical, or similar professional contexts. Appropriate for most audiences.
Low: Content expresses prejudiced, judgmental, or opinionated views. Includes offensive language, stereotyping, use cases exploring fictional worlds (gaming, literature), and depictions at low intensity.
Medium: Content uses offensive, insulting, mocking, intimidating, or demeaning language towards specific identity groups. Includes depictions of seeking and executing harmful instructions, fantasies, glorification, and promotion of harm at medium intensity.
High: Content displays explicit and severe harmful instructions, actions, damage, or abuse. Includes endorsement, glorification, or promotion of severe harmful acts, extreme or illegal forms of harm, radicalization, or non-consensual power exchange or abuse.
Custom blocklists allow you to define specific terms or patterns to filter:
type ContentFilterDetailedResults struct {
Details []ContentFilterBlocklistIDResult // REQUIRED
Filtered *bool // REQUIRED
}
type ContentFilterBlocklistIDResult struct {
Filtered *bool // REQUIRED; Whether content was filtered by this blocklist
ID *string // REQUIRED; ID of the custom blocklist evaluated
}

Each blocklist result indicates which custom blocklist triggered and whether the content was filtered.
For streaming chat completions, extract content filter results from chunks:
type ChatCompletionChunk openai.ChatCompletionChunk
func (c ChatCompletionChunk) PromptFilterResults() ([]ContentFilterResultsForPrompt, error)

Example:
stream := client.Chat.Completions.NewStreaming(context.TODO(), chatParams)
for stream.Next() {
chunk := stream.Current()
azureChunk := azopenai.ChatCompletionChunk(chunk)
// Check prompt filters (may arrive at different times for different prompts)
promptFilters, err := azureChunk.PromptFilterResults()
if err == nil {
for _, filter := range promptFilters {
// Process prompt filtering...
}
}
}

Content filtering also works with the legacy completions API:
type Completion openai.Completion
type CompletionChoice openai.CompletionChoice
func (c Completion) PromptFilterResults() ([]ContentFilterResultsForPrompt, error)
func (c CompletionChoice) ContentFilterResults() (*ContentFilterResultsForChoice, error)

Usage is identical to chat completions, but operates on Completion and CompletionChoice types instead.
Content filtering can result in errors at two levels:
Request-level: Filtering occurs during request processing, resulting in a ContentFilterError that can be extracted using ExtractContentFilterError().
Response-level: Filtering information is included in the successful response and can be accessed via the wrapper types' methods.
Always check both levels:
resp, err := client.Chat.Completions.New(context.TODO(), chatParams)
// Check request-level filtering
var contentErr *azopenai.ContentFilterError
if azopenai.ExtractContentFilterError(err, &contentErr) {
// Handle filtered request...
return
}
if err != nil {
// Handle other errors...
return
}
// Check response-level filtering
azureResp := azopenai.ChatCompletion(*resp)
promptFilters, _ := azureResp.PromptFilterResults()
// Process filters...

type Error struct {
Code *string // REQUIRED; Server-defined error code
Message *string // REQUIRED; Human-readable error message
}

The Error type represents content filtering system errors (e.g., when the filtering system is temporarily unavailable).