Spring AI integration for Azure OpenAI services providing chat completion, text embeddings, image generation, and audio transcription with GPT, DALL-E, and Whisper models
Get started with Spring AI Azure OpenAI in minutes.
Add the dependency to your pom.xml:
<dependency>
<groupId>org.springframework.ai</groupId>
<artifactId>spring-ai-azure-openai</artifactId>
<version>1.1.2</version>
</dependency>

Or for Gradle (build.gradle):
implementation 'org.springframework.ai:spring-ai-azure-openai:1.1.2'

Create an OpenAIClient instance:
import com.azure.ai.openai.OpenAIClient;
import com.azure.ai.openai.OpenAIClientBuilder;
import com.azure.core.credential.AzureKeyCredential;
OpenAIClient openAIClient = new OpenAIClientBuilder()
.credential(new AzureKeyCredential("your-api-key"))
.endpoint("https://your-resource.openai.azure.com/")
.buildClient();

import org.springframework.ai.azure.openai.AzureOpenAiChatModel;
import org.springframework.ai.azure.openai.AzureOpenAiChatOptions;
import org.springframework.ai.chat.prompt.Prompt;
import org.springframework.ai.chat.model.ChatResponse;
// Create chat model
AzureOpenAiChatOptions options = AzureOpenAiChatOptions.builder()
.deploymentName("gpt-4o")
.temperature(0.7)
.build();
AzureOpenAiChatModel chatModel = AzureOpenAiChatModel.builder()
.openAIClientBuilder(new OpenAIClientBuilder()
.credential(new AzureKeyCredential(apiKey))
.endpoint(endpoint))
.defaultOptions(options)
.build();
// Generate response
Prompt prompt = new Prompt("What is Spring AI?");
ChatResponse response = chatModel.call(prompt);
String answer = response.getResult().getOutput().getText();
System.out.println(answer);

import reactor.core.publisher.Flux;
Prompt prompt = new Prompt("Tell me a story");
Flux<ChatResponse> stream = chatModel.stream(prompt);
stream.subscribe(chunk -> {
String token = chunk.getResult().getOutput().getText();
if (token != null) {
System.out.print(token);
}
});

import org.springframework.ai.azure.openai.AzureOpenAiEmbeddingModel;
import org.springframework.ai.azure.openai.AzureOpenAiEmbeddingOptions;
import org.springframework.ai.embedding.EmbeddingRequest;
import org.springframework.ai.embedding.EmbeddingResponse;
import org.springframework.ai.document.MetadataMode;
// Create embedding model
AzureOpenAiEmbeddingOptions options = AzureOpenAiEmbeddingOptions.builder()
.deploymentName("text-embedding-ada-002")
.build();
AzureOpenAiEmbeddingModel embeddingModel = new AzureOpenAiEmbeddingModel(
openAIClient,
MetadataMode.EMBED,
options
);
// Generate embeddings
List<String> texts = List.of("Machine learning is fascinating");
EmbeddingRequest request = new EmbeddingRequest(texts, null);
EmbeddingResponse response = embeddingModel.call(request);
float[] embedding = response.getResults().get(0).getOutput();
System.out.println("Embedding dimension: " + embedding.length);

import org.springframework.ai.azure.openai.AzureOpenAiImageModel;
import org.springframework.ai.azure.openai.AzureOpenAiImageOptions;
import org.springframework.ai.image.ImagePrompt;
import org.springframework.ai.image.ImageResponse;
// Create image model
AzureOpenAiImageOptions options = AzureOpenAiImageOptions.builder()
.deploymentName("dall-e-3")
.width(1024)
.height(1024)
.build();
options.setQuality("hd");
AzureOpenAiImageModel imageModel = new AzureOpenAiImageModel(openAIClient, options);
// Generate image
ImagePrompt prompt = new ImagePrompt("A futuristic city at sunset");
ImageResponse response = imageModel.call(prompt);
String imageUrl = response.getResult().getOutput().getUrl();
System.out.println("Generated image: " + imageUrl);

import org.springframework.ai.azure.openai.AzureOpenAiAudioTranscriptionModel;
import org.springframework.ai.azure.openai.AzureOpenAiAudioTranscriptionOptions;
import org.springframework.core.io.FileSystemResource;
import org.springframework.core.io.Resource;
// Create transcription model
AzureOpenAiAudioTranscriptionOptions options =
AzureOpenAiAudioTranscriptionOptions.builder()
.deploymentName("whisper")
.language("en")
.build();
AzureOpenAiAudioTranscriptionModel transcriptionModel =
new AzureOpenAiAudioTranscriptionModel(openAIClient, options);
// Transcribe audio
Resource audioFile = new FileSystemResource("meeting.mp3");
String transcription = transcriptionModel.call(audioFile);
System.out.println(transcription);

For Spring Boot applications, configure properties in application.properties:
spring.ai.azure.openai.api-key=your-api-key
spring.ai.azure.openai.endpoint=https://your-resource.openai.azure.com/
spring.ai.azure.openai.chat.options.deployment-name=gpt-4o
spring.ai.azure.openai.chat.options.temperature=0.7

Then inject the models:
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
@Service
public class AIService {

    // Constructor injection is preferred over field @Autowired: it allows the
    // dependencies to be final, makes them explicit, and keeps the class
    // testable with a plain constructor call (no Spring context required).
    private final AzureOpenAiChatModel chatModel;
    private final AzureOpenAiEmbeddingModel embeddingModel;

    public AIService(AzureOpenAiChatModel chatModel, AzureOpenAiEmbeddingModel embeddingModel) {
        this.chatModel = chatModel;
        this.embeddingModel = embeddingModel;
    }

    /**
     * Sends the user message to the configured chat deployment and returns
     * the model's text reply.
     *
     * @param message the user's input text
     * @return the generated response text
     */
    public String chat(String message) {
        ChatResponse response = chatModel.call(new Prompt(message));
        return response.getResult().getOutput().getText();
    }
}

Always handle potential exceptions:
import com.azure.core.exception.HttpResponseException;
import com.azure.core.exception.ResourceNotFoundException;
try {
ChatResponse response = chatModel.call(prompt);
} catch (HttpResponseException e) {
if (e.getResponse().getStatusCode() == 429) {
// Rate limit - implement retry with backoff
System.err.println("Rate limit exceeded");
} else if (e.getResponse().getStatusCode() == 401) {
// Authentication error
System.err.println("Invalid API key or endpoint");
}
} catch (ResourceNotFoundException e) {
// Deployment not found
System.err.println("Deployment not found: check deployment name");
}

tessl i tessl/maven-org-springframework-ai--spring-ai-azure-openai@1.1.1