Spring AI Chat Client provides a fluent API for building AI-powered applications with LLMs, supporting advisors, streaming, structured outputs, and conversation memory
Learn how to configure requests using the fluent API.
chatClient
.prompt() // Start request
.system("...") // System message (optional)
.user("...") // User message
.options(...) // Model options (optional)
.advisors(...) // Advisors (optional)
.call() // Execute
.content(); // Get response

Simplest one-liner form:

chatClient.prompt().user("What is Spring?").call().content();

Use a lambda to fill template parameters in the user message:

chatClient
.prompt()
.user(spec -> spec
.text("Explain {topic} in {language}")
.param("topic", "dependency injection")
.param("language", "Java")
)
.call()
.content();

Load the user message from an external resource:

import org.springframework.core.io.ClassPathResource;
chatClient
.prompt()
.user(new ClassPathResource("prompts/query.txt"))
.call()
.content();

Attach media (e.g. an image) for multimodal models:

import org.springframework.ai.content.Media;
import org.springframework.util.MimeTypeUtils;
Media image = new Media(MimeTypeUtils.IMAGE_PNG, imageResource);
chatClient
.prompt()
.user(spec -> spec
.text("Describe this image")
.media(image)
)
.call()
.content();

Set context or behavior for the AI:
chatClient
.prompt()
.system("You are a Java expert. Provide concise answers.")
.user("Explain interfaces")
.call()
.content();

With parameters:
chatClient
.prompt()
.system(spec -> spec
.text("You are an expert in {domain}")
.param("domain", "microservices")
)
.user("Explain service discovery")
.call()
.content();

Configure temperature, max tokens, etc.:
import org.springframework.ai.chat.options.ChatOptionsBuilder;
chatClient
.prompt("Write a creative story")
.options(ChatOptionsBuilder.builder()
.withTemperature(0.9) // Higher = more creative
.withMaxTokens(1000) // Limit response length
.withTopP(0.95) // Nucleus sampling
.build())
.call()
.content();

Add interceptors for specific requests:
import org.springframework.ai.chat.client.advisor.SimpleLoggerAdvisor;
chatClient
.prompt("Hello")
.advisors(SimpleLoggerAdvisor.builder().build())
.call()
.content();

With parameters:
chatClient
.prompt("Continue conversation")
.advisors(spec -> spec
.advisors(memoryAdvisor)
.param("conversationId", "user-123")
)
.call()
.content();

Add functions the AI can call:
import org.springframework.ai.tool.ToolCallback;
ToolCallback weatherTool = ToolCallback.builder()
.function("getWeather", this::getWeather)
.description("Get current weather")
.inputType(WeatherRequest.class)
.build();
chatClient
.prompt("What's the weather in Paris?")
.toolCallbacks(weatherTool)
.call()
.content();

Map the response onto a structured type, combining several of the features above:

record Analysis(String summary, List<String> keyPoints, double confidence) {}
Analysis result = chatClient
.prompt()
.system(spec -> spec
.text("You are an analyst. Focus on {aspect}.")
.param("aspect", "technical details")
)
.user(spec -> spec
.text("Analyze: {content}")
.param("content", documentText)
)
.options(ChatOptionsBuilder.builder()
.withTemperature(0.3) // More deterministic
.withMaxTokens(500)
.build())
.advisors(spec -> spec
.advisors(validationAdvisor, loggerAdvisor)
.param("requestId", UUID.randomUUID().toString())
)
.call()
.entity(Analysis.class);

Validate user input before building the prompt:

String topic = userInput;
if (topic == null || topic.isBlank()) {
throw new IllegalArgumentException("Topic required");
}
chatClient.prompt().user(spec -> spec
.text("Explain {topic}")
.param("topic", topic)
).call().content();

Reuse common option sets by defining them once as a constant:

// Define once
private static final ChatOptions CREATIVE_OPTIONS =
ChatOptionsBuilder.builder()
.withTemperature(0.9)
.withMaxTokens(1000)
.build();
// Reuse
chatClient.prompt("...").options(CREATIVE_OPTIONS).call().content();

Build requests conditionally:

var request = chatClient.prompt();
if (includeContext) {
request.system("Additional context: " + context);
}
if (needsValidation) {
request.advisors(validationAdvisor);
}
String response = request.user(query).call().content();