This package provides an integration layer between the LangChain4j framework and Anthropic's Claude language models, enabling Java developers to seamlessly incorporate Anthropic's AI capabilities into their applications.
The AnthropicStreamingChatModel class provides token-by-token streaming access to Claude chat models through the LangChain4j framework.
Build an AnthropicStreamingChatModel instance using the fluent builder pattern.
package dev.langchain4j.model.anthropic;
import dev.langchain4j.model.chat.StreamingChatModel;
import dev.langchain4j.model.chat.request.ChatRequest;
import dev.langchain4j.model.chat.response.StreamingChatResponseHandler;
import dev.langchain4j.model.chat.listener.ChatModelListener;
import dev.langchain4j.model.chat.request.ChatRequestParameters;
import dev.langchain4j.model.Capability;
import dev.langchain4j.model.ModelProvider;
import java.util.List;
import java.util.Set;
/**
* Streaming chat model for Anthropic's Claude API.
* Delivers responses token-by-token as they're generated.
* Thread-safe after construction via builder.
*
* @since 1.0.0
*/
public class AnthropicStreamingChatModel implements StreamingChatModel {
/**
* Creates a new builder for constructing AnthropicStreamingChatModel instances.
*
* @return a new builder instance, never null
*/
public static AnthropicStreamingChatModelBuilder builder();
/**
* Executes a streaming chat request, invoking handler callbacks as tokens arrive.
* Non-blocking; returns immediately while streaming happens asynchronously.
*
* @param chatRequest the chat request containing messages and parameters, must not be null
* @param handler callback handler for streaming events, must not be null
* @throws IllegalArgumentException if chatRequest or handler is null
* @throws RuntimeException if API connection fails
*/
public void doChat(ChatRequest chatRequest, StreamingChatResponseHandler handler);
/**
* Returns registered chat model listeners for monitoring requests/responses.
*
* @return unmodifiable list of listeners, never null (may be empty)
*/
public List<ChatModelListener> listeners();
/**
* Returns the model provider (always ANTHROPIC).
*
* @return ANTHROPIC provider constant, never null
*/
public ModelProvider provider();
/**
* Returns default request parameters set during construction.
*
* @return default parameters, may be null if not configured
*/
public ChatRequestParameters defaultRequestParameters();
/**
* Returns capabilities supported by this model instance.
*
* @return set of supported capabilities, never null
*/
public Set<Capability> supportedCapabilities();
}

The streaming model builder has the same configuration options as AnthropicChatModel, except it does not support maxRetries (streaming connections do not retry).
package dev.langchain4j.model.anthropic;
import dev.langchain4j.http.client.HttpClientBuilder;
import dev.langchain4j.model.chat.listener.ChatModelListener;
import dev.langchain4j.model.chat.request.ChatRequestParameters;
import dev.langchain4j.model.chat.request.ResponseFormat;
import dev.langchain4j.model.chat.request.ToolChoice;
import dev.langchain4j.model.Capability;
import dev.langchain4j.agent.tool.ToolSpecification;
import org.slf4j.Logger;
import java.time.Duration;
import java.util.List;
import java.util.Map;
import java.util.Set;
public class AnthropicStreamingChatModelBuilder {
// Connection & Authentication
/**
* Sets the Anthropic API key for authentication.
*
* @param apiKey the API key, must not be null or empty
* @return this builder, never null
* @throws IllegalArgumentException if apiKey is null or empty
* @default no default - REQUIRED parameter
*/
public AnthropicStreamingChatModelBuilder apiKey(String apiKey);
/**
* Sets the base URL for Anthropic API.
*
* @param baseUrl the base URL including trailing slash, must not be null
* @return this builder, never null
* @default "https://api.anthropic.com/v1/"
*/
public AnthropicStreamingChatModelBuilder baseUrl(String baseUrl);
/**
* Sets the API version header.
*
* @param version the API version string, must not be null
* @return this builder, never null
* @default "2023-06-01"
*/
public AnthropicStreamingChatModelBuilder version(String version);
/**
* Sets beta feature flags (e.g., "prompt-caching-2024-07-31").
*
* @param beta comma-separated beta feature identifiers, may be null
* @return this builder, never null
* @default null (no beta features)
*/
public AnthropicStreamingChatModelBuilder beta(String beta);
/**
* Sets custom HTTP client builder for advanced HTTP configuration.
*
* @param httpClientBuilder custom HTTP client builder, must not be null
* @return this builder, never null
* @default platform default HTTP client
*/
public AnthropicStreamingChatModelBuilder httpClientBuilder(HttpClientBuilder httpClientBuilder);
/**
* Sets request timeout for API calls.
*
* @param timeout timeout duration, must not be null, must be positive
* @return this builder, never null
* @throws IllegalArgumentException if timeout is null or not positive
* @default 60 seconds
*/
public AnthropicStreamingChatModelBuilder timeout(Duration timeout);
/**
* Sets user ID for abuse detection and tracking.
*
* @param userId user identifier, may be null
* @return this builder, never null
* @default null (no user tracking)
*/
public AnthropicStreamingChatModelBuilder userId(String userId);
// Model Configuration
/**
* Sets model name using string identifier.
*
* @param modelName model identifier, must not be null
* @return this builder, never null
* @default "claude-sonnet-4-5-20250929"
*/
public AnthropicStreamingChatModelBuilder modelName(String modelName);
/**
* Sets model name using enum constant.
*
* @param modelName model name enum, must not be null
* @return this builder, never null
* @default CLAUDE_SONNET_4_5_20250929
*/
public AnthropicStreamingChatModelBuilder modelName(AnthropicChatModelName modelName);
/**
* Sets temperature for response randomness.
*
* @param temperature sampling temperature, must be null or in range [0.0, 1.0]
* @return this builder, never null
* @throws IllegalArgumentException if temperature out of range
* @default null (API default)
*/
public AnthropicStreamingChatModelBuilder temperature(Double temperature);
/**
* Sets nucleus sampling threshold.
*
* @param topP nucleus sampling parameter, must be null or in range (0.0, 1.0]
* @return this builder, never null
* @throws IllegalArgumentException if topP out of range
* @default null (API default)
*/
public AnthropicStreamingChatModelBuilder topP(Double topP);
/**
* Sets top-K sampling parameter.
*
* @param topK top-K parameter, must be null or >= 0
* @return this builder, never null
* @throws IllegalArgumentException if topK negative
* @default null (API default)
*/
public AnthropicStreamingChatModelBuilder topK(Integer topK);
/**
* Sets maximum number of tokens to generate.
*
* @param maxTokens maximum output tokens, must be null or > 0
* @return this builder, never null
* @throws IllegalArgumentException if maxTokens not positive
* @default 1024
*/
public AnthropicStreamingChatModelBuilder maxTokens(Integer maxTokens);
/**
* Sets custom stop sequences (max 4).
*
* @param stopSequences list of stop strings, may be null
* @return this builder, never null
* @throws IllegalArgumentException if more than 4 sequences
* @default null
*/
public AnthropicStreamingChatModelBuilder stopSequences(List<String> stopSequences);
/**
* Sets response format for structured output.
*
* @param responseFormat desired response format, may be null
* @return this builder, never null
* @default null (plain text)
*/
public AnthropicStreamingChatModelBuilder responseFormat(ResponseFormat responseFormat);
// Tool Support
/**
* Sets tool specifications from list.
*
* @param toolSpecifications list of tool specs, may be null
* @return this builder, never null
* @default null
*/
public AnthropicStreamingChatModelBuilder toolSpecifications(List<ToolSpecification> toolSpecifications);
/**
* Sets tool specifications from varargs.
*
* @param toolSpecifications tool specs array, may be null
* @return this builder, never null
* @default null
*/
public AnthropicStreamingChatModelBuilder toolSpecifications(ToolSpecification... toolSpecifications);
/**
* Sets tool choice strategy.
*
* @param toolChoice tool choice strategy, may be null
* @return this builder, never null
* @default ToolChoice.AUTO
*/
public AnthropicStreamingChatModelBuilder toolChoice(ToolChoice toolChoice);
/**
* Sets specific tool name for REQUIRED toolChoice.
*
* @param toolChoiceName required tool name, may be null
* @return this builder, never null
* @default null
*/
public AnthropicStreamingChatModelBuilder toolChoiceName(String toolChoiceName);
/**
* Disables parallel tool execution.
*
* @param disableParallelToolUse whether to disable parallel execution, may be null
* @return this builder, never null
* @default false (parallel enabled)
*/
public AnthropicStreamingChatModelBuilder disableParallelToolUse(Boolean disableParallelToolUse);
/**
* Enables strict schema validation.
*
* @param strictTools whether to enable strict validation, may be null
* @return this builder, never null
* @default false
*/
public AnthropicStreamingChatModelBuilder strictTools(Boolean strictTools);
/**
* Sets metadata keys to send (set).
*
* @param toolMetadataKeysToSend set of key names, may be null
* @return this builder, never null
* @default empty set
*/
public AnthropicStreamingChatModelBuilder toolMetadataKeysToSend(Set<String> toolMetadataKeysToSend);
/**
* Sets metadata keys to send (varargs).
*
* @param toolMetadataKeysToSend key names, may be null
* @return this builder, never null
* @default empty set
*/
public AnthropicStreamingChatModelBuilder toolMetadataKeysToSend(String... toolMetadataKeysToSend);
// Server Tools
/**
* Sets server-side tools (EXPERIMENTAL).
*
* @param serverTools list of server tools, may be null
* @return this builder, never null
* @default null
*/
public AnthropicStreamingChatModelBuilder serverTools(List<AnthropicServerTool> serverTools);
/**
* Sets server-side tools (EXPERIMENTAL, varargs).
*
* @param serverTools server tools array, may be null
* @return this builder, never null
* @default null
*/
public AnthropicStreamingChatModelBuilder serverTools(AnthropicServerTool... serverTools);
/**
* Controls server tool result inclusion.
*
* @param returnServerToolResults whether to include results, may be null
* @return this builder, never null
* @default false
*/
public AnthropicStreamingChatModelBuilder returnServerToolResults(Boolean returnServerToolResults);
// Caching
/**
* Enables system message caching.
*
* @param cacheSystemMessages whether to cache, may be null
* @return this builder, never null
* @default false
*/
public AnthropicStreamingChatModelBuilder cacheSystemMessages(Boolean cacheSystemMessages);
/**
* Enables tool definition caching.
*
* @param cacheTools whether to cache, may be null
* @return this builder, never null
* @default false
*/
public AnthropicStreamingChatModelBuilder cacheTools(Boolean cacheTools);
// Extended Thinking
/**
* Enables extended thinking mode.
*
* @param thinkingType thinking mode ("enabled"), may be null
* @return this builder, never null
* @throws RuntimeException if model doesn't support thinking
* @default null (disabled)
*/
public AnthropicStreamingChatModelBuilder thinkingType(String thinkingType);
/**
* Sets thinking token budget.
*
* @param thinkingBudgetTokens max thinking tokens, must be null or > 0
* @return this builder, never null
* @throws IllegalArgumentException if not positive
* @default null (model default)
*/
public AnthropicStreamingChatModelBuilder thinkingBudgetTokens(Integer thinkingBudgetTokens);
/**
* Controls thinking text return.
*
* @param returnThinking whether to return thinking, may be null
* @return this builder, never null
* @default false
*/
public AnthropicStreamingChatModelBuilder returnThinking(Boolean returnThinking);
/**
* Controls thinking in follow-ups.
*
* @param sendThinking whether to send thinking, may be null
* @return this builder, never null
* @default true
*/
public AnthropicStreamingChatModelBuilder sendThinking(Boolean sendThinking);
// Logging
/**
* Enables request logging.
*
* @param logRequests whether to log requests, may be null
* @return this builder, never null
* @default false
*/
public AnthropicStreamingChatModelBuilder logRequests(Boolean logRequests);
/**
* Enables response logging.
*
* @param logResponses whether to log responses, may be null
* @return this builder, never null
* @default false
*/
public AnthropicStreamingChatModelBuilder logResponses(Boolean logResponses);
/**
* Sets custom SLF4J logger.
*
* @param logger custom logger, may be null
* @return this builder, never null
* @default class logger
*/
public AnthropicStreamingChatModelBuilder logger(Logger logger);
/**
* Sets chat model listeners.
*
* @param listeners list of listeners, may be null
* @return this builder, never null
* @default empty list
*/
public AnthropicStreamingChatModelBuilder listeners(List<ChatModelListener> listeners);
// Advanced
/**
* Sets custom parameters for experimental features.
*
* @param customParameters map of parameters, may be null
* @return this builder, never null
* @default null
*/
public AnthropicStreamingChatModelBuilder customParameters(Map<String, Object> customParameters);
/**
* Sets default request parameters.
*
* @param parameters default parameters, may be null
* @return this builder, never null
* @default null
*/
public AnthropicStreamingChatModelBuilder defaultRequestParameters(ChatRequestParameters parameters);
/**
* Sets supported capabilities (varargs).
*
* @param supportedCapabilities capability constants, may be null
* @return this builder, never null
* @default auto-detected
*/
public AnthropicStreamingChatModelBuilder supportedCapabilities(Capability... supportedCapabilities);
/**
* Sets supported capabilities (set).
*
* @param supportedCapabilities set of capabilities, may be null
* @return this builder, never null
* @default auto-detected
*/
public AnthropicStreamingChatModelBuilder supportedCapabilities(Set<Capability> supportedCapabilities);
/**
* Builds the configured streaming model.
* Thread-safe after construction.
*
* @return configured model instance, never null
* @throws IllegalStateException if apiKey missing
* @throws IllegalArgumentException if configuration invalid
*/
public AnthropicStreamingChatModel build();
}

Example: create and use a streaming chat model.
import dev.langchain4j.model.anthropic.AnthropicStreamingChatModel;
import dev.langchain4j.model.anthropic.AnthropicChatModelName;
import dev.langchain4j.model.chat.request.ChatRequest;
import dev.langchain4j.model.chat.response.StreamingChatResponseHandler;
import dev.langchain4j.model.chat.response.ChatResponse;
import dev.langchain4j.data.message.UserMessage;
// Create streaming model
AnthropicStreamingChatModel model = AnthropicStreamingChatModel.builder()
.apiKey(System.getenv("ANTHROPIC_API_KEY"))
.modelName(AnthropicChatModelName.CLAUDE_SONNET_4_5_20250929)
.temperature(0.7)
.build();
// Create request
ChatRequest request = ChatRequest.builder()
.messages(UserMessage.from("Tell me a short story"))
.build();
// Stream response
model.doChat(request, new StreamingChatResponseHandler() {
@Override
public void onPartialResponse(String token) {
System.out.print(token);
}
@Override
public void onCompleteResponse(ChatResponse completeResponse) {
System.out.println("\n\nStreaming complete!");
System.out.println("Total tokens: " +
    completeResponse.metadata().tokenUsage().totalTokenCount());
}
@Override
public void onError(Throwable error) {
System.err.println("Error: " + error.getMessage());
}
});

Thread Safety:
Performance Notes:
The StreamingChatResponseHandler interface handles streaming events.
package dev.langchain4j.model.chat.response;
import dev.langchain4j.model.chat.response.ChatResponse;
/**
* Callback handler for streaming chat responses.
* Methods invoked asynchronously from network I/O thread.
* Implementations should be thread-safe if accessing shared state.
*/
public interface StreamingChatResponseHandler {
/**
* Called for each token as it arrives from the stream.
* Tokens are partial text fragments; concatenate to build full response.
*
* @param token text token fragment, never null
*/
void onPartialResponse(String token);
/**
* Called for each chunk of thinking text (if thinking enabled).
* Only invoked when returnThinking(true) configured.
*
* @param thinking partial thinking content, never null
*/
void onPartialThinking(PartialThinking thinking);
/**
* Called once when streaming completes successfully.
* Provides complete response with full text and metadata.
*
* @param completeResponse final response with metadata, never null
*/
void onCompleteResponse(ChatResponse completeResponse);
/**
* Called if streaming fails or is interrupted.
* No other callbacks invoked after this.
*
* @param error exception that caused failure, never null
*/
void onError(Throwable error);
}

Usage Example:
StreamingChatResponseHandler handler = new StreamingChatResponseHandler() {
private StringBuilder fullResponse = new StringBuilder();
@Override
public void onPartialResponse(String token) {
fullResponse.append(token);
System.out.print(token);
System.out.flush();
}
@Override
public void onPartialThinking(PartialThinking thinking) {
// Called when thinking text is streamed (if returnThinking enabled)
System.out.println("[Thinking: " + thinking.text() + "]");
}
@Override
public void onCompleteResponse(ChatResponse completeResponse) {
System.out.println("\n\nFinal response: " + fullResponse.toString());
// Access metadata
AnthropicChatResponseMetadata metadata =
    (AnthropicChatResponseMetadata) completeResponse.metadata();
System.out.println("Model: " + metadata.modelName());
System.out.println("Finish reason: " + metadata.finishReason());
// Token usage
AnthropicTokenUsage usage = metadata.tokenUsage();
System.out.println("Input tokens: " + usage.inputTokenCount());
System.out.println("Output tokens: " + usage.outputTokenCount());
}
@Override
public void onError(Throwable error) {
System.err.println("Streaming error: " + error.getMessage());
error.printStackTrace();
}
};
model.doChat(request, handler);

Error Handling:
onError() is invoked for: network failures, timeouts, API errors, and rate limits.

Common Pitfalls:
❌ DON'T block in callback methods
@Override
public void onPartialResponse(String token) {
Thread.sleep(1000); // Blocks I/O thread, breaks streaming
}

✅ DO keep callbacks fast and non-blocking
@Override
public void onPartialResponse(String token) {
buffer.append(token); // Fast operation
}

❌ DON'T access shared mutable state without synchronization
private List<String> tokens = new ArrayList<>(); // Not thread-safe
@Override
public void onPartialResponse(String token) {
tokens.add(token); // Race condition if handler reused
}

✅ DO synchronize shared state
private final List<String> tokens = Collections.synchronizedList(new ArrayList<>());
@Override
public void onPartialResponse(String token) {
tokens.add(token); // Thread-safe
}

Tools work with streaming models, with tool call results sent as individual events.
import dev.langchain4j.agent.tool.ToolSpecification;
ToolSpecification weatherTool = ToolSpecification.builder()
.name("get_weather")
.description("Get current weather for a location")
.parameters(/* JSON schema */)
.build();
AnthropicStreamingChatModel model = AnthropicStreamingChatModel.builder()
.apiKey(apiKey)
.modelName(AnthropicChatModelName.CLAUDE_SONNET_4_5_20250929)
.toolSpecifications(weatherTool)
.build();
model.doChat(request, new StreamingChatResponseHandler() {
@Override
public void onPartialResponse(String token) {
System.out.print(token);
}
@Override
public void onCompleteResponse(ChatResponse completeResponse) {
// Check for tool calls in final response
if (completeResponse.aiMessage().hasToolExecutionRequests()) {
List<ToolExecutionRequest> requests =
    completeResponse.aiMessage().toolExecutionRequests();
// Process tool calls...
}
}
@Override
public void onError(Throwable error) {
error.printStackTrace();
}
});

Tool Execution Notes:
Enable thinking mode to receive thinking text during streaming.
AnthropicStreamingChatModel model = AnthropicStreamingChatModel.builder()
.apiKey(apiKey)
.modelName(AnthropicChatModelName.CLAUDE_OPUS_4_5_20251101)
.thinkingType("enabled")
.thinkingBudgetTokens(5000)
.returnThinking(true) // Enable thinking in responses
.build();
model.doChat(request, new StreamingChatResponseHandler() {
@Override
public void onPartialResponse(String token) {
System.out.print(token);
}
@Override
public void onPartialThinking(PartialThinking thinking) {
// Called as thinking text is streamed
System.out.println("[Thinking: " + thinking.text() + "]");
}
@Override
public void onCompleteResponse(ChatResponse completeResponse) {
String fullThinking = completeResponse.aiMessage().thinking();
String finalAnswer = completeResponse.aiMessage().text();
System.out.println("\n\nFull thinking: " + fullThinking);
System.out.println("Answer: " + finalAnswer);
}
@Override
public void onError(Throwable error) {
error.printStackTrace();
}
});

Thinking Streaming Behavior:
Enable caching for system messages and tools in streaming mode.
AnthropicStreamingChatModel model = AnthropicStreamingChatModel.builder()
.apiKey(apiKey)
.modelName(AnthropicChatModelName.CLAUDE_SONNET_4_5_20250929)
.cacheSystemMessages(true)
.cacheTools(true)
.build();
model.doChat(request, new StreamingChatResponseHandler() {
@Override
public void onPartialResponse(String token) {
System.out.print(token);
}
@Override
public void onCompleteResponse(ChatResponse completeResponse) {
AnthropicChatResponseMetadata metadata =
    (AnthropicChatResponseMetadata) completeResponse.metadata();
AnthropicTokenUsage usage = metadata.tokenUsage();
System.out.println("\nCache metrics:");
System.out.println(" Cache read: " + usage.cacheReadInputTokens());
System.out.println(" Cache creation: " + usage.cacheCreationInputTokens());
System.out.println(" Regular input: " + usage.inputTokenCount());
}
@Override
public void onError(Throwable error) {
error.printStackTrace();
}
});

Cache Behavior:
Streaming models can be used with LangChain4j's AI Services.
import dev.langchain4j.service.AiServices;
import dev.langchain4j.service.TokenStream;
interface StreamingAssistant {
TokenStream chat(String message);
}
AnthropicStreamingChatModel model = AnthropicStreamingChatModel.builder()
.apiKey(apiKey)
.modelName(AnthropicChatModelName.CLAUDE_SONNET_4_5_20250929)
.build();
StreamingAssistant assistant = AiServices.create(StreamingAssistant.class, model);
TokenStream stream = assistant.chat("Tell me a joke");
stream.onNext(System.out::print)
.onCompleteResponse(response -> System.out.println("\nDone!"))
.onError(Throwable::printStackTrace)
.start();

AI Services Integration:
Access raw SSE events from the streaming response.
model.doChat(request, new StreamingChatResponseHandler() {
@Override
public void onPartialResponse(String token) {
System.out.print(token);
}
@Override
public void onCompleteResponse(ChatResponse completeResponse) {
AnthropicChatResponseMetadata metadata =
    (AnthropicChatResponseMetadata) completeResponse.metadata();
// Access raw SSE events
List<ServerSentEvent> events = metadata.rawServerSentEvents();
if (events != null) {
System.out.println("\nReceived " + events.size() + " SSE events");
for (ServerSentEvent event : events) {
System.out.println("Event type: " + event.name());
System.out.println("Event data: " + event.data());
}
}
}
@Override
public void onError(Throwable error) {
error.printStackTrace();
}
});

SSE Events:
package dev.langchain4j.model.chat.response;
/**
* Handler for streaming chat response events.
* Thread-safety: Handler methods invoked from I/O thread; synchronize shared state.
*/
public interface StreamingChatResponseHandler {
/**
* Called for each token fragment.
*
* @param token text fragment, never null
*/
void onPartialResponse(String token);
/**
* Called for thinking text chunks (if enabled).
*
* @param thinking partial thinking content, never null
*/
void onPartialThinking(PartialThinking thinking);
/**
* Called once when complete.
*
* @param completeResponse final response, never null
*/
void onCompleteResponse(ChatResponse completeResponse);
/**
* Called on failure.
*
* @param error exception, never null
*/
void onError(Throwable error);
}

package dev.langchain4j.model.chat.response;
/**
* Partial thinking text fragment during streaming.
*/
public class PartialThinking {
/**
* Returns thinking text fragment.
*
* @return text fragment, never null
*/
public String text();
}

package dev.langchain4j.http.client.sse;
/**
* Raw server-sent event from streaming API.
*/
public class ServerSentEvent {
/**
* Returns event type name.
*
* @return event name, never null
*/
public String name();
/**
* Returns event data payload.
*
* @return data string, never null
*/
public String data();
/**
* Returns event ID.
*
* @return event ID or null if not present
*/
public String id();
}

Notes:
- The streaming builder does not support the maxRetries option (streaming connections do not retry).
- The onCompleteResponse callback receives the final ChatResponse with full metadata.
- Thinking text (when enabled) is delivered via the onPartialThinking callback.
- Anthropic-specific response metadata is exposed through AnthropicChatResponseMetadata.
- The ResponseFormat type is from langchain4j-core for structured output control.
- HttpClientBuilder is from dev.langchain4j.http.client for custom HTTP configuration.
- Logger is org.slf4j.Logger for SLF4J-compatible logging.

Resource Lifecycle:
Install with Tessl CLI
npx tessl i tessl/maven-dev-langchain4j--langchain4j-anthropic