Idiomatic Java SDK for the Gemini Developer API and Vertex AI API
WARNING: This API is experimental and subject to change.
Real-time bidirectional communication with AI models using WebSocket connections. Live sessions enable low-latency, streaming interactions for applications requiring immediate responses, such as voice assistants, real-time translation, or interactive chatbots.
import com.google.genai.AsyncLive;
import com.google.genai.AsyncSession;
import com.google.genai.types.LiveConnectConfig;
import com.google.genai.types.LiveSendClientContentParameters;
import com.google.genai.types.LiveSendRealtimeInputParameters;
import com.google.genai.types.LiveSendToolResponseParameters;
import com.google.genai.types.LiveServerMessage;
import java.util.concurrent.CompletableFuture;
import java.util.function.Consumer;package com.google.genai;
// Entry point for the Live API; obtained via client.async.live.
public class AsyncLive {
// Asynchronously opens a live WebSocket connection to the given model and
// completes with a connected AsyncSession. The examples below pass a null
// config when no explicit configuration is needed.
public CompletableFuture<AsyncSession> connect(
String model,
LiveConnectConfig config);
}package com.google.genai;
// A single live, bidirectional conversation with a model. All operations are
// asynchronous and return CompletableFuture.
public final class AsyncSession {
// Send client content (one or more complete conversational turns)
public CompletableFuture<Void> sendClientContent(
LiveSendClientContentParameters clientContent);
// Send realtime audio input (raw media chunks)
public CompletableFuture<Void> sendRealtimeInput(
LiveSendRealtimeInputParameters realtimeInput);
// Send tool/function response back to the model
public CompletableFuture<Void> sendToolResponse(
LiveSendToolResponseParameters toolResponse);
// Register callback invoked for each incoming server message
public CompletableFuture<Void> receive(
Consumer<LiveServerMessage> onMessage);
// Close the underlying WebSocket connection
public CompletableFuture<Void> close();
// Get the server-assigned session ID
public String sessionId();
}package com.google.genai.types;
// Configuration passed to AsyncLive.connect; all fields are optional.
public final class LiveConnectConfig {
public static Builder builder();
// Generation settings (temperature, max tokens, tools, ...) for the session.
public Optional<GenerateContentConfig> config();
// HTTP transport options for the connection.
public Optional<HttpOptions> httpOptions();
}package com.google.genai.types;
// A message received from the server. Which accessor is populated depends on
// the message type; check each Optional, as the receive examples do.
public final class LiveServerMessage {
public static LiveServerMessage fromJson(String json);
public Optional<SetupComplete> setupComplete();
public Optional<ServerContent> serverContent();
public Optional<ToolCall> toolCall();
public Optional<ToolCallCancellation> toolCallCancellation();
}import com.google.genai.Client;
// Example: basic connection — connect, log the session id, register a
// receiver for server content, then send a single text turn.
import com.google.genai.AsyncLive;
import com.google.genai.AsyncSession;
Client client = new Client();
// Connect to live session
CompletableFuture<AsyncSession> sessionFuture =
client.async.live.connect("gemini-2.0-flash", null);
sessionFuture.thenAccept(session -> {
System.out.println("Connected! Session ID: " + session.sessionId());
// Register message receiver
session.receive(message -> {
message.serverContent().ifPresent(content -> {
System.out.println("Server: " + content);
});
});
// Send a message
LiveSendClientContentParameters params =
LiveSendClientContentParameters.builder()
.turns(List.of(
Content.fromParts(Part.fromText("Hello from live session!"))
))
.build();
session.sendClientContent(params).thenRun(() -> {
System.out.println("Message sent successfully");
});
});import com.google.genai.types.LiveConnectConfig;
// Example: connecting with an explicit GenerateContentConfig wrapped in a
// LiveConnectConfig.
import com.google.genai.types.GenerateContentConfig;
GenerateContentConfig contentConfig = GenerateContentConfig.builder()
.temperature(0.7)
.maxOutputTokens(1024)
.build();
LiveConnectConfig connectConfig = LiveConnectConfig.builder()
.config(contentConfig)
.build();
CompletableFuture<AsyncSession> sessionFuture =
client.async.live.connect("gemini-2.0-flash", connectConfig);
sessionFuture.thenAccept(session -> {
System.out.println("Connected with config");
});import com.google.genai.types.LiveSendClientContentParameters;
import com.google.genai.types.Content;
import com.google.genai.types.Part;
import java.util.List;
// Example: send a single completed user turn.
AsyncSession session = /* ... */;
Content userMessage = Content.fromParts(
Part.fromText("What is the weather like today?")
);
LiveSendClientContentParameters params =
LiveSendClientContentParameters.builder()
.turns(List.of(userMessage))
// Marks the end of the client's turn so the model can respond.
.turnComplete(true)
.build();
session.sendClientContent(params).thenRun(() -> {
System.out.println("Content sent");
}).exceptionally(error -> {
System.err.println("Failed to send: " + error.getMessage());
return null;
});Content turn1 = Content.fromParts(Part.fromText("Tell me about AI"));
// Example: send several turns in a single call.
Content turn2 = Content.fromParts(Part.fromText("Specifically machine learning"));
LiveSendClientContentParameters params =
LiveSendClientContentParameters.builder()
.turns(List.of(turn1, turn2))
.turnComplete(true)
.build();
session.sendClientContent(params);import com.google.genai.types.LiveSendRealtimeInputParameters;
// Send audio chunks for real-time processing
byte[] audioData = /* audio bytes */;
LiveSendRealtimeInputParameters audioParams =
LiveSendRealtimeInputParameters.builder()
.mediaChunks(List.of(audioData))
.build();
session.sendRealtimeInput(audioParams).thenRun(() -> {
System.out.println("Audio sent");
});import com.google.genai.types.LiveSendToolResponseParameters;
import com.google.genai.types.FunctionResponse;
// Respond to a tool call from the model
FunctionResponse functionResponse = FunctionResponse.builder()
.name("get_weather")
.response(Map.of("temperature", 72, "condition", "sunny"))
.build();
LiveSendToolResponseParameters toolParams =
LiveSendToolResponseParameters.builder()
.functionResponses(List.of(functionResponse))
.build();
session.sendToolResponse(toolParams);import com.google.genai.types.LiveServerMessage;
// Example: full receive handler covering every LiveServerMessage variant.
AsyncSession session = /* ... */;
session.receive(message -> {
// Handle setup complete
message.setupComplete().ifPresent(setup -> {
System.out.println("Setup complete for session: " +
setup.sessionId().orElse("N/A"));
});
// Handle server content
message.serverContent().ifPresent(content -> {
content.modelTurn().ifPresent(turn -> {
turn.parts().ifPresent(parts -> {
for (Part part : parts) {
part.text().ifPresent(text -> {
System.out.print(text);
});
}
});
});
});
// Handle tool calls
message.toolCall().ifPresent(toolCall -> {
toolCall.functionCalls().ifPresent(calls -> {
for (FunctionCall call : calls) {
System.out.println("Tool call: " + call.name().orElse("N/A"));
// Execute function and send response
// NOTE(review): brace mismatch — the for-loop opened above should be
// closed with '}' before this '});'.
});
});
});
// Handle tool call cancellation
message.toolCallCancellation().ifPresent(cancellation -> {
System.out.println("Tool call cancelled");
});
});session.receive(message -> {
// Example: stream text chunks as they arrive and detect turn completion.
message.serverContent().ifPresent(content -> {
content.modelTurn().ifPresent(turn -> {
turn.parts().ifPresent(parts -> {
// Stream each chunk of text as it arrives
parts.forEach(part -> {
part.text().ifPresent(text -> {
System.out.print(text);
System.out.flush();
});
});
});
});
// Check if turn is complete
if (content.turnComplete().orElse(false)) {
System.out.println("\n[Turn complete]");
}
});
});import java.util.Scanner;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReference;
/**
 * Interactive console chat over a live session: reads user turns from stdin,
 * streams the model's reply to stdout, and exits when the user types "exit".
 */
public class LiveChatExample {
  public static void main(String[] args) throws Exception {
    Client client = new Client();
    CompletableFuture<AsyncSession> sessionFuture =
        client.async.live.connect("gemini-2.0-flash", null);
    AsyncSession session = sessionFuture.get();
    System.out.println("Connected! Session: " + session.sessionId());
    // BUG FIX: the original declared a plain CountDownLatch local, captured it
    // in the receive lambda, and then reassigned it after every turn. Locals
    // captured by a lambda must be effectively final, so that code does not
    // compile. Holding the per-turn latch in an AtomicReference keeps the
    // captured variable effectively final while still allowing a fresh latch
    // per turn.
    AtomicReference<CountDownLatch> responseLatch =
        new AtomicReference<>(new CountDownLatch(1));
    // Register message handler: print streamed text and release the current
    // latch when the model signals the end of its turn.
    session.receive(message -> {
      message.serverContent().ifPresent(content -> {
        content.modelTurn().ifPresent(turn -> {
          turn.parts().ifPresent(parts -> {
            parts.forEach(part -> {
              part.text().ifPresent(text -> {
                System.out.print(text);
              });
            });
          });
        });
        if (content.turnComplete().orElse(false)) {
          System.out.println();
          responseLatch.get().countDown();
        }
      });
    });
    Scanner scanner = new Scanner(System.in);
    while (true) {
      System.out.print("\nYou: ");
      String input = scanner.nextLine();
      if ("exit".equalsIgnoreCase(input)) {
        break;
      }
      Content userContent = Content.fromParts(Part.fromText(input));
      LiveSendClientContentParameters params =
          LiveSendClientContentParameters.builder()
              .turns(List.of(userContent))
              .turnComplete(true)
              .build();
      System.out.print("Bot: ");
      session.sendClientContent(params).get();
      // Wait for the response to complete, then arm a fresh latch for the
      // next turn.
      responseLatch.get().await();
      responseLatch.set(new CountDownLatch(1));
    }
    session.close().get();
    scanner.close();
    client.close();
  }
}// Real-time audio streaming example
// Example: stream microphone audio to the model and handle text/audio replies.
AsyncSession session = client.async.live.connect(
"gemini-2.0-flash",
null
).get();
session.receive(message -> {
message.serverContent().ifPresent(content -> {
// Handle audio response from model
content.modelTurn().ifPresent(turn -> {
turn.parts().ifPresent(parts -> {
parts.forEach(part -> {
part.text().ifPresent(text -> {
System.out.println("Model: " + text);
});
// Handle audio output if present
part.inlineData().ifPresent(blob -> {
// Play audio response
});
});
});
});
});
});
// Stream audio input in real-time
try (AudioInputStream audioStream = /* microphone input */) {
byte[] buffer = new byte[4096];
int bytesRead;
while ((bytesRead = audioStream.read(buffer)) != -1) {
// Copy only the bytes actually read so the chunk has no trailing garbage.
byte[] audioChunk = Arrays.copyOf(buffer, bytesRead);
LiveSendRealtimeInputParameters params =
LiveSendRealtimeInputParameters.builder()
.mediaChunks(List.of(audioChunk))
.build();
session.sendRealtimeInput(params);
}
}import com.google.genai.types.Tool;
import com.google.genai.types.FunctionDeclaration;
import com.google.genai.types.Schema;
// Example: declare a tool, connect with it, and answer the model's tool calls.
// Define functions
FunctionDeclaration weatherFunction = FunctionDeclaration.builder()
.name("get_weather")
.description("Get current weather for a location")
.parameters(Schema.builder()
.type("object")
.properties(Map.of(
"location", Schema.builder().type("string").build()
))
.required(List.of("location"))
.build())
.build();
Tool tool = Tool.builder()
.functionDeclarations(List.of(weatherFunction))
.build();
GenerateContentConfig config = GenerateContentConfig.builder()
.tools(List.of(tool))
.build();
LiveConnectConfig liveConfig = LiveConnectConfig.builder()
.config(config)
.build();
AsyncSession session = client.async.live.connect(
"gemini-2.0-flash",
liveConfig
).get();
// Handle tool calls
session.receive(message -> {
message.toolCall().ifPresent(toolCall -> {
toolCall.functionCalls().ifPresent(calls -> {
for (FunctionCall call : calls) {
String functionName = call.name().orElse("");
// NOTE(review): args can be null here (orElse(null)); guard before
// dereferencing with args.get("location") below.
JsonNode args = call.args().orElse(null);
if ("get_weather".equals(functionName)) {
// Execute function
String location = args.get("location").asText();
Map<String, Object> result = getWeather(location);
// Send response back, echoing the call id so the model can match it.
FunctionResponse response = FunctionResponse.builder()
.name(functionName)
.id(call.id().orElse(""))
.response(result)
.build();
LiveSendToolResponseParameters params =
LiveSendToolResponseParameters.builder()
.functionResponses(List.of(response))
.build();
session.sendToolResponse(params);
}
}
});
});
});AsyncSession session = /* ... */;
// Example: close the session asynchronously and log any failure.
// Close when done
session.close().thenRun(() -> {
System.out.println("Session closed successfully");
}).exceptionally(error -> {
System.err.println("Error closing session: " + error.getMessage());
return null;
});Live sessions don't implement AutoCloseable, so manual cleanup is required:
AsyncSession session = null;
try {
session = client.async.live.connect("gemini-2.0-flash", null).get();
// Use session...
} finally {
// Close only if the connect succeeded.
if (session != null) {
session.close().get();
}
}CompletableFuture<AsyncSession> sessionFuture =
client.async.live.connect("gemini-2.0-flash", null);
// Example: handle connection failures on the future chain.
sessionFuture
.thenAccept(session -> {
System.out.println("Connected successfully");
})
.exceptionally(error -> {
System.err.println("Connection failed: " + error.getMessage());
if (error instanceof GenAiIOException) {
System.err.println("I/O error during connection");
}
return null;
});session.sendClientContent(params)
.thenRun(() -> {
System.out.println("Message sent");
})
.exceptionally(error -> {
System.err.println("Failed to send message: " + error.getMessage());
// Optionally retry or reconnect
return null;
});// Keep session alive for multiple interactions
AsyncSession session = client.async.live.connect(
"gemini-2.0-flash",
null
).get();
try {
// Use session for multiple messages
for (String userInput : userInputs) {
LiveSendClientContentParameters params =
LiveSendClientContentParameters.builder()
.turns(List.of(Content.fromParts(Part.fromText(userInput))))
.turnComplete(true)
.build();
session.sendClientContent(params).get();
// Process response...
}
} finally {
// Always close when done
session.close().get();
}AsyncSession session = null;
// Example: retry the connection a bounded number of times.
int maxRetries = 3;
int retryCount = 0;
while (retryCount < maxRetries) {
try {
session = client.async.live.connect("gemini-2.0-flash", null).get();
break; // Success
} catch (Exception e) {
retryCount++;
System.err.println("Connection attempt " + retryCount + " failed");
if (retryCount < maxRetries) {
// NOTE(review): Thread.sleep throws InterruptedException, which this
// catch block does not cover — the enclosing method must declare it.
Thread.sleep(1000 * retryCount); // Linear backoff: delay grows 1s per attempt
}
}
}
if (session == null) {
throw new RuntimeException("Failed to connect after " + maxRetries + " attempts");
}Since callbacks are async, use synchronization primitives when needed:
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
// Example: block the calling thread until a complete response arrives, with a
// timeout. NOTE(review): AtomicReference also needs
// java.util.concurrent.atomic.AtomicReference imported.
CountDownLatch responseLatch = new CountDownLatch(1);
AtomicReference<String> response = new AtomicReference<>();
session.receive(message -> {
message.serverContent().ifPresent(content -> {
if (content.turnComplete().orElse(false)) {
content.modelTurn().ifPresent(turn -> {
// Collect response
response.set(extractText(turn));
responseLatch.countDown();
});
}
});
});
// Send message
session.sendClientContent(params).get();
// Wait for response (with timeout)
if (responseLatch.await(30, TimeUnit.SECONDS)) {
System.out.println("Response: " + response.get());
} else {
System.err.println("Timeout waiting for response");
}Low-latency audio streaming for voice conversations with minimal delay.
Streaming text responses for responsive user experiences.
Real-time translation of speech or text with immediate feedback.
Interactive code completion and explanation with streaming responses.
Live analysis of user-generated content with immediate feedback.
Install with Tessl CLI
npx tessl i tessl/maven-com-google-genai--google-genaidocs