Gemini CLI Core - Core functionality library for the open-source AI agent that brings the power of Gemini directly into your terminal.
Overall score: 87%
Evaluation — 87%
↑ 1.01x agent success when using this tile
Core AI functionality for content generation, chat management, and agentic interactions with Google's Gemini models. This module provides the primary interfaces for AI-powered applications.
Main client class that orchestrates AI interactions, tool execution, and conversation management.
/**
* Main client for Gemini API interactions with tool integration and conversation management
*/
class GeminiClient {
constructor(config: Config);
// Initialization and setup
initialize(): Promise<void>;
isInitialized(): boolean;
// Chat management
startChat(extraHistory?: Content[]): Promise<GeminiChat>;
getChat(): GeminiChat;
resetChat(): Promise<void>;
// History management
addHistory(content: Content): Promise<void>;
getHistory(): Content[];
stripThoughtsFromHistory(): void;
setHistory(history: Content[]): void;
// Content generation
generateContent(
contents: Content[],
generationConfig: GenerateContentConfig,
abortSignal: AbortSignal,
model: string
): Promise<GenerateContentResponse>;
generateJson<T>(
contents: Content[],
schema: Record<string, unknown>,
abortSignal: AbortSignal,
model: string,
generationConfig?: GenerateContentConfig
): Promise<T>;
sendMessageStream(
request: PartListUnion,
signal: AbortSignal,
prompt_id: string,
turns?: number,
originalModel?: string
): AsyncGenerator<ServerGeminiStreamEvent, Turn>;
// Tool management
setTools(): Promise<void>;
// Context management
addDirectoryContext(): Promise<void>;
// Advanced features
generateEmbedding(texts: string[]): Promise<number[][]>;
tryCompressChat(
prompt_id: string,
force?: boolean
): Promise<ChatCompressionInfo>;
// Services
getChatRecordingService(): ChatRecordingService | undefined;
}
Chat session management with streaming support, tool integration, and conversation history.
/**
* Chat session management with streaming support and tool integration
*/
class GeminiChat {
constructor(
config: Config,
generationConfig?: GenerateContentConfig,
history?: Content[]
);
// Message sending
sendMessage(
params: SendMessageParameters,
prompt_id: string
): Promise<GenerateContentResponse>;
sendMessageStream(
params: SendMessageParameters,
prompt_id: string
): Promise<AsyncGenerator<StreamEvent>>;
// History management
getHistory(curated?: boolean): Content[];
clearHistory(): void;
addHistory(content: Content): void;
setHistory(history: Content[]): void;
stripThoughtsFromHistory(): void;
// Configuration
setSystemInstruction(sysInstr: string): void;
setTools(tools: Tool[]): void;
// Error handling and context
maybeIncludeSchemaDepthContext(error: StructuredError): Promise<void>;
// Services
getChatRecordingService(): ChatRecordingService;
recordCompletedToolCalls(toolCalls: CompletedToolCall[]): void;
}
interface SendMessageOptions {
systemInstruction?: string;
tools?: Tool[];
generationConfig?: GenerationConfig;
}
Content generation configuration and factory functions for different authentication types.
/**
* Interface for content generation with multiple provider support
*/
interface ContentGenerator {
generateContent(request: GenerateContentRequest): Promise<GenerateContentResponse>;
generateContentStream(request: GenerateContentRequest): AsyncGenerator<GenerateContentResponse>;
countTokens(request: CountTokensRequest): Promise<CountTokensResponse>;
embedContent(request: EmbedContentRequest): Promise<EmbedContentResponse>;
}
/**
* Configuration for content generator initialization
*/
interface ContentGeneratorConfig {
authType: AuthType;
apiKey?: string;
projectId?: string;
location?: string;
model: string;
credentials?: any;
}
/**
* Create content generator configuration based on auth type
* @param config - Application configuration
* @param authType - Authentication method to use
* @returns Configuration for content generator
*/
function createContentGeneratorConfig(
config: Config,
authType: AuthType
): ContentGeneratorConfig;
/**
* Create content generator instance
* @param config - Content generator configuration
* @param gcConfig - Global configuration
* @param sessionId - Optional session identifier
* @returns Content generator instance
*/
function createContentGenerator(
config: ContentGeneratorConfig,
gcConfig: Config,
sessionId?: string
): Promise<ContentGenerator>;
Decorator for logging all content generation API calls for debugging and monitoring.
/**
* Decorator for logging content generation API calls
*/
class LoggingContentGenerator implements ContentGenerator {
constructor(wrapped: ContentGenerator, logger?: Logger);
getWrapped(): ContentGenerator;
generateContent(request: GenerateContentRequest): Promise<GenerateContentResponse>;
generateContentStream(request: GenerateContentRequest): AsyncGenerator<GenerateContentResponse>;
countTokens(request: CountTokensRequest): Promise<CountTokensResponse>;
embedContent(request: EmbedContentRequest): Promise<EmbedContentResponse>;
}
Agentic loop management for multi-turn conversations with tool execution and event streaming.
/**
* Manages agentic loop turns with tool execution and event streaming
*/
class Turn {
constructor(
chat: GeminiChat,
coreToolScheduler: CoreToolScheduler,
config: Config,
logger?: Logger
);
// Event streaming
sendMessageStream(
message: string | Part[],
onEvent?: (event: GeminiEvent) => void
): AsyncGenerator<GeminiEvent>;
// Turn execution
executeTurn(request: GenerateContentRequest): Promise<GenerateContentResult>;
}
/**
* Event types for streaming Gemini interactions
*/
enum GeminiEventType {
Content = 'Content',
ToolCallRequest = 'ToolCallRequest',
ToolCallResponse = 'ToolCallResponse',
ToolCallConfirmation = 'ToolCallConfirmation',
UserCancelled = 'UserCancelled',
Error = 'Error',
ChatCompressed = 'ChatCompressed',
Thought = 'Thought',
MaxSessionTurns = 'MaxSessionTurns',
Finished = 'Finished',
LoopDetected = 'LoopDetected',
Citation = 'Citation',
Retry = 'Retry'
}
/**
* Stream event types for real-time communication
*/
enum StreamEventType {
CHUNK = 'CHUNK',
RETRY = 'RETRY'
}
type StreamEvent =
| { type: StreamEventType.CHUNK; chunk: EnhancedGenerateContentResponse }
| { type: StreamEventType.RETRY };
Specialized error classes for AI operations and error detection utilities.
/**
* Error for empty response streams
*/
class EmptyStreamError extends Error {
constructor(message?: string);
}
/**
* Structured error with status code information
*/
interface StructuredError {
message: string;
status?: number;
code?: string;
details?: any;
}
/**
* Check if error message indicates schema depth issues
* @param errorMessage - Error message to check
* @returns True if schema depth error
*/
function isSchemaDepthError(errorMessage: string): boolean;
/**
* Check if error message indicates invalid argument
* @param errorMessage - Error message to check
* @returns True if invalid argument error
*/
function isInvalidArgumentError(errorMessage: string): boolean;
Chat compression functionality for managing conversation length and token usage.
/**
* Compression status enumeration
*/
enum CompressionStatus {
COMPRESSED = 'COMPRESSED',
COMPRESSION_FAILED_INFLATED_TOKEN_COUNT = 'COMPRESSION_FAILED_INFLATED_TOKEN_COUNT',
COMPRESSION_FAILED_TOKEN_COUNT_ERROR = 'COMPRESSION_FAILED_TOKEN_COUNT_ERROR',
NOOP = 'NOOP'
}
/**
* Chat compression information
*/
interface ChatCompressionInfo {
status: CompressionStatus;
originalTokens: number;
compressedTokens: number;
compressionRatio: number;
}
/**
* Find index after specified fraction of conversation history
* @param history - Conversation history
* @param fraction - Fraction point (0-1)
* @returns Index position
*/
function findIndexAfterFraction(history: Content[], fraction: number): number;
Utility functions for model capabilities and content processing.
/**
* Check if model supports thinking capability
* @param model - Model name to check
* @returns True if thinking is supported
*/
function isThinkingSupported(model: string): boolean;
/**
* Check if thinking is default for model
* @param model - Model name to check
* @returns True if thinking is default
*/
function isThinkingDefault(model: string): boolean;
/**
* Convert part list union to string representation
* @param value - Part list union value
* @returns String representation
*/
function partListUnionToString(value: PartListUnion): string;
/**
* Gemini code request type alias
*/
type GeminiCodeRequest = PartListUnion;
Usage Examples:
import {
GeminiClient,
createContentGeneratorConfig,
createContentGenerator,
AuthType,
Config
} from '@google/gemini-cli-core';
// Basic setup
const config = new Config({
model: 'gemini-1.5-flash',
apiKey: process.env.GEMINI_API_KEY
});
const generatorConfig = createContentGeneratorConfig(config, AuthType.USE_GEMINI);
const contentGenerator = await createContentGenerator(generatorConfig, config);
const client = new GeminiClient(config); // constructor takes only Config (see class declaration above)
await client.initialize();
// Simple content generation
const response = await client.generateContent(
  [{
    role: 'user',
    parts: [{ text: 'What is quantum computing?' }]
  }],
  {},
  new AbortController().signal,
  'gemini-1.5-flash'
);
console.log(response.response.text());
// Streaming conversation with tools
const chat = await client.startChat();
for await (const event of client.sendMessageStream('List files and analyze the project structure', new AbortController().signal, 'prompt-1')) {
switch (event.type) {
case 'Content':
process.stdout.write(event.content);
break;
case 'ToolCallRequest':
console.log(`Calling tool: ${event.toolName}`);
break;
case 'ToolCallResponse':
console.log(`Tool result: ${event.result}`);
break;
}
}
// JSON generation
const analysis = await client.generateJson<{summary: string, recommendations: string[]}>(
  [{
    role: 'user',
    parts: [{ text: 'Analyze this codebase and return a JSON summary with recommendations' }]
  }],
  {
    type: 'object',
    properties: {
      summary: { type: 'string' },
      recommendations: { type: 'array', items: { type: 'string' } }
    }
  },
  new AbortController().signal,
  'gemini-1.5-flash'
);
console.log(analysis.summary);
console.log(analysis.recommendations);
Install with Tessl CLI
npx tessl i tessl/npm-google--gemini-cli-core
docs
evals
scenario-1
scenario-2
scenario-3
scenario-4
scenario-5
scenario-6
scenario-7
scenario-8
scenario-9
scenario-10