Fluent DSL and Kotlin DSL for building autonomous agents with planning capabilities on the JVM, featuring annotation-based and programmatic configuration for agentic flows with Spring Boot integration
The Embabel Agent API provides pre-configured model definitions for multiple LLM providers. These constants simplify model selection and ensure correct model identifiers.
OpenAI model definitions including GPT-4.1, GPT-5, and embedding models.
/**
 * OpenAI model identifiers for the Embabel Agent API.
 *
 * Each constant is the literal model id string passed to the OpenAI API.
 */
class OpenAiModels {
    companion object {
        /** GPT-4.1 Mini model. */
        const val GPT_41_MINI: String = "gpt-4.1-mini"

        /** GPT-4.1 standard model. */
        const val GPT_41: String = "gpt-4.1"

        /** GPT-4.1 Nano model. */
        const val GPT_41_NANO: String = "gpt-4.1-nano"

        /** GPT-5 standard model. */
        const val GPT_5: String = "gpt-5"

        /** GPT-5 Mini model. */
        const val GPT_5_MINI: String = "gpt-5-mini"

        /** GPT-5 Nano model. */
        const val GPT_5_NANO: String = "gpt-5-nano"

        /** Provider identifier used to select OpenAI as the backing provider. */
        const val PROVIDER: String = "OpenAI"

        /** Text embedding model (small). */
        const val TEXT_EMBEDDING_3_SMALL: String = "text-embedding-3-small"

        /** Default text embedding model; currently the small embedding model. */
        const val DEFAULT_TEXT_EMBEDDING_MODEL: String = TEXT_EMBEDDING_3_SMALL
    }
}
// OpenAI Usage Example:
/**
 * Generates text for [prompt] with GPT-5 at a moderately creative temperature.
 */
@Action(description = "Generate with GPT-5")
fun generateWithGpt5(prompt: String, context: ActionContext): String {
    val options = LlmOptions.builder()
        .model(OpenAiModels.GPT_5)
        .temperature(0.7)
        .build()
    return context.ai()
        .withLlm(options)
        .generateText(prompt)
}
/**
 * Produces a vector embedding for [text] using OpenAI's small embedding model.
 */
@Action(description = "Generate embeddings")
fun generateEmbedding(text: String, context: ActionContext): FloatArray {
    val options = LlmOptions.builder()
        .model(OpenAiModels.TEXT_EMBEDDING_3_SMALL)
        .build()
    return context.ai()
        .withLlm(options)
        .generateEmbedding(text)
}
// Anthropic Claude model definitions including Claude 3.x and 4.x series.
/**
 * Anthropic Claude model identifiers for the Embabel Agent API.
 *
 * Each constant is the literal model id string passed to the Anthropic API.
 */
class AnthropicModels {
    companion object {
        /** Claude 3.7 Sonnet (latest alias). */
        const val CLAUDE_37_SONNET: String = "claude-3-7-sonnet-latest"

        /** Claude 3.5 Haiku (latest alias). */
        const val CLAUDE_35_HAIKU: String = "claude-3-5-haiku-latest"

        /** Claude Opus 4.0 (date-pinned snapshot). */
        const val CLAUDE_40_OPUS: String = "claude-opus-4-20250514"

        /** Claude Opus 4.1. */
        const val CLAUDE_41_OPUS: String = "claude-opus-4-1"

        /** Claude Sonnet 4.5. */
        const val CLAUDE_SONNET_4_5: String = "claude-sonnet-4-5"

        /** Claude Haiku 4.5. */
        const val CLAUDE_HAIKU_4_5: String = "claude-haiku-4-5"

        /** Provider identifier used to select Anthropic as the backing provider. */
        const val PROVIDER: String = "Anthropic"
    }
}
// Anthropic Usage Example:
/**
 * Generates text for [prompt] with Claude Opus 4.1, capped at 4096 output tokens.
 */
@Action(description = "Generate with Claude Opus 4.1")
fun generateWithClaude(prompt: String, context: ActionContext): String {
    val options = LlmOptions.builder()
        .model(AnthropicModels.CLAUDE_41_OPUS)
        .temperature(0.7)
        .maxTokens(4096)
        .build()
    return context.ai()
        .withLlm(options)
        .generateText(prompt)
}
/**
 * Solves [problem] with Claude Sonnet 4.5 using extended thinking mode,
 * returning the structured [Solution] extracted from the response content.
 */
@Action(description = "Use extended thinking with Claude Sonnet")
fun thinkWithClaude(problem: String, context: ActionContext): Solution {
    val options = LlmOptions.builder()
        .model(AnthropicModels.CLAUDE_SONNET_4_5)
        .build()
    return context.promptRunner()
        .withLlm(options)
        .thinking()
        .createObject("Solve: $problem", Solution::class.java)
        .content
}
// Google Gemini model definitions (Java class) including Gemini 2.x and 3.x series.
/**
 * Google Gemini model identifiers for the Embabel Agent API (Java class).
 *
 * Each constant is the literal model id string passed to the Gemini API.
 */
public final class GeminiModels {

    // Gemini 3.0 Family (Latest)
    /** Gemini 3 Pro Preview. */
    public static final String GEMINI_3_PRO_PREVIEW = "gemini-3-pro-preview";

    // Gemini 2.5 Family (Current Generation)
    /** Gemini 2.5 Pro. */
    public static final String GEMINI_2_5_PRO = "gemini-2.5-pro";
    /** Gemini 2.5 Flash. */
    public static final String GEMINI_2_5_FLASH = "gemini-2.5-flash";
    /** Gemini 2.5 Flash Lite. */
    public static final String GEMINI_2_5_FLASH_LITE = "gemini-2.5-flash-lite";

    // Gemini 2.0 Family (Previous Generation)
    /** Gemini 2.0 Flash. */
    public static final String GEMINI_2_0_FLASH = "gemini-2.0-flash";
    /** Gemini 2.0 Flash Lite. */
    public static final String GEMINI_2_0_FLASH_LITE = "gemini-2.0-flash-lite";

    /** Provider identifier used to select Google as the backing provider. */
    public static final String PROVIDER = "Google";

    /** Text embedding model 004. */
    public static final String TEXT_EMBEDDING_004 = "text-embedding-004";
    /** Default text embedding model; currently text-embedding-004. */
    public static final String DEFAULT_TEXT_EMBEDDING_MODEL = TEXT_EMBEDDING_004;
}
// Google GenAI model definitions (Kotlin object) with native Spring AI Google GenAI support.
/**
 * Google GenAI model identifiers for the Embabel Agent API (Kotlin),
 * used with native Spring AI Google GenAI support.
 *
 * Each constant is the literal model id string passed to the Google GenAI API.
 */
class GoogleGenAiModels {
    companion object {
        // Gemini 3 Family (Preview - Latest Generation)
        /** Gemini 3 Pro Preview. */
        const val GEMINI_3_PRO_PREVIEW: String = "gemini-3-pro-preview"

        /** Gemini 3 Flash Preview. */
        const val GEMINI_3_FLASH_PREVIEW: String = "gemini-3-flash-preview"

        // Gemini 2.5 Family (Stable - Current Generation)
        /** Gemini 2.5 Pro. */
        const val GEMINI_2_5_PRO: String = "gemini-2.5-pro"

        /** Gemini 2.5 Flash. */
        const val GEMINI_2_5_FLASH: String = "gemini-2.5-flash"

        /** Gemini 2.5 Flash Lite. */
        const val GEMINI_2_5_FLASH_LITE: String = "gemini-2.5-flash-lite"

        // Gemini 2.0 Family (Previous Generation)
        /** Gemini 2.0 Flash. */
        const val GEMINI_2_0_FLASH: String = "gemini-2.0-flash"

        /** Gemini 2.0 Flash Lite. */
        const val GEMINI_2_0_FLASH_LITE: String = "gemini-2.0-flash-lite"

        // Embedding Models
        /** Gemini Embedding 001. */
        const val GEMINI_EMBEDDING_001: String = "gemini-embedding-001"

        /** Text Embedding 005. */
        const val TEXT_EMBEDDING_005: String = "text-embedding-005"

        /** Text Embedding 004. */
        const val TEXT_EMBEDDING_004: String = "text-embedding-004"

        /** Default text embedding model; currently text-embedding-004. */
        const val DEFAULT_TEXT_EMBEDDING_MODEL: String = TEXT_EMBEDDING_004

        /** Provider identifier used to select Google GenAI as the backing provider. */
        const val PROVIDER: String = "GoogleGenAI"
    }
}
// Google Models Usage Example:
/**
 * Generates text for [prompt] with Gemini 2.5 Pro at a fairly creative temperature.
 */
@Action(description = "Generate with Gemini")
fun generateWithGemini(prompt: String, context: ActionContext): String {
    val options = LlmOptions.builder()
        .model(GoogleGenAiModels.GEMINI_2_5_PRO)
        .temperature(0.8)
        .build()
    return context.ai()
        .withLlm(options)
        .generateText(prompt)
}
/**
 * Analyzes the image at [imageUrl] with Gemini 2.5 Pro and returns a structured [Analysis].
 *
 * Uses [GoogleGenAiModels.GEMINI_2_5_PRO] for consistency with the other Google GenAI
 * examples in this section (the id string is identical to [GeminiModels.GEMINI_2_5_PRO]).
 */
@Action(description = "Analyze image with Gemini Vision")
fun analyzeImage(imageUrl: String, context: ActionContext): Analysis {
    val image = Image.fromUrl(imageUrl)
    val options = LlmOptions.builder()
        .model(GoogleGenAiModels.GEMINI_2_5_PRO)
        .build()
    return context.promptRunner()
        .withLlm(options)
        .withImage(image)
        .createObject<Analysis>("Analyze this image")
}
// Ollama local model definitions including Llama, Mistral, Gemma, code-specialized, and vision models.
/**
 * Ollama local model identifiers for the Embabel Agent API.
 *
 * Each constant is the literal `name:tag` string passed to a local Ollama server.
 */
class OllamaModels {
    companion object {
        /** Provider identifier used to select Ollama as the backing provider. */
        const val PROVIDER: String = "Ollama"

        // Llama Models
        /** Llama 3.1 8B. */
        const val LLAMA_31_8B: String = "llama3.1:8b"
        /** Llama 3.1 70B. */
        const val LLAMA_31_70B: String = "llama3.1:70b"
        /** Llama 3.1 405B. */
        const val LLAMA_31_405B: String = "llama3.1:405b"
        /** Llama 3 8B. */
        const val LLAMA_3_8B: String = "llama3:8b"
        /** Llama 3 70B. */
        const val LLAMA_3_70B: String = "llama3:70b"
        /** Llama 2 7B. */
        const val LLAMA_2_7B: String = "llama2:7b"
        /** Llama 2 13B. */
        const val LLAMA_2_13B: String = "llama2:13b"
        /** Llama 2 70B. */
        const val LLAMA_2_70B: String = "llama2:70b"

        // Code-specialized Models
        /** CodeLlama 7B. */
        const val CODELLAMA_7B: String = "codellama:7b"
        /** CodeLlama 13B. */
        const val CODELLAMA_13B: String = "codellama:13b"
        /** CodeLlama 34B. */
        const val CODELLAMA_34B: String = "codellama:34b"
        /** CodeGemma 2B. */
        const val CODEGEMMA_2B: String = "codegemma:2b"
        /** CodeGemma 7B. */
        const val CODEGEMMA_7B: String = "codegemma:7b"

        // Gemma Models
        /** Gemma 2B. */
        const val GEMMA_2B: String = "gemma:2b"
        /** Gemma 7B. */
        const val GEMMA_7B: String = "gemma:7b"
        /** Gemma2 9B. */
        const val GEMMA2_9B: String = "gemma2:9b"
        /** Gemma2 27B. */
        const val GEMMA2_27B: String = "gemma2:27b"

        // Mistral Models
        /** Mistral 7B. */
        const val MISTRAL_7B: String = "mistral:7b"
        /** Mixtral 8x7B. */
        const val MIXTRAL_8X7B: String = "mixtral:8x7b"
        /** Mixtral 8x22B. */
        const val MIXTRAL_8X22B: String = "mixtral:8x22b"

        // Specialized Models
        /** Qwen2 0.5B. */
        const val QWEN2_0_5B: String = "qwen2:0.5b"
        /** Qwen2 1.5B. */
        const val QWEN2_1_5B: String = "qwen2:1.5b"
        /** Qwen2 7B. */
        const val QWEN2_7B: String = "qwen2:7b"
        /** Qwen2 72B. */
        const val QWEN2_72B: String = "qwen2:72b"
        /** Phi-3 Mini. */
        const val PHI3_MINI: String = "phi3:mini"
        /** Phi-3 Medium. */
        const val PHI3_MEDIUM: String = "phi3:medium"
        /** Neural Chat 7B. */
        const val NEURAL_CHAT_7B: String = "neural-chat:7b"
        /** Orca Mini 3B. */
        const val ORCA_MINI_3B: String = "orca-mini:3b"
        /** Vicuna 7B. */
        const val VICUNA_7B: String = "vicuna:7b"
        /** Vicuna 13B. */
        const val VICUNA_13B: String = "vicuna:13b"

        // Embedding Models
        /** Nomic Embed Text. */
        const val NOMIC_EMBED_TEXT: String = "nomic-embed-text"
        /** All-MiniLM embedding model. */
        const val ALL_MINILM: String = "all-minilm"

        // Vision Models
        /** LLaVA 7B vision model. */
        const val LLAVA_7B: String = "llava:7b"
        /** LLaVA 13B vision model. */
        const val LLAVA_13B: String = "llava:13b"
        /** LLaVA 34B vision model. */
        const val LLAVA_34B: String = "llava:34b"
    }
}
// Ollama Usage Example:
/**
 * Generates text for [prompt] with a locally hosted Llama 3.1 8B model via Ollama.
 */
@Action(description = "Generate with local Ollama model")
fun generateWithOllama(prompt: String, context: ActionContext): String {
    val options = LlmOptions.builder()
        .model(OllamaModels.LLAMA_31_8B)
        .temperature(0.7)
        .build()
    return context.ai()
        .withLlm(options)
        .generateText(prompt)
}
/**
 * Generates source code for [requirements] with CodeLlama 13B via Ollama,
 * using a low temperature for more deterministic output.
 */
@Action(description = "Generate code with CodeLlama")
fun generateCodeWithOllama(requirements: String, context: ActionContext): String {
    val options = LlmOptions.builder()
        .model(OllamaModels.CODELLAMA_13B)
        .temperature(0.3)
        .build()
    return context.ai()
        .withLlm(options)
        .generateText("Generate code: $requirements")
}
// Mistral AI model definitions including Mistral, Ministral, Codestral, and Devstral series.
/**
 * Mistral AI model identifiers for the Embabel Agent API.
 *
 * Each constant is the literal (date-versioned) model id string passed to the Mistral API.
 */
class MistralAiModels {
    companion object {
        /** Provider identifier used to select Mistral AI as the backing provider. */
        const val PROVIDER: String = "Mistral AI"

        /** Mistral Medium 3.1. */
        const val MISTRAL_MEDIUM_31: String = "mistral-medium-2508"

        /** Mistral Small 3.2. */
        const val MISTRAL_SMALL_32: String = "mistral-small-2506"

        /** Ministral 8B. */
        const val MINISTRAL_8B: String = "ministral-8b-2410"

        /** Ministral 3B. */
        const val MINISTRAL_3B: String = "ministral-3b-2410"

        /** Codestral (code-specialized). */
        const val CODESTRAL: String = "codestral-2508"

        /** Devstral Medium 1.0. */
        const val DEVSTRAL_MEDIUM_10: String = "devstral-medium-2507"

        /** Devstral Small 1.1. */
        const val DEVSTRAL_SMALL_11: String = "devstral-small-2507"

        /** Mistral Large 2.1. */
        const val MISTRAL_LARGE_21: String = "mistral-large-2411"
    }
}
// Mistral Usage Example:
/**
 * Generates text for [prompt] with Mistral Large 2.1.
 */
@Action(description = "Generate with Mistral Large")
fun generateWithMistral(prompt: String, context: ActionContext): String {
    val options = LlmOptions.builder()
        .model(MistralAiModels.MISTRAL_LARGE_21)
        .temperature(0.7)
        .build()
    return context.ai()
        .withLlm(options)
        .generateText(prompt)
}
/**
 * Generates a structured [Code] result for [requirements] with Codestral,
 * using a low temperature for more deterministic output.
 */
@Action(description = "Generate code with Codestral")
fun generateCodeWithCodestral(requirements: String, context: ActionContext): Code {
    val options = LlmOptions.builder()
        .model(MistralAiModels.CODESTRAL)
        .temperature(0.3)
        .build()
    return context.ai()
        .withLlm(options)
        .createObject<Code>("Generate: $requirements")
}
// DeepSeek model definitions including Chat and Reasoner models.
/**
 * DeepSeek model identifiers for the Embabel Agent API.
 *
 * Each constant is the literal model id string passed to the DeepSeek API.
 */
class DeepSeekModels {
    companion object {
        /** DeepSeek Chat (general-purpose). */
        const val DEEPSEEK_CHAT: String = "deepseek-chat"

        /** DeepSeek Reasoner (reasoning-focused). */
        const val DEEPSEEK_REASONER: String = "deepseek-reasoner"

        /** Provider identifier used to select DeepSeek as the backing provider. */
        const val PROVIDER: String = "Deepseek"
    }
}
// DeepSeek Usage Example:
/**
 * Generates a structured [Code] result for [requirements] with DeepSeek Chat,
 * using a low temperature for more deterministic output.
 */
@Action(description = "Generate code with DeepSeek")
fun generateCode(requirements: String, context: ActionContext): Code {
    val options = LlmOptions.builder()
        .model(DeepSeekModels.DEEPSEEK_CHAT)
        .temperature(0.3)
        .build()
    return context.ai()
        .withLlm(options)
        .createObject<Code>("Generate code: $requirements")
}
/**
 * Reasons through [problem] with DeepSeek Reasoner and returns a structured [Reasoning].
 */
@Action(description = "Reason about complex problem")
fun reasonAbout(problem: String, context: ActionContext): Reasoning {
    val options = LlmOptions.builder()
        .model(DeepSeekModels.DEEPSEEK_REASONER)
        .temperature(0.7)
        .build()
    return context.ai()
        .withLlm(options)
        .createObject<Reasoning>("Reason through: $problem")
}
// Docker-based local model configuration. Models are loaded from Docker endpoints when the "docker" profile is active.
/**
 * Docker-based local model configuration identifiers.
 *
 * Models are loaded from Docker endpoints when the "docker" profile is active.
 */
class DockerLocalModels {
    companion object {
        /** Spring profile that activates Docker-based model loading. */
        const val DOCKER_PROFILE: String = "docker"

        /** Provider identifier used to select Docker as the backing provider. */
        const val PROVIDER: String = "Docker"
    }
}
// Model names will be precisely as reported from the Docker endpoint (default: http://localhost:12434/engines/v1/models).
LM Studio local model definitions.
/**
 * LM Studio local model configuration identifiers.
 */
class LmStudioModels {
    companion object {
        /** Provider identifier used to select LM Studio as the backing provider. */
        const val PROVIDER: String = "LM Studio"
    }
}
// LM Studio Usage Example:
/**
 * Generates text for [prompt] against a local LM Studio server.
 *
 * No model id is set; the request targets LM Studio's default local endpoint,
 * which serves whichever model is currently loaded.
 */
@Action(description = "Generate with LM Studio")
fun generateWithLmStudio(prompt: String, context: ActionContext): String {
    val options = LlmOptions.builder()
        .endpoint("http://localhost:1234/v1")
        .temperature(0.7)
        .build()
    return context.ai()
        .withLlm(options)
        .generateText(prompt)
}
// Select models based on requirements.
/**
 * Generates text for [prompt] with a model chosen from [requirements].
 *
 * Branch order is significant: earlier capabilities take priority when
 * several requirement flags are set; GPT-5 is the fallback.
 */
@Action(description = "Generate with model selection")
fun generateWithSelection(
    prompt: String,
    requirements: Requirements,
    context: ActionContext
): String {
    val selectedModel = when {
        requirements.needsThinking -> AnthropicModels.CLAUDE_SONNET_4_5
        requirements.needsVision -> GeminiModels.GEMINI_2_5_PRO
        requirements.needsCode -> MistralAiModels.CODESTRAL
        requirements.needsReasoning -> DeepSeekModels.DEEPSEEK_REASONER
        requirements.costSensitive -> OllamaModels.LLAMA_31_8B
        else -> OpenAiModels.GPT_5
    }
    val options = LlmOptions.builder()
        .model(selectedModel)
        .build()
    return context.ai()
        .withLlm(options)
        .generateText(prompt)
}
/**
 * Capability flags that drive model selection; all default to `false`.
 *
 * @property needsThinking extended-thinking support required
 * @property needsVision image/vision support required
 * @property needsCode code-specialized model required
 * @property needsReasoning dedicated reasoning model required
 * @property costSensitive prefer a cheap local model
 */
data class Requirements(
    val needsThinking: Boolean = false,
    val needsVision: Boolean = false,
    val needsCode: Boolean = false,
    val needsReasoning: Boolean = false,
    val costSensitive: Boolean = false,
)
// Use multiple models for consensus.
/**
 * Asks Claude Opus 4.1, GPT-5, and Gemini 2.5 Pro the same [question],
 * then synthesizes their answers into a single [ConsensusResult].
 *
 * The raw-string prompt is passed through `trimIndent()` so source-code
 * indentation is not sent to the model.
 */
@Action(description = "Get consensus from multiple models")
fun getConsensus(
    question: String,
    context: ActionContext
): ConsensusResult {
    // Get responses from multiple models
    val claudeResponse = context.ai()
        .withLlm(LlmOptions.model(AnthropicModels.CLAUDE_41_OPUS))
        .generateText(question)
    val gpt5Response = context.ai()
        .withLlm(LlmOptions.model(OpenAiModels.GPT_5))
        .generateText(question)
    val geminiResponse = context.ai()
        .withLlm(LlmOptions.model(GoogleGenAiModels.GEMINI_2_5_PRO))
        .generateText(question)
    // Build consensus
    return context.promptRunner()
        .createObject<ConsensusResult>(
            """
            Three AI models answered a question. Build consensus:
            Claude: $claudeResponse
            GPT-5: $gpt5Response
            Gemini: $geminiResponse
            Synthesize a consensus answer.
            """.trimIndent()
        )
}
/**
 * Synthesized multi-model consensus.
 *
 * @property answer the consensus answer text
 * @property confidence confidence in the consensus (presumably 0.0-1.0 — not enforced here)
 * @property agreements points on which the models agreed
 * @property disagreements points on which the models disagreed
 */
data class ConsensusResult(
    val answer: String,
    val confidence: Double,
    val agreements: List<String>,
    val disagreements: List<String>,
)
// Configure model-specific parameters.
/**
 * Demonstrates model-specific features: Claude extended thinking and
 * DeepSeek reasoning. Returns the content of the Claude result.
 *
 * NOTE(review): `claudeResult` exposes `.thinking` and `.content`, so
 * `generateText(...)` after `.thinking()` presumably returns a rich result
 * object rather than the plain String seen in other examples — confirm
 * against the actual API. `logger` is assumed to be supplied by the
 * enclosing class — not visible in this snippet.
 */
@Action(description = "Use model-specific features")
fun useModelFeatures(prompt: String, context: ActionContext): String {
    // Claude with thinking
    val claudeResult = context.ai()
        .withLlm(LlmOptions.builder()
            .model(AnthropicModels.CLAUDE_SONNET_4_5)
            .temperature(0.7)
            .build())
        .thinking()
        .generateText(prompt)
    logger.info("Claude thinking: ${claudeResult.thinking}")
    // DeepSeek with reasoning
    val deepseekResult = context.ai()
        .withLlm(LlmOptions.builder()
            .model(DeepSeekModels.DEEPSEEK_REASONER)
            .temperature(0.7)
            .maxTokens(8000)
            .build())
        .generateText(prompt)
    return claudeResult.content
}

/**
 * API sketch of the LLM configuration options (signature reference only —
 * the companion factory functions are shown without bodies).
 *
 * @property model literal model id string (see the *Models classes above)
 * @property temperature sampling temperature
 * @property maxTokens maximum output tokens
 * @property topP nucleus-sampling cutoff
 * @property endpoint optional custom endpoint URL (e.g. a local server)
 */
interface LlmOptions {
    val model: String
    val temperature: Double
    val maxTokens: Int
    val topP: Double
    val endpoint: String?

    companion object {
        /** Returns a new [Builder]. */
        fun builder(): Builder
        /** Shorthand: options with only [model] set. */
        fun model(model: String): LlmOptions
    }

    /** Fluent builder for [LlmOptions]; each setter returns `this`-style [Builder]. */
    interface Builder {
        fun model(model: String): Builder
        fun temperature(temperature: Double): Builder
        fun maxTokens(maxTokens: Int): Builder
        fun topP(topP: Double): Builder
        fun endpoint(endpoint: String): Builder
        fun build(): LlmOptions
    }
}
// Install with Tessl CLI
npx tessl i tessl/maven-com-embabel-agent--embabel-agent-apidocs