Fluent DSL and Kotlin DSL for building autonomous agents with planning capabilities on the JVM, featuring annotation-based and programmatic configuration for agentic flows with Spring Boot integration
The PromptRunner API provides a fluent interface for interacting with Large Language Models. It supports text generation, structured object creation, template rendering, extended reasoning (thinking), and streaming responses.
The Ai interface is the main entry point for accessing LLM and embedding capabilities. It provides factory methods to create PromptRunner and EmbeddingService instances with various model selection strategies.
interface Ai {
/** Create a PromptRunner with a specific model by name */
fun withLlm(model: String): PromptRunner
/** Create a PromptRunner with a specific model by role */
fun withLlmByRole(role: String): PromptRunner
/** Create a PromptRunner with automatic model selection */
fun withAutoLlm(): PromptRunner
/** Create a PromptRunner with the default model */
fun withDefaultLlm(): PromptRunner
/** Create a PromptRunner with fallback chain of models */
fun withFirstAvailableLlmOf(vararg llms: String): PromptRunner
/** Create an EmbeddingService with a specific model by name */
fun withEmbeddingService(model: String): EmbeddingService
/** Create an EmbeddingService with model selection criteria */
fun withEmbeddingService(criteria: ModelSelectionCriteria): EmbeddingService
/** Create an EmbeddingService with the default model */
fun withDefaultEmbeddingService(): EmbeddingService
}
Basic Usage:
@Action(description = "Generate text with specific model")
fun generateWithModel(prompt: String, context: ActionContext): String {
val ai = context.ai()
return ai.withLlm("claude-3-5-sonnet-20241022")
.generateText(prompt)
}
@Action(description = "Generate with fallback models")
fun generateWithFallback(prompt: String, context: ActionContext): String {
val ai = context.ai()
// Try models in order until one succeeds
return ai.withFirstAvailableLlmOf(
"claude-3-5-sonnet-20241022",
"claude-3-haiku-20240307",
"gpt-4"
).generateText(prompt)
}
@Action(description = "Use role-based model selection")
fun generateWithRole(prompt: String, context: ActionContext): String {
val ai = context.ai()
// Use model assigned to "creative-writing" role
return ai.withLlmByRole("creative-writing")
.generateText(prompt)
}
The AiBuilder interface allows creating custom Ai instances with specific configuration options.
interface AiBuilder : LlmVerbosity {
/** Build the Ai instance */
fun ai(): Ai
/** Configure process options */
fun withProcessOptions(options: ProcessOptions): AiBuilder
/** Enable/disable prompt display */
fun withShowPrompts(show: Boolean): AiBuilder
/** Enable/disable LLM response display */
fun withShowLlmResponses(show: Boolean): AiBuilder
}
Usage Example:
@Action(description = "Create custom AI instance")
fun customAiExample(context: ActionContext): String {
// NOTE(review): AiBuilder is declared above as an interface, so `AiBuilder()` cannot be a
// constructor call — presumably a same-named factory function exists; TODO confirm against the API.
val customAi = AiBuilder()
.withShowPrompts(true)
.withShowLlmResponses(true)
.withProcessOptions(ProcessOptions.builder()
.timeout(Duration.ofSeconds(30))
.build())
.ai()
// Verbose settings above apply to every call made through this Ai instance.
return customAi.withDefaultLlm()
.generateText("Analyze this with verbose logging")
}Sealed interface for specifying how to select LLM models.
sealed interface ModelSelectionCriteria {
/** Use the default model */
object DefaultModelSelectionCriteria : ModelSelectionCriteria
/** Use automatic model selection based on task */
object AutoModelSelectionCriteria : ModelSelectionCriteria
/** Select model by name */
data class ByNameModelSelectionCriteria(val model: String) : ModelSelectionCriteria
/** Select model by role */
data class ByRoleModelSelectionCriteria(val role: String) : ModelSelectionCriteria
/** Try models in order until one succeeds */
data class FallbackByNameModelSelectionCriteria(
val models: List<String>
) : ModelSelectionCriteria
companion object {
/** Create criteria to select by name */
fun byName(model: String): ByNameModelSelectionCriteria
/** Create criteria to select by role */
fun byRole(role: String): ByRoleModelSelectionCriteria
/** Get default criteria */
fun default(): DefaultModelSelectionCriteria
/** Get auto-selection criteria */
fun auto(): AutoModelSelectionCriteria
/** Create fallback chain */
fun fallback(vararg models: String): FallbackByNameModelSelectionCriteria
}
}
Usage Example:
@Action(description = "Use model selection criteria")
fun criteriaExample(context: ActionContext): String {
val ai = context.ai()
// Use criteria for embedding service
val embeddingService = ai.withEmbeddingService(
ModelSelectionCriteria.byName("text-embedding-3-large")
)
// (result unused in this example — shown only to demonstrate the call)
val embedding = embeddingService.embed("Example text")
// Use auto-selection
// NOTE(review): the Ai interface listed above only shows withLlm(model: String); an overload
// taking ModelSelectionCriteria is not shown — verify this call against the actual API.
return ai.withLlm(ModelSelectionCriteria.auto())
.generateText("Analyze this text")
}The EmbeddingService interface provides methods for generating vector embeddings from text.
interface EmbeddingService {
/** Generate embedding for a single text */
fun embed(text: String): Embedding
/** Generate embeddings for multiple texts */
fun embed(texts: List<String>): List<Embedding>
/** Generate embedding with options */
fun embedWithOptions(text: String, options: EmbeddingOptions): Embedding
/** Get the model name used by this service */
val modelName: String
/** Get embedding dimensions */
val dimensions: Int
}
/** Type alias for embedding vectors */
typealias Embedding = FloatArray
Embedding Usage:
@Action(description = "Generate embeddings for similarity search")
fun generateEmbeddings(
documents: List<String>,
context: ActionContext
): List<Embedding> {
val ai = context.ai()
val embeddingService = ai.withEmbeddingService("text-embedding-3-large")
return embeddingService.embed(documents)
}
@Action(description = "Find similar documents")
fun findSimilar(
    query: String,
    documents: List<String>,
    context: ActionContext
): List<String> {
    val ai = context.ai()
    val embeddingService = ai.withDefaultEmbeddingService()
    // Embed the query once, then every candidate document with the same model.
    val queryEmbedding = embeddingService.embed(query)
    val docEmbeddings = embeddingService.embed(documents)
    // Score each document by cosine similarity against the query, keep the five best.
    val scored = documents.zip(docEmbeddings) { doc, embedding ->
        doc to cosineSimilarity(queryEmbedding, embedding)
    }
    return scored
        .sortedByDescending { (_, score) -> score }
        .take(5)
        .map { (doc, _) -> doc }
}
/**
 * Cosine similarity between two equal-length vectors, in [-1, 1].
 *
 * Validates dimensions (consistent with the other cosineSimilarity in this document) and
 * returns 0 when either vector has zero magnitude — the original divided by zero there,
 * producing NaN, which destabilizes the similarity sort in callers.
 *
 * @throws IllegalArgumentException if the vectors differ in length
 */
private fun cosineSimilarity(a: FloatArray, b: FloatArray): Float {
    require(a.size == b.size) { "Vectors must have same dimensions" }
    var dotProduct = 0f
    var normA = 0f
    var normB = 0f
    for (i in a.indices) {
        dotProduct += a[i] * b[i]
        normA += a[i] * a[i]
        normB += b[i] * b[i]
    }
    val denominator = sqrt(normA) * sqrt(normB)
    return if (denominator == 0f) 0f else dotProduct / denominator
}
interface PromptRunner : LlmUse, PromptRunnerOperations {
/** Tool objects available to LLM */
val toolObjects: List<ToolObject>
/** Messages in the conversation */
val messages: List<Message>
/** Images for multimodal prompts */
val images: List<AgentImage>
/** Configure LLM options */
fun withLlm(llm: LlmOptions): PromptRunner
/** Add message to conversation */
fun withMessage(message: Message): PromptRunner
/** Add multiple messages */
fun withMessages(messages: List<Message>): PromptRunner
/** Add image for multimodal input */
fun withImage(image: Image): PromptRunner
/** Add multiple images */
fun withImages(images: List<Image>): PromptRunner
/** Add tool group */
fun withToolGroup(group: ToolGroup): PromptRunner
/** Add single tool */
fun withTool(tool: Tool): PromptRunner
/** Add tool object */
fun withToolObject(obj: ToolObject): PromptRunner
/** Set interaction ID for tracking */
fun withInteractionId(interactionId: InteractionId): PromptRunner
/** Set request ID */
fun withId(id: String): PromptRunner
/** Add LLM reference (documentation, context) */
fun withReference(reference: LlmReference): PromptRunner
/** Add multiple references */
fun withReferences(references: List<LlmReference>): PromptRunner
/** Set system prompt */
fun withSystemPrompt(systemPrompt: String): PromptRunner
/** Add prompt contributor */
fun withPromptContributor(contributor: PromptContributor): PromptRunner
/** Add multiple prompt contributors */
fun withPromptContributors(contributors: List<PromptContributor>): PromptRunner
/** Add contextual prompt contributor */
fun withContextualPromptContributor(contributor: ContextualPromptElement): PromptRunner
/** Add multiple contextual contributors */
fun withContextualPromptContributors(contributors: List<ContextualPromptElement>): PromptRunner
/** Enable/disable example generation */
fun withGenerateExamples(generateExamples: Boolean): PromptRunner
/** Add guardrails for validation */
fun withGuardRails(vararg guardRails: GuardRail): PromptRunner
/** Add handoffs (experimental) */
fun withHandoffs(vararg outputTypes: Class<*>): PromptRunner
/** Add subagents (experimental) */
fun withSubagents(vararg subagents: Subagent): PromptRunner
/** Check if streaming is supported */
fun supportsStreaming(): Boolean
/** Get streaming capability */
fun streaming(): StreamingCapability
/** Check if extended thinking is supported */
fun supportsThinking(): Boolean
/** Get thinking capability */
fun thinking(): Thinking
/** Get object creation interface */
fun <T> creating(outputClass: Class<T>): Creating<T>
/** Get template rendering interface */
fun rendering(templateName: String): Rendering
/** Generate text from prompt */
fun generateText(prompt: String): String
/** Generate text from messages */
fun generateText(messages: List<Message>): String
/** Generate text from multimodal content */
fun generateText(content: MultimodalContent): String
/** Respond to messages */
fun respond(messages: List<Message>): AssistantMessage
/** Respond to multimodal content */
fun respond(content: MultimodalContent): AssistantMessage
}
Basic Usage:
@Action(description = "Generate product description")
fun generateDescription(product: Product, context: ActionContext): String {
return context.promptRunner()
.withSystemPrompt("You are a professional copywriter.")
.generateText("""
Write a compelling product description for:
Name: ${product.name}
Features: ${product.features.joinToString()}
""")
}
Create structured objects from prompts.
interface Creating<T> {
/** Add example for few-shot learning */
fun withExample(description: String, value: T): Creating<T>
/** Add creation example */
fun withExample(example: CreationExample<T>): Creating<T>
/** Add multiple examples */
fun withExamples(examples: Iterable<CreationExample<T>>): Creating<T>
/** Add multiple examples (vararg) */
fun withExamples(vararg examples: CreationExample<T>): Creating<T>
/** Filter properties to include */
fun withPropertyFilter(filter: Predicate<String>): Creating<T>
/** Include only specific properties */
fun withProperties(vararg properties: String): Creating<T>
/** Exclude specific properties */
fun withoutProperties(vararg properties: String): Creating<T>
/** Enable/disable validation */
fun withValidation(validate: Boolean): Creating<T>
/** Disable validation */
fun withoutValidation(): Creating<T>
/** Create object from prompt */
fun fromPrompt(prompt: String): T
/** Create object from template */
fun fromTemplate(templateName: String, model: Map<String, Any>): T
/** Create object from messages */
fun fromMessages(messages: List<Message>): T
}
data class CreationExample<T>(
val description: String,
val value: T
)
Object Creation Example:
data class CustomerInsight(
val sentiment: String,
val keyThemes: List<String>,
val actionItems: List<String>,
val urgency: String
)
@Action(description = "Analyze customer feedback")
fun analyzeFeedback(feedback: CustomerFeedback, context: ActionContext): CustomerInsight {
return context.promptRunner()
.creating(CustomerInsight::class.java)
.withExample(
"Positive feedback about fast delivery",
CustomerInsight(
sentiment = "positive",
keyThemes = listOf("delivery", "speed"),
actionItems = listOf("maintain delivery standards"),
urgency = "low"
)
)
.withValidation(true)
.fromPrompt("""
Analyze this customer feedback:
${feedback.text}
Extract sentiment, key themes, action items, and urgency level.
""")
}
Convenient extension functions with reified generics.
/** Create object with reified type */
inline fun <reified T> PromptRunner.createObject(prompt: String): T
/** Alias for createObject */
inline fun <reified T> PromptRunner.create(prompt: String): T
/** Create object that may be null */
inline fun <reified T> PromptRunner.createObjectIfPossible(prompt: String): T?
Extension Function Example:
@Action(description = "Extract entities from text")
fun extractEntities(text: String, context: ActionContext): EntityList {
// Using reified generic - no need to pass class
return context.promptRunner().createObject<EntityList>("""
Extract all named entities from this text:
$text
""")
}
data class EntityList(
val people: List<String>,
val organizations: List<String>,
val locations: List<String>
)Use extended reasoning for complex tasks (available on models like Claude Sonnet).
interface Thinking {
/** Generate text with thinking */
infix fun generateText(prompt: String): ThinkingResponse<String>
/** Create object with thinking */
fun <T> createObject(prompt: String, outputClass: Class<T>): ThinkingResponse<T>
/** Create object (nullable) with thinking */
fun <T> createObjectIfPossible(prompt: String, outputClass: Class<T>): ThinkingResponse<T?>
/** Create object from messages with thinking */
fun <T> createObject(messages: List<Message>, outputClass: Class<T>): ThinkingResponse<T>
/** Create object (nullable) from messages with thinking */
fun <T> createObjectIfPossible(messages: List<Message>, outputClass: Class<T>): ThinkingResponse<T?>
/** Generate text from multimodal content with thinking */
fun generateText(content: MultimodalContent): ThinkingResponse<String>
/** Create object from multimodal content with thinking */
fun <T> createObject(content: MultimodalContent, outputClass: Class<T>): ThinkingResponse<T>
/** Create object (nullable) from multimodal content with thinking */
fun <T> createObjectIfPossible(content: MultimodalContent, outputClass: Class<T>): ThinkingResponse<T?>
/** Respond with assistant message */
fun respond(content: MultimodalContent): ThinkingResponse<AssistantMessage>
/** Respond to messages */
fun respond(messages: List<Message>): ThinkingResponse<AssistantMessage>
/** Evaluate condition with thinking */
fun evaluateCondition(
condition: String,
context: String,
confidenceThreshold: ZeroToOne = 0.8
): ThinkingResponse<Boolean>
}
/** Response from an extended-thinking operation: the result plus the model's reasoning. */
data class ThinkingResponse<T>(
    /** The generated content/result. */
    val content: T,
    /** The reasoning the model produced while thinking. */
    val thinking: String
) {
    /** Transform the content, carrying the thinking text along unchanged. */
    fun <R> map(transform: (T) -> R): ThinkingResponse<R> {
        val mapped = transform(content)
        return ThinkingResponse(mapped, thinking)
    }
    /** Unwrap just the content. */
    fun get(): T = content
}
@Action(description = "Solve complex problem with extended thinking")
fun solveComplexProblem(problem: Problem, context: ActionContext): Solution {
val response = context.promptRunner()
.thinking()
.createObject("""
Solve this complex problem:
${problem.description}
Requirements:
- Consider edge cases
- Optimize for performance
- Ensure correctness
""".trimIndent(), Solution::class.java)
// Log the thinking process
logger.info("LLM thinking: ${response.thinking}")
return response.content
}
@Action(description = "Analyze with thinking and transform result")
fun analyzeWithThinking(data: String, context: ActionContext): AnalysisReport {
val thinkingResponse = context.promptRunner()
.thinking()
.createObject("""
Perform deep analysis of:
$data
""", Analysis::class.java)
// Access both thinking and content
logger.info("Reasoning: ${thinkingResponse.thinking}")
// Transform the result while preserving thinking context
val reportResponse = thinkingResponse.map { analysis ->
AnalysisReport(
analysis = analysis,
reasoning = thinkingResponse.thinking,
timestamp = Instant.now()
)
}
return reportResponse.content
}
@Action(description = "Generate text with thinking")
fun generateWithReasoning(prompt: String, context: ActionContext): TextWithReasoning {
val response = context.promptRunner()
.thinking()
.generateText(prompt)
return TextWithReasoning(
text = response.content,
reasoning = response.thinking
)
}
@Action(description = "Evaluate complex condition with thinking")
fun evaluateComplexCondition(
condition: String,
context: ActionContext
): Boolean {
val response = context.promptRunner()
.thinking()
.evaluateCondition(
condition = condition,
context = "Evaluate based on current business rules",
confidenceThreshold = 0.9
)
if (response.content) {
logger.info("Condition met. Reasoning: ${response.thinking}")
} else {
logger.info("Condition not met. Reasoning: ${response.thinking}")
}
return response.content
}Render templates with LLM-generated content.
interface Rendering {
/** Create object from template */
fun <T> createObject(outputClass: Class<T>, model: Map<String, Any>): T
/** Generate text from template */
fun generateText(model: Map<String, Any>): String
/** Respond in conversation with template as system prompt */
fun respondWithSystemPrompt(conversation: Conversation, model: Map<String, Any> = emptyMap()): AssistantMessage
}Template Rendering Example:
@Action(description = "Generate email from template")
fun generateEmail(order: Order, context: ActionContext): Email {
return context.promptRunner()
.rendering("order-confirmation")
.createObject(Email::class.java, mapOf(
"order" to order,
"customer" to order.customer,
"items" to order.items
))
}Combine text and images in prompts.
/**
 * Text plus accompanying images for a single multimodal prompt.
 * Passed to PromptRunner.generateText / respond and to Thinking overloads above.
 */
data class MultimodalContent(
// The textual portion of the prompt
val text: String,
// Images sent alongside the text (may be empty for text-only use)
val images: List<Image>
)
class MultimodalContentBuilder {
fun withText(text: String): MultimodalContentBuilder
fun withImage(image: Image): MultimodalContentBuilder
fun withImages(images: List<Image>): MultimodalContentBuilder
fun build(): MultimodalContent
}Multimodal Example:
@Action(description = "Analyze product image")
fun analyzeProductImage(
imageUrl: String,
context: ActionContext
): ProductAnalysis {
val image = Image.fromUrl(imageUrl)
val content = MultimodalContent(
text = "Analyze this product image and extract details",
images = listOf(image)
)
return context.promptRunner()
.thinking()
.createObject(content, ProductAnalysis::class.java)
.content
}
data class ProductAnalysis(
val productType: String,
val colors: List<String>,
val condition: String,
val estimatedValue: Double
)Configure LLM behavior with options.
@Action(description = "Generate creative content")
fun generateCreativeContent(topic: String, context: ActionContext): String {
return context.promptRunner()
.withLlm(LlmOptions.builder()
.temperature(0.9)
.maxTokens(1000)
.topP(0.95)
.build())
.withSystemPrompt("You are a creative writer.")
.generateText("Write a creative story about: $topic")
}Provide tools to the LLM for function calling.
@Action(description = "Answer question with tools")
fun answerWithTools(question: String, context: ActionContext): String {
val calculator = Tool.create(
name = "calculator",
description = "Perform mathematical calculations",
handler = { input ->
val result = calculate(input)
Tool.Result.text(result.toString())
}
)
return context.promptRunner()
.withTool(calculator)
.withToolGroup(weatherToolGroup)
.generateText("Answer this question: $question")
}Add validation guardrails to LLM interactions.
@Action(description = "Generate safe content")
fun generateSafeContent(prompt: String, context: ActionContext): String {
val contentFilter = object : AssistantMessageGuardRail {
override fun validate(message: AssistantMessage): ValidationResult {
if (containsInappropriateContent(message.content)) {
throw GuardRailViolationException(
"Content contains inappropriate material",
this
)
}
return ValidationResult.valid()
}
}
return context.promptRunner()
.withGuardRails(contentFilter)
.generateText(prompt)
}Add dynamic context to prompts.
class CurrentDateContributor : PromptContributor {
override fun contribute(context: OperationContext): String {
return "Current date: ${LocalDate.now()}"
}
}
@Action(description = "Generate time-aware content")
fun generateTimeAwareContent(context: ActionContext): String {
return context.promptRunner()
.withPromptContributor(CurrentDateContributor())
.generateText("What should I focus on today?")
}Add external references for RAG (Retrieval-Augmented Generation).
@Action(description = "Answer question with documentation")
fun answerFromDocs(
question: String,
context: ActionContext
): String {
return context.promptRunner()
.withReference(LlmReferenceProviders.webpage("https://docs.example.com"))
.withReference(LlmReferenceProviders.directory("/docs/api"))
.withReference(LlmReferenceProviders.literalText("""
Additional context:
- API version: 2.0
- Environment: production
"""))
.generateText("Answer this question: $question")
}Configuration options for LLM behavior.
data class LlmOptions(
/** Model name/identifier */
val model: String,
/** Temperature (0.0-2.0): controls randomness. Lower = more deterministic, Higher = more creative */
val temperature: Double = 1.0,
/** Maximum tokens to generate */
val maxTokens: Int = 4096,
/** Top-p sampling (0.0-1.0): nucleus sampling threshold */
val topP: Double = 1.0,
/** Top-k sampling: limit to top k tokens */
val topK: Int? = null,
/** Frequency penalty (0.0-2.0): penalize repeated tokens */
val frequencyPenalty: Double = 0.0,
/** Presence penalty (0.0-2.0): penalize tokens based on presence */
val presencePenalty: Double = 0.0,
/** Stop sequences: stop generation when encountered */
val stopSequences: List<String> = emptyList(),
/** Seed for reproducible generation */
val seed: Int? = null
) {
companion object {
fun builder(): LlmOptionsBuilder
}
}
class LlmOptionsBuilder {
fun model(model: String): LlmOptionsBuilder
fun temperature(temperature: Double): LlmOptionsBuilder
fun maxTokens(maxTokens: Int): LlmOptionsBuilder
fun topP(topP: Double): LlmOptionsBuilder
fun topK(topK: Int): LlmOptionsBuilder
fun frequencyPenalty(penalty: Double): LlmOptionsBuilder
fun presencePenalty(penalty: Double): LlmOptionsBuilder
fun stopSequences(sequences: List<String>): LlmOptionsBuilder
fun seed(seed: Int): LlmOptionsBuilder
fun build(): LlmOptions
}LlmOptions Usage:
@Action(description = "Generate with custom options")
fun generateWithOptions(prompt: String, context: ActionContext): String {
return context.promptRunner()
.withLlm(LlmOptions.builder()
.model("claude-3-5-sonnet-20241022")
.temperature(0.7)
.maxTokens(2000)
.topP(0.9)
.frequencyPenalty(0.5)
.stopSequences(listOf("END", "STOP"))
.build())
.generateText(prompt)
}
@Action(description = "Generate deterministically")
fun generateDeterministic(prompt: String, context: ActionContext): String {
return context.promptRunner()
.withLlm(LlmOptions.builder()
.temperature(0.0)
.seed(42)
.build())
.generateText(prompt)
}Interface for controlling logging verbosity of LLM operations.
interface LlmVerbosity {
/** Enable/disable prompt display */
fun withShowPrompts(show: Boolean): LlmVerbosity
/** Enable/disable LLM response display */
fun withShowLlmResponses(show: Boolean): LlmVerbosity
/** Get current prompt display setting */
val showPrompts: Boolean
/** Get current response display setting */
val showLlmResponses: Boolean
}Class for providing examples in few-shot learning scenarios.
/** A demonstration pair used for few-shot teaching of the LLM. */
data class CreationExample<T>(
    /** What this example demonstrates. */
    val description: String,
    /** The concrete example value. */
    val value: T
) {
    companion object {
        /** Factory equivalent to the primary constructor; reads fluently at call sites. */
        fun <T> of(description: String, value: T): CreationExample<T> {
            return CreationExample(description, value)
        }
    }
}
@Action(description = "Extract structured data with examples")
fun extractWithExamples(text: String, context: ActionContext): ExtractedData {
return context.promptRunner()
.creating(ExtractedData::class.java)
.withExamples(
CreationExample.of(
"Invoice with single item",
ExtractedData(
type = "invoice",
amount = 100.00,
items = listOf("Widget")
)
),
CreationExample.of(
"Receipt with multiple items",
ExtractedData(
type = "receipt",
amount = 250.00,
items = listOf("Item A", "Item B", "Item C")
)
)
)
.fromPrompt("Extract data from: $text")
}
/** Type alias for values constrained to 0.0-1.0 range */
typealias ZeroToOne = Double
interface Image {
val url: String
val data: ByteArray
companion object {
fun fromUrl(url: String): Image
fun fromData(data: ByteArray): Image
}
}
interface Message {
val role: MessageRole
val content: String
}
enum class MessageRole {
USER,
ASSISTANT,
SYSTEM,
TOOL
}
interface PromptContributor {
fun contribute(context: OperationContext): String
}
interface ContextualPromptElement : PromptElement {
fun contributeContext(context: OperationContext): String
}
interface GuardRail
interface ToolObject {
fun tools(): List<Tool>
companion object {
fun from(instance: Any): ToolObject
}
}Create a custom AI instance with detailed logging for debugging.
@Action(description = "Debug LLM interactions")
fun debugLlmInteraction(prompt: String, context: ActionContext): String {
val debugAi = AiBuilder()
.withShowPrompts(true)
.withShowLlmResponses(true)
.ai()
return debugAi.withDefaultLlm()
.withSystemPrompt("You are a helpful assistant.")
.generateText(prompt)
}Use multiple models with automatic fallback for resilient applications.
@Action(description = "Generate with fallback models")
fun generateWithFallback(
prompt: String,
context: ActionContext
): GenerationResult {
val ai = context.ai()
try {
// Try primary model first
val result = ai.withFirstAvailableLlmOf(
"claude-3-5-sonnet-20241022", // Primary: most capable
"claude-3-haiku-20240307", // Fallback 1: faster
"gpt-4", // Fallback 2: alternative provider
"gpt-3.5-turbo" // Fallback 3: fastest
).generateText(prompt)
return GenerationResult(
text = result,
modelUsed = "succeeded with fallback chain"
)
} catch (e: Exception) {
logger.error("All models failed", e)
throw e
}
}Implement semantic search using embedding vectors.
@Action(description = "Semantic search across documents")
fun semanticSearch(
    query: String,
    documents: List<Document>,
    context: ActionContext
): List<Document> {
    val ai = context.ai()
    val embeddingService = ai.withEmbeddingService("text-embedding-3-large")
    // Embed the query and every document body with the same model.
    val queryEmbedding = embeddingService.embed(query)
    val documentEmbeddings = embeddingService.embed(documents.map { it.content })
    // Pair each document with its similarity to the query, then keep the five closest.
    return documents
        .zip(documentEmbeddings) { document, embedding ->
            document to cosineSimilarity(queryEmbedding, embedding)
        }
        .sortedByDescending { it.second }
        .take(5)
        .map { it.first }
}
/**
 * Cosine similarity between two equal-length vectors, in [-1, 1].
 *
 * Returns 0 when either vector has zero magnitude — the original divided by zero there,
 * producing NaN, which destabilizes the similarity sort in semanticSearch above.
 *
 * @throws IllegalArgumentException if the vectors differ in length
 */
private fun cosineSimilarity(a: FloatArray, b: FloatArray): Float {
    require(a.size == b.size) { "Vectors must have same dimensions" }
    var dotProduct = 0f
    var normA = 0f
    var normB = 0f
    for (i in a.indices) {
        dotProduct += a[i] * b[i]
        normA += a[i] * a[i]
        normB += b[i] * b[i]
    }
    val denominator = sqrt(normA) * sqrt(normB)
    return if (denominator == 0f) 0f else dotProduct / denominator
}
// Configuration (e.g., in application.conf)
// llm.roles {
// creative-writing = "claude-3-5-sonnet-20241022"
// data-extraction = "claude-3-haiku-20240307"
// analysis = "gpt-4"
// }
@Action(description = "Generate creative content using role-based model")
fun generateCreativeContent(topic: String, context: ActionContext): String {
val ai = context.ai()
return ai.withLlmByRole("creative-writing")
.withSystemPrompt("You are a creative writer.")
.withLlm(LlmOptions.builder()
.temperature(0.9)
.maxTokens(2000)
.build())
.generateText("Write a story about: $topic")
}
@Action(description = "Extract data using role-based model")
fun extractData(document: String, context: ActionContext): ExtractedData {
val ai = context.ai()
return ai.withLlmByRole("data-extraction")
.creating(ExtractedData::class.java)
.withValidation(true)
.fromPrompt("Extract structured data from: $document")
}Combine multiple LLM operations with extended thinking for complex workflows.
@Action(description = "Analyze and generate report with reasoning")
fun analyzeAndReport(
data: List<DataPoint>,
context: ActionContext
): DetailedReport {
val ai = context.ai()
// Step 1: Analyze data with extended thinking
val analysisResponse = ai.withDefaultLlm()
.thinking()
.createObject("""
Analyze this dataset and identify:
- Key patterns and trends
- Anomalies or outliers
- Statistical significance
Data: ${data.joinToString()}
""".trimIndent(), DataAnalysis::class.java)
logger.info("Analysis reasoning: ${analysisResponse.thinking}")
// Step 2: Generate embeddings for semantic grouping
// NOTE(review): `embeddings` is computed but never read below — presumably it was meant to
// feed the report prompt (semantic grouping); confirm the intent or drop this step.
val embeddingService = ai.withDefaultEmbeddingService()
val dataDescriptions = data.map { it.description }
val embeddings = embeddingService.embed(dataDescriptions)
// Step 3: Generate final report with insights
// The report prompt reuses both the analysis result and the model's reasoning from Step 1.
val reportResponse = ai.withDefaultLlm()
.thinking()
.createObject("""
Generate a comprehensive report based on:
Analysis: ${analysisResponse.content}
Reasoning: ${analysisResponse.thinking}
Include:
- Executive summary
- Detailed findings
- Recommendations
- Risk assessment
""".trimIndent(), DetailedReport::class.java)
logger.info("Report generation reasoning: ${reportResponse.thinking}")
// Attach both reasoning traces to the returned report via data-class copy.
return reportResponse.content.copy(
analysisReasoning = analysisResponse.thinking,
reportReasoning = reportResponse.thinking
)
}Use examples dynamically based on context.
@Action(description = "Extract entities with dynamic examples")
fun extractEntitiesWithDynamicExamples(
    text: String,
    domain: String,
    context: ActionContext
): EntityExtraction {
    val ai = context.ai()
    val exampleProvider = context.getBean(ExampleProvider::class.java)
    // Get domain-specific examples
    val examples = exampleProvider.getExamplesForDomain(domain)
    // Fold each example into the builder, keeping every returned Creating<T>.
    // The Creating API shown above is fluent (withExample returns Creating<T>), so the
    // original code — which called withExample inside forEach and discarded the result —
    // presumably never applied any example.
    val creator = examples.fold(
        ai.withDefaultLlm().creating(EntityExtraction::class.java)
    ) { acc, example ->
        acc.withExample(
            CreationExample(
                description = example.description,
                value = example.extraction
            )
        )
    }
    return creator
        .withValidation(true)
        .fromPrompt("""
Extract entities from this $domain text:
$text
""")
}Use thinking mode for complex conditional evaluation.
@Action(description = "Make decision with reasoning")
fun makeDecision(
    scenario: BusinessScenario,
    context: ActionContext
): Decision {
    val ai = context.ai()
    // A single thinking-mode evaluation drives both the log line and the returned decision.
    val approvalResponse = ai.withDefaultLlm()
        .thinking()
        .evaluateCondition(
            condition = "Should this request be automatically approved?",
            context = """
Request: ${scenario.request}
User role: ${scenario.userRole}
Amount: ${scenario.amount}
Risk factors: ${scenario.riskFactors.joinToString()}
Approval criteria:
- Amount under $10,000 and user is manager
- No high-risk factors present
- User has approval history
""",
            confidenceThreshold = 0.9
        )
    val approved = approvalResponse.content
    if (approved) {
        logger.info("Auto-approved. Reasoning: ${approvalResponse.thinking}")
    } else {
        logger.info("Requires review. Reasoning: ${approvalResponse.thinking}")
    }
    // Approved requests skip human review; everything else is flagged for one.
    return Decision(
        approved = approved,
        reasoning = approvalResponse.thinking,
        requiresReview = !approved
    )
}Advanced example combining multiple features for a robust agent.
@Action(description = "Research and answer with tools and references")
fun researchAndAnswer(
question: String,
context: ActionContext
): ResearchAnswer {
val ai = context.ai()
// Setup tools
val searchTool = createSearchTool()
val calculatorTool = createCalculatorTool()
val databaseTool = createDatabaseTool()
// Setup references
val references = listOf(
LlmReferenceProviders.webpage("https://docs.example.com"),
LlmReferenceProviders.directory("/docs/knowledge-base"),
LlmReferenceProviders.literalText("""
Company policies:
- Always cite sources
- Verify calculations
- Cross-reference data
""")
)
// Generate answer with thinking
val response = ai.withDefaultLlm()
.withSystemPrompt("You are a research assistant. Use tools and references to provide accurate answers.")
.withTool(searchTool)
.withTool(calculatorTool)
.withTool(databaseTool)
.withReferences(references)
.thinking()
.createObject("""
Research and answer this question:
$question
Requirements:
- Use tools to gather information
- Reference provided documentation
- Show your reasoning
- Cite sources
""".trimIndent(), ResearchAnswer::class.java)
return response.content.copy(
reasoning = response.thinking
)
}Install with Tessl CLI
npx tessl i tessl/maven-com-embabel-agent--embabel-agent-apidocs