Common AI framework utilities for the Embabel Agent system including LLM configuration, output converters, prompt contributors, and embedding service abstractions.
—
Exception types and error recovery strategies for Embabel Agent Common.
Thrown when no model matches the selection criteria.
// NOTE(review): API signature excerpt from generated docs — not compilable
// source as shown (the RuntimeException super-call and the forModels body
// are elided in this document).
/**
 * Thrown when no model matches the given selection criteria.
 */
class NoSuitableModelException(
    criteria: ModelSelectionCriteria,
    modelNames: List<String>
) : RuntimeException {
    companion object {
        /** Builds the exception from the list of models that were actually available. */
        @JvmStatic
        fun forModels(
            criteria: ModelSelectionCriteria,
            models: List<AiModel<*>>
        ): NoSuitableModelException
    }
}

When thrown:
Usage:
try {
val model = selectModel(criteria, availableModels)
} catch (e: NoSuitableModelException) {
logger.error("No suitable model: ${e.message}")
// Fallback to default
val model = selectModel(ModelSelectionCriteria.PlatformDefault, availableModels)
}

Creating manually:
val criteria = ModelSelectionCriteria.byName("nonexistent-model")
val available = listOf("gpt-4", "claude-3-opus")
// Direct construction
throw NoSuitableModelException(criteria, available)
// From AiModel list
val aiModels: List<AiModel<*>> = // ...
throw NoSuitableModelException.forModels(criteria, aiModels)

Output converters return null instead of throwing exceptions for parsing errors.
val result = converter.convert(llmResponse)
if (result == null) {
// Parsing failed - handle error
logger.error("Failed to parse LLM response: $llmResponse")
// Options:
// 1. Retry with different prompt
// 2. Use default value
// 3. Throw custom exception
// 4. Return error response to user
}

Common causes:
Streaming converters skip invalid lines and continue processing.
val stream = converter.convertStream(jsonlResponse)
stream.subscribe(
{ event -> processEvent(event) },
{ error ->
// Stream-level error (rare)
logger.error("Stream failed", error)
},
{ logger.info("Stream complete") }
)

Per-line errors are silently skipped - invalid JSON lines don't break the stream.
/**
 * Resolves [preferred] to LLM options, degrading to the platform default
 * LLM instead of propagating a model-selection failure.
 */
fun selectModelWithFallback(preferred: String): LlmOptions = try {
    LlmOptions.withModel(preferred)
} catch (e: NoSuitableModelException) {
    // Fall back gracefully rather than letting the failure escape.
    logger.warn("Preferred model $preferred unavailable, using fallback")
    LlmOptions.withDefaultLlm()
}

Built-in fallback chain:
// Automatically tries models in order
val options = LlmOptions.withFirstAvailableLlmOf(
"gpt-4-turbo",
"gpt-4",
"gpt-3.5-turbo"
)

/**
 * Calls the LLM and parses the response, retrying up to [maxRetries]
 * times whenever the converter returns null (parse failure).
 *
 * @return the parsed [Person], or null when every attempt failed
 */
fun parseWithRetry(
    llmClient: LLMClient,
    converter: JacksonOutputConverter<Person>,
    prompt: String,
    maxRetries: Int = 3
): Person? {
    repeat(maxRetries) { attempt ->
        val response = llmClient.call(prompt)
        val result = converter.convert(response)
        if (result != null) {
            return result
        }
        logger.warn("Parse attempt ${attempt + 1} failed, retrying...")
        // Linear backoff: 1s, 2s, 3s, ... Thread.sleep takes a Long, so the
        // multiplier must be 1000L — Kotlin does not widen Int to Long.
        Thread.sleep(1000L * (attempt + 1))
    }
    return null
}
data class PartialPerson(
val name: String?,
val age: Int?,
val email: String?
)
/**
 * Parses a response into an all-nullable holder, accepting partial data
 * but requiring at least a name to be present.
 */
fun parsePartial(response: String): PartialPerson? {
    val candidate = converter.convert(response)
    // Minimum requirement: a name must have been extracted.
    return if (candidate?.name != null) candidate else null
}
data class PersonWithDefaults(
val name: String = "Unknown",
val age: Int = 0,
val email: String = ""
)
// Always succeeds with defaults for missing fields
val person = converter.convert(response) ?: PersonWithDefaults()

/**
 * Collects all events from a JSONL stream, recording per-event errors
 * instead of failing, and returns whatever was successfully parsed.
 */
fun processStreamWithRecovery(jsonl: String): List<Event> {
    val results = mutableListOf<Event>()
    val errors = mutableListOf<String>()
    val stream = converter.convertStream(jsonl)
    // NOTE(review): assumes subscribe() drains the stream synchronously
    // before this function returns — confirm for the reactive type used,
    // otherwise `results` may be returned incomplete.
    stream.subscribe(
        { event -> results.add(event) },
        { error -> errors.add(error.message ?: "Unknown") },
        {
            if (errors.isNotEmpty()) {
                logger.warn("Stream had ${errors.size} errors: $errors")
            }
            logger.info("Processed ${results.size} events")
        }
    )
    return results
}

// Cheap structural pre-check before handing a response to the converter.
fun validateResponse(response: String): Boolean {
    return response.isNotBlank() &&
        (response.contains("{") || response.contains("[")) &&
        response.length < 1_000_000 // Sanity check
}

// Usage: validate first, only then attempt conversion.
if (validateResponse(llmResponse)) {
    val result = converter.convert(llmResponse)
} else {
    logger.error("Invalid response format")
}

data class Person(val name: String, val age: Int, val email: String)
/**
 * Sanity-checks a parsed [Person]: non-blank name, plausible age range,
 * and an email that contains "@".
 */
fun validatePerson(person: Person): Boolean {
    if (person.name.isBlank()) return false
    if (person.age !in 0..150) return false
    return "@" in person.email
}
val person = converter.convert(response)
if (person != null && validatePerson(person)) {
// Use validated person
} else {
logger.error("Invalid person data")
}

import com.fasterxml.jackson.databind.JsonNode
import com.github.fge.jsonschema.core.report.ProcessingReport
import com.github.fge.jsonschema.main.JsonSchemaFactory
// Validates [json] against a JSON Schema using the json-schema-validator
// library imported above.
// NOTE(review): relies on an ambient Jackson `mapper` defined elsewhere.
fun validateAgainstSchema(json: String, schema: String): Boolean {
    val factory = JsonSchemaFactory.byDefault()
    val schemaNode = mapper.readTree(schema)
    val jsonNode = mapper.readTree(json)
    val validator = factory.getJsonSchema(schemaNode)
    val report: ProcessingReport = validator.validate(jsonNode)
    return report.isSuccess
}

// Logs a truncated view of the failing response plus the error message,
// so huge LLM outputs do not flood the log.
fun logConversionError(response: String, error: Exception) {
    logger.error("""
Conversion failed:
- Response length: ${response.length}
- First 100 chars: ${response.take(100)}
- Error: ${error.message}
""".trimIndent())
}

// Simple counters for error reporting; AtomicInteger keeps increments
// safe under concurrent recording.
class ErrorMetrics {
    private val conversionErrors = AtomicInteger(0)
    private val modelSelectionErrors = AtomicInteger(0)
    fun recordConversionError() {
        conversionErrors.incrementAndGet()
    }
    fun recordModelSelectionError() {
        modelSelectionErrors.incrementAndGet()
    }
    // Human-readable snapshot of both counters.
    fun report(): String {
        return """
Conversion errors: ${conversionErrors.get()}
Model selection errors: ${modelSelectionErrors.get()}
""".trimIndent()
    }
}

val options = LlmOptions.withModel("gpt-4")
.withTimeout(Duration.ofSeconds(30))
try {
val response = llmClient.call(prompt, options)
} catch (e: TimeoutException) {
logger.error("Request timed out after 30 seconds")
// Retry with longer timeout or different model
}

import java.time.Duration
stream
.timeout(Duration.ofSeconds(60))
.onErrorResume { error ->
logger.error("Stream timeout", error)
Flux.empty() // Return empty stream
}
.subscribe { event -> processEvent(event) }

/**
 * Minimal circuit breaker for LLM calls: opens after [failureThreshold]
 * consecutive failures and probes again (half-open) once [resetTimeout]
 * has elapsed since the last failure.
 *
 * NOTE(review): not thread-safe — state transitions are unsynchronized;
 * confirm callers are single-threaded or add locking.
 */
class LLMCircuitBreaker(
    private val failureThreshold: Int = 5,
    private val resetTimeout: Duration = Duration.ofMinutes(5)
) {
    private var failures = 0
    private var lastFailure: Instant? = null
    private var state = State.CLOSED

    enum class State { CLOSED, OPEN, HALF_OPEN }

    /**
     * Runs [operation] unless the circuit is open; returns null when the
     * call is rejected. Failures are counted and the exception rethrown.
     */
    fun <T> execute(operation: () -> T): T? {
        when (state) {
            State.OPEN -> {
                // lastFailure is always set by the time the circuit opens;
                // Duration.between rejects a nullable Instant?, so default to
                // EPOCH (which simply forces a half-open probe) instead of !!.
                val elapsed = Duration.between(lastFailure ?: Instant.EPOCH, Instant.now())
                if (elapsed > resetTimeout) {
                    state = State.HALF_OPEN
                    logger.info("Circuit breaker half-open, testing...")
                } else {
                    logger.warn("Circuit breaker open, rejecting call")
                    return null
                }
            }
            State.CLOSED, State.HALF_OPEN -> {}
        }
        return try {
            val result = operation()
            onSuccess()
            result
        } catch (e: Exception) {
            onFailure()
            throw e
        }
    }

    // Any success fully closes the circuit and clears the failure count.
    private fun onSuccess() {
        failures = 0
        state = State.CLOSED
    }

    // Record a failure; trip the breaker once the threshold is reached.
    private fun onFailure() {
        failures++
        lastFailure = Instant.now()
        if (failures >= failureThreshold) {
            state = State.OPEN
            logger.error("Circuit breaker opened after $failures failures")
        }
    }
}
// BAD
val person = converter.convert(response)
println(person.name) // NullPointerException if parsing failed
// GOOD
val person = converter.convert(response)
if (person != null) {
println(person.name)
} else {
handleError()
}
// BETTER
val person = converter.convert(response) ?: return handleError()

// BAD
val options = LlmOptions.withModel("expensive-model")
// GOOD
val options = LlmOptions.withFirstAvailableLlmOf(
"expensive-model",
"fallback-model",
"cheap-model"
)

// BAD
logger.error("Parse failed")
// GOOD
logger.error("Parse failed for response: ${response.take(200)}...")

// Validate input
if (!validateResponse(response)) return null
// Parse
val result = converter.convert(response)
// Validate output
if (result == null || !validateResult(result)) return null
return result

// Quick tasks
val quickOptions = LlmOptions.withModel("gpt-3.5-turbo")
.withTimeout(Duration.ofSeconds(10))
// Complex tasks
val complexOptions = LlmOptions.withModel("gpt-4")
.withTimeout(Duration.ofSeconds(60))

// Converter returns null (never throws) on unparseable JSON.
@Test
fun `test conversion with invalid JSON`() {
    val result = converter.convert("{invalid json}")
    assertNull(result)
}

// Selecting a model no provider offers raises NoSuitableModelException.
@Test
fun `test model selection with unavailable model`() {
    val criteria = ModelSelectionCriteria.byName("nonexistent")
    assertThrows<NoSuitableModelException> {
        selectModel(criteria, emptyList())
    }
}

// Invalid JSONL lines are skipped; only the valid lines are emitted.
// NOTE(review): assumes convertStream emits synchronously on subscribe —
// confirm, otherwise this assertion races the stream.
@Test
fun `test streaming with partial errors`() {
    val jsonl = """
{"valid": "json"}
{invalid}
{"also": "valid"}
""".trimIndent()
    val results = mutableListOf<Map<String, String>>()
    converter.convertStream(jsonl)
        .subscribe { results.add(it) }
    assertEquals(2, results.size) // Only valid lines
}

Install with Tessl CLI
npx tessl i tessl/maven-com-embabel-agent--embabel-agent-common@0.3.0