Base starter module for the Embabel Agent Framework providing core dependencies for building agentic flows on the JVM with Spring Boot integration and GOAP-based intelligent path finding.
Complete reference for all configuration properties in embabel-agent-starter.
embabel:
agent:
platform:
scanning:
annotation: true

Properties file: embabel.agent.platform.scanning.annotation=true
Environment variable: export EMBABEL_AGENT_PLATFORM_SCANNING_ANNOTATION=true

Prefix: embabel.agent.platform
| Property | Type | Default | Description |
|---|---|---|---|
embabel.agent.platform.scanning.annotation | boolean | true | Enable scanning for @Agent annotated classes |
embabel.agent.platform.scanning.bean | boolean | false | Enable scanning for agent beans |
embabel:
agent:
platform:
scanning:
annotation: true
bean: false

| Property | Type | Default | Description |
|---|---|---|---|
embabel.agent.platform.autonomy.agent-confidence-cut-off | double | 0.6 | Minimum confidence for agent selection (0.0-1.0) |
embabel.agent.platform.autonomy.goal-confidence-cut-off | double | 0.6 | Minimum confidence for goal selection (0.0-1.0) |
embabel:
agent:
platform:
autonomy:
agent-confidence-cut-off: 0.6
goal-confidence-cut-off: 0.6

| Property | Type | Default | Description |
|---|---|---|---|
embabel.agent.platform.ranking.max-attempts | int | 5 | Maximum ranking attempts |
embabel.agent.platform.ranking.backoff-millis | long | 100 | Initial backoff delay in milliseconds |
embabel.agent.platform.ranking.backoff-multiplier | double | 5.0 | Backoff multiplier for exponential backoff |
embabel.agent.platform.ranking.backoff-max-interval | long | 180000 | Maximum backoff interval in milliseconds |
embabel:
agent:
platform:
ranking:
max-attempts: 5
backoff-millis: 100
backoff-multiplier: 5.0
backoff-max-interval: 180000

| Property | Type | Default | Description |
|---|---|---|---|
embabel.agent.platform.process-id-generation.include-version | boolean | false | Include version in process IDs |
embabel.agent.platform.process-id-generation.include-agent-name | boolean | false | Include agent name in process IDs |
embabel:
agent:
platform:
process-id-generation:
include-version: false
include-agent-name: false

| Property | Type | Default | Description |
|---|---|---|---|
embabel.agent.platform.llm-operations.prompts.maybe-prompt-template | String | "maybe_prompt_contribution" | Template name for maybe prompts |
embabel.agent.platform.llm-operations.prompts.generate-examples-by-default | boolean | true | Generate examples in prompts |
embabel:
agent:
platform:
llm-operations:
prompts:
maybe-prompt-template: maybe_prompt_contribution
generate-examples-by-default: true

| Property | Type | Default | Description |
|---|---|---|---|
embabel.agent.platform.llm-operations.data-binding.max-attempts | int | 10 | Maximum data binding attempts |
embabel.agent.platform.llm-operations.data-binding.fixed-backoff-millis | long | 30 | Fixed backoff delay in milliseconds |
embabel:
agent:
platform:
llm-operations:
data-binding:
max-attempts: 10
fixed-backoff-millis: 30

| Property | Type | Default | Description |
|---|---|---|---|
embabel.agent.platform.sse.max-buffer-size | int | 100 | Maximum buffer size per SSE stream |
embabel.agent.platform.sse.max-process-buffers | int | 1000 | Maximum number of process buffers |
embabel:
agent:
platform:
sse:
max-buffer-size: 100
max-process-buffers: 1000

| Property | Type | Default | Description |
|---|---|---|---|
embabel.agent.platform.models.anthropic.max-attempts | int | 10 | Maximum retry attempts |
embabel.agent.platform.models.anthropic.backoff-millis | long | 5000 | Initial backoff in milliseconds |
embabel.agent.platform.models.anthropic.backoff-multiplier | double | 5.0 | Backoff multiplier |
embabel.agent.platform.models.anthropic.backoff-max-interval | long | 180000 | Maximum backoff interval in milliseconds |
embabel:
agent:
platform:
models:
anthropic:
max-attempts: 10
backoff-millis: 5000
backoff-multiplier: 5.0
backoff-max-interval: 180000

| Property | Type | Default | Description |
|---|---|---|---|
embabel.agent.platform.models.openai.max-attempts | int | 10 | Maximum retry attempts |
embabel.agent.platform.models.openai.backoff-millis | long | 5000 | Initial backoff in milliseconds |
embabel.agent.platform.models.openai.backoff-multiplier | double | 5.0 | Backoff multiplier |
embabel.agent.platform.models.openai.backoff-max-interval | long | 180000 | Maximum backoff interval in milliseconds |
embabel:
agent:
platform:
models:
openai:
max-attempts: 10
backoff-millis: 5000
backoff-multiplier: 5.0
backoff-max-interval: 180000

| Property | Type | Default | Description |
|---|---|---|---|
embabel.agent.platform.test.mock-mode | boolean | true | Enable mock mode for testing |
embabel:
agent:
platform:
test:
mock-mode: true

| Property | Type | Default | Description |
|---|---|---|---|
embabel.agent.platform.tools.includes | Map<String, GroupConfig> | {} | Map of tool group names to configurations |
embabel.agent.platform.tools.excludes | List<String> | [] | Global list of tools to exclude |
embabel.agent.platform.tools.version | String | null | Tool groups version |
embabel:
agent:
platform:
tools:
includes:
web-tools:
tools:
- fetch_url
- search_web
github-tools:
tools:
- create_issue
- list_repos
excludes:
- dangerous_operation
- deprecated_tool
version: "1.0.0"

Prefix: embabel.agent.logging
| Property | Type | Default | Description |
|---|---|---|---|
embabel.agent.logging.personality | String | null | Logging theme/personality |
embabel:
agent:
logging:
personality: starwars

Allowed values: starwars, severance, colossus, hitchhiker, montypython
Prefix: spring.ai.mcp.client
| Property | Type | Default | Description |
|---|---|---|---|
spring.ai.mcp.client.enabled | boolean | true | Enable MCP client |
spring.ai.mcp.client.type | String | "SYNC" | Client type: SYNC or ASYNC |
spring.ai.mcp.client.name | String | "embabel" | Client implementation name |
spring.ai.mcp.client.version | String | "1.0.0" | Client implementation version |
spring.ai.mcp.client.request-timeout | Duration | 30s | Request timeout duration |
spring.ai.mcp.client.initialized | boolean | true | Initialize client on creation |
spring:
ai:
mcp:
client:
enabled: true
type: SYNC
name: embabel
version: 1.0.0
request-timeout: 30s
initialized: true

spring:
ai:
mcp:
client:
stdio:
connections:
docker-mcp:
command: docker
args:
- run
- -i
- --rm
- alpine/socat
- STDIO
- TCP:host.docker.internal:8811

Structure:
- spring.ai.mcp.client.stdio.connections.<name>.command (String) - Command to execute
- spring.ai.mcp.client.stdio.connections.<name>.args (List<String>) - Command arguments

| Property | Type | Default | Description |
|---|---|---|---|
spring.application.name | String | "agent-api" | Application name |
spring.threads.virtual.enabled | boolean | true | Enable virtual threads |
spring.output.ansi.enabled | String | "ALWAYS" | ANSI output mode |
spring:
application:
name: agent-api
threads:
virtual:
enabled: true
output:
ansi:
enabled: ALWAYS

| Property | Type | Default | Description |
|---|---|---|---|
spring.ai.ollama.base-url | String | "http://localhost:11434" | Ollama endpoint |
spring:
ai:
ollama:
base-url: http://localhost:11434

| Property | Type | Default | Description |
|---|---|---|---|
spring.ai.openai.api-key | String | null | OpenAI API key |
spring:
ai:
openai:
api-key: ${OPENAI_API_KEY}

| Property | Type | Default | Description |
|---|---|---|---|
spring.ai.anthropic.api-key | String | null | Anthropic API key |
spring:
ai:
anthropic:
api-key: ${ANTHROPIC_API_KEY}

| Property | Type | Default | Description |
|---|---|---|---|
spring.ai.gemini.api-key | String | null | Google API key |
spring:
ai:
gemini:
api-key: ${GOOGLE_STUDIO_API_KEY}

spring:
ai:
bedrock:
aws:
region: us-east-1
access-key: ${AWS_ACCESS_KEY_ID}
secret-key: ${AWS_SECRET_ACCESS_KEY}

| Property | Type | Default | Description |
|---|---|---|---|
management.tracing.enabled | boolean | false | Enable distributed tracing |
management:
tracing:
enabled: false

Prefix: embabel.models
| Property | Type | Default | Description |
|---|---|---|---|
embabel.models.default-llm | String | "gpt-4.1-mini" | Default LLM model |
embabel.models.default-embedding-model | String | "text-embedding-3-small" | Default embedding model |
embabel:
models:
default-llm: gpt-4.1-mini
default-embedding-model: text-embedding-3-small

| Property | Type | Default | Description |
|---|---|---|---|
logging.pattern.console | String | See below | Console log pattern with ANSI colors |
logging:
pattern:
console: "%clr(%d{HH:mm:ss.SSS}){faint} %clr([%t]){magenta} %clr(%-5level) %clr(%logger{0}){cyan} %clr(-){faint} %msg%n"

# OpenAI
export OPENAI_API_KEY=your-openai-api-key
# Anthropic
export ANTHROPIC_API_KEY=your-anthropic-api-key
# Google
export GOOGLE_STUDIO_API_KEY=your-google-api-key
# AWS Bedrock
export AWS_ACCESS_KEY_ID=your-aws-key
export AWS_SECRET_ACCESS_KEY=your-aws-secret
export AWS_REGION=us-east-1
# Ollama
export OLLAMA_BASE_URL=http://localhost:11434
# Spring Profiles
export SPRING_PROFILES_ACTIVE=dev
# Property overrides (examples)
export EMBABEL_AGENT_PLATFORM_SCANNING_ANNOTATION=true
export EMBABEL_AGENT_PLATFORM_AUTONOMY_AGENT_CONFIDENCE_CUT_OFF=0.7
export EMBABEL_AGENT_LOGGING_PERSONALITY=starwars
export EMBABEL_MODELS_DEFAULT_LLM=gpt-4

# application.yml
spring:
application:
name: my-agent-app
threads:
virtual:
enabled: true
output:
ansi:
enabled: ALWAYS
ai:
openai:
api-key: ${OPENAI_API_KEY}
anthropic:
api-key: ${ANTHROPIC_API_KEY}
ollama:
base-url: http://localhost:11434
mcp:
client:
enabled: true
type: SYNC
request-timeout: 30s
stdio:
connections:
github-mcp:
command: npx
args:
- -y
- "@modelcontextprotocol/server-github"
embabel:
agent:
platform:
scanning:
annotation: true
bean: false
autonomy:
agent-confidence-cut-off: 0.6
goal-confidence-cut-off: 0.6
ranking:
max-attempts: 5
backoff-millis: 100
backoff-multiplier: 5.0
backoff-max-interval: 180000
llm-operations:
prompts:
maybe-prompt-template: maybe_prompt_contribution
generate-examples-by-default: true
data-binding:
max-attempts: 10
fixed-backoff-millis: 30
sse:
max-buffer-size: 100
max-process-buffers: 1000
models:
anthropic:
max-attempts: 10
backoff-millis: 5000
backoff-multiplier: 5.0
backoff-max-interval: 180000
openai:
max-attempts: 10
backoff-millis: 5000
backoff-multiplier: 5.0
backoff-max-interval: 180000
test:
mock-mode: false
tools:
includes:
web-tools:
tools:
- fetch_url
- search_web
excludes:
- dangerous_tool
logging:
personality: starwars
models:
default-llm: gpt-4.1-mini
default-embedding-model: text-embedding-3-small
management:
tracing:
enabled: false
logging:
pattern:
console: "%clr(%d{HH:mm:ss.SSS}){faint} %clr([%t]){magenta} %clr(%-5level) %clr(%logger{0}){cyan} %clr(-){faint} %msg%n"
level:
com.embabel.agent: INFO

embabel:
agent:
platform:
test:
mock-mode: true
models:
anthropic:
max-attempts: 3
backoff-millis: 1000
logging:
personality: montypython
logging:
level:
com.embabel.agent: DEBUG

embabel:
agent:
platform:
test:
mock-mode: false
models:
anthropic:
max-attempts: 15
backoff-millis: 10000
backoff-max-interval: 300000
logging:
personality: colossus
logging:
level:
com.embabel.agent: INFO

Activate with:
export SPRING_PROFILES_ACTIVE=dev
# or
java -jar app.jar --spring.profiles.active=prod

tessl i tessl/maven-com-embabel-agent--embabel-agent-starter@0.3.1

docs