CtrlK
Blog · Docs · Log in · Get started
Tessl Logo

tessl/maven-org-springframework-boot--spring-boot-autoconfigure

Spring Boot AutoConfigure provides auto-configuration capabilities that automatically configure Spring applications based on jar dependencies present on the classpath

Pending
Overview
Eval results
Files

docs/examples/real-world-scenarios.md

Real-World Scenarios

Production-ready examples demonstrating Spring Boot AutoConfigure in real applications.

Scenario 1: Microservice with Multiple Data Sources

Requirements

  • Primary PostgreSQL database for business data
  • Redis for caching
  • MongoDB for audit logs
  • All configurable via properties

Implementation

@AutoConfiguration
@EnableConfigurationProperties(DataSourcesProperties.class)
public class MultiDataSourceAutoConfiguration {

    /**
     * Primary business database (PostgreSQL in the example configuration).
     * Created only when {@code datasources.primary.url} is set, and backs off
     * if the application already defines a bean named {@code primaryDataSource},
     * so users can override the auto-configuration (standard Spring Boot
     * auto-config convention).
     */
    @Bean
    @Primary
    @ConditionalOnMissingBean(name = "primaryDataSource")
    @ConditionalOnProperty(prefix = "datasources.primary", name = "url")
    public DataSource primaryDataSource(DataSourcesProperties properties) {
        return DataSourceBuilder.create()
            .url(properties.getPrimary().getUrl())
            .username(properties.getPrimary().getUsername())
            .password(properties.getPrimary().getPassword())
            .build();
    }

    /**
     * Redis connection factory for caching. Requires the Lettuce/Spring Data
     * Redis classes on the classpath and {@code datasources.redis.host} to be
     * configured; backs off when the application supplies its own factory.
     */
    @Bean
    @ConditionalOnMissingBean
    @ConditionalOnClass(RedisConnectionFactory.class)
    @ConditionalOnProperty(prefix = "datasources.redis", name = "host")
    public RedisConnectionFactory redisConnectionFactory(
            DataSourcesProperties properties) {
        RedisStandaloneConfiguration config = new RedisStandaloneConfiguration();
        config.setHostName(properties.getRedis().getHost());
        config.setPort(properties.getRedis().getPort());
        return new LettuceConnectionFactory(config);
    }

    /**
     * MongoDB client for audit logging. Requires the Mongo driver on the
     * classpath and {@code datasources.mongo.uri} to be configured; backs off
     * when the application supplies its own client.
     */
    @Bean
    @ConditionalOnMissingBean
    @ConditionalOnClass(MongoClient.class)
    @ConditionalOnProperty(prefix = "datasources.mongo", name = "uri")
    public MongoClient mongoClient(DataSourcesProperties properties) {
        return MongoClients.create(properties.getMongo().getUri());
    }
}

Configuration

datasources:
  primary:
    url: jdbc:postgresql://localhost:5432/business
    username: ${DB_USER}
    password: ${DB_PASSWORD}
  redis:
    host: localhost
    port: 6379
  mongo:
    uri: mongodb://localhost:27017/audit

Scenario 2: API Gateway with Dynamic Routing

Requirements

  • Route requests based on configuration
  • Support multiple backend services
  • Circuit breaker integration
  • Request/response logging

Implementation

@AutoConfiguration
@ConditionalOnClass(RouteLocator.class)
@EnableConfigurationProperties(GatewayProperties.class)
public class GatewayAutoConfiguration {

    // Fix: loggingFilter() referenced 'log' but the original class never
    // declared a logger, so the example did not compile.
    private static final Logger log =
        LoggerFactory.getLogger(GatewayAutoConfiguration.class);

    /**
     * Builds one gateway route per entry under {@code gateway.routes}.
     * Each route gets a circuit breaker named {@code <id>-cb} with the
     * configured fallback URI, plus the configured retry count.
     */
    @Bean
    public RouteLocator customRouteLocator(
            RouteLocatorBuilder builder,
            GatewayProperties properties) {

        RouteLocatorBuilder.Builder routes = builder.routes();

        properties.getRoutes().forEach((id, config) -> {
            routes.route(id, r -> r
                .path(config.getPath())
                .filters(f -> f
                    .circuitBreaker(c -> c
                        .setName(id + "-cb")
                        .setFallbackUri(config.getFallbackUri()))
                    .retry(config.getRetries()))
                .uri(config.getUri()));
        });

        return routes.build();
    }

    /**
     * Request logging filter, enabled via {@code gateway.logging.enabled=true}.
     * Logs method and URI before delegating down the filter chain.
     */
    @Bean
    @ConditionalOnProperty(name = "gateway.logging.enabled", havingValue = "true")
    public GlobalFilter loggingFilter() {
        return (exchange, chain) -> {
            log.info("Request: {} {}",
                exchange.getRequest().getMethod(),
                exchange.getRequest().getURI());
            return chain.filter(exchange);
        };
    }
}

Configuration

gateway:
  logging:
    enabled: true
  routes:
    user-service:
      path: /api/users/**
      uri: http://user-service:8080
      fallbackUri: forward:/fallback/users
      retries: 3
    order-service:
      path: /api/orders/**
      uri: http://order-service:8080
      fallbackUri: forward:/fallback/orders
      retries: 2

Scenario 3: Multi-Tenant SaaS Application

Requirements

  • Tenant identification from subdomain or header
  • Separate database per tenant
  • Tenant-specific caching
  • Cross-tenant data isolation

Implementation

@AutoConfiguration
@EnableConfigurationProperties(TenantProperties.class)
public class MultiTenantAutoConfiguration {

    /** Resolves the current tenant from a request header. */
    @Bean
    public TenantIdentifierResolver tenantIdentifierResolver() {
        return new HeaderBasedTenantResolver();
    }

    /**
     * Routing DataSource that dispatches each request to the pool for the
     * current tenant. One Hikari pool is created per entry under
     * {@code multi-tenant.tenants}.
     *
     * @throws IllegalStateException if {@code multi-tenant.default-tenant}
     *         does not match any configured tenant (fail fast at startup
     *         instead of an obscure NPE on the first unresolved lookup)
     */
    @Bean
    public DataSource multiTenantDataSource(TenantProperties properties) {
        Map<Object, Object> dataSources = new HashMap<>();

        properties.getTenants().forEach((tenantId, config) -> {
            HikariDataSource ds = new HikariDataSource();
            ds.setJdbcUrl(config.getJdbcUrl());
            ds.setUsername(config.getUsername());
            ds.setPassword(config.getPassword());
            ds.setMaximumPoolSize(config.getMaxPoolSize());
            dataSources.put(tenantId, ds);
        });

        // Fix: the original passed a possibly-null default target straight
        // through, deferring the failure to request time.
        Object defaultDataSource = dataSources.get(properties.getDefaultTenant());
        if (defaultDataSource == null) {
            throw new IllegalStateException(
                "multi-tenant.default-tenant '" + properties.getDefaultTenant()
                    + "' has no matching entry under multi-tenant.tenants");
        }

        TenantRoutingDataSource routingDataSource =
            new TenantRoutingDataSource();
        routingDataSource.setTargetDataSources(dataSources);
        routingDataSource.setDefaultTargetDataSource(defaultDataSource);
        routingDataSource.afterPropertiesSet();

        return routingDataSource;
    }

    /** Cache manager that namespaces entries per tenant. */
    @Bean
    public CacheManager multiTenantCacheManager() {
        return new TenantAwareCacheManager();
    }

    /**
     * Servlet filter that establishes the tenant context for /api/* requests.
     * Registered at highest precedence so the tenant is known before any
     * other filter (security, logging) runs.
     */
    @Bean
    public FilterRegistrationBean<TenantFilter> tenantFilter() {
        FilterRegistrationBean<TenantFilter> registration =
            new FilterRegistrationBean<>();
        registration.setFilter(new TenantFilter());
        registration.addUrlPatterns("/api/*");
        registration.setOrder(Ordered.HIGHEST_PRECEDENCE);
        return registration;
    }
}

Configuration

multi-tenant:
  default-tenant: tenant1
  tenants:
    tenant1:
      jdbc-url: jdbc:postgresql://localhost:5432/tenant1_db
      username: tenant1_user
      password: ${TENANT1_PASSWORD}
      max-pool-size: 10
    tenant2:
      jdbc-url: jdbc:postgresql://localhost:5432/tenant2_db
      username: tenant2_user
      password: ${TENANT2_PASSWORD}
      max-pool-size: 10

Scenario 4: Event-Driven Architecture

Requirements

  • Kafka integration
  • Event publishing and consumption
  • Dead letter queue handling
  • Event replay capability

Implementation

@AutoConfiguration
@ConditionalOnClass(KafkaTemplate.class)
@EnableConfigurationProperties(EventProperties.class)
public class EventDrivenAutoConfiguration {

    /**
     * Default KafkaTemplate for event publishing. Backs off when the
     * application defines its own template (auto-config convention).
     */
    @Bean
    @ConditionalOnMissingBean
    public KafkaTemplate<String, Object> kafkaTemplate(
            ProducerFactory<String, Object> producerFactory) {
        return new KafkaTemplate<>(producerFactory);
    }

    /**
     * Dead-letter handler: forwards the failed message's payload to the
     * configured DLQ topic and returns null so the listener treats the
     * record as handled.
     *
     * NOTE(review): only the payload is forwarded — the triggering exception
     * and original headers are lost. Consider attaching failure metadata as
     * record headers if the DLQ consumer needs them.
     */
    @Bean
    @ConditionalOnProperty(name = "events.dlq.enabled", havingValue = "true")
    public KafkaListenerErrorHandler dlqErrorHandler(
            KafkaTemplate<String, Object> template,
            EventProperties properties) {
        return (message, exception) -> {
            template.send(
                properties.getDlq().getTopic(),
                message.getPayload()
            );
            return null;
        };
    }

    /**
     * Listener container factory with configurable concurrency and ack mode
     * ({@code events.concurrency}, {@code events.ack-mode}). Backs off when
     * the application supplies its own factory.
     */
    @Bean
    @ConditionalOnMissingBean
    public ConcurrentKafkaListenerContainerFactory<String, Object>
            kafkaListenerContainerFactory(
                ConsumerFactory<String, Object> consumerFactory,
                EventProperties properties) {

        ConcurrentKafkaListenerContainerFactory<String, Object> factory =
            new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory);
        factory.setConcurrency(properties.getConcurrency());
        factory.getContainerProperties().setAckMode(
            properties.getAckMode()
        );
        return factory;
    }
}

Configuration

spring:
  kafka:
    bootstrap-servers: localhost:9092
    consumer:
      group-id: my-service
      auto-offset-reset: earliest
    producer:
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      value-serializer: org.springframework.kafka.support.serializer.JsonSerializer

events:
  concurrency: 3
  ack-mode: MANUAL
  dlq:
    enabled: true
    topic: dlq-events

Scenario 5: Observability Stack

Requirements

  • Distributed tracing
  • Metrics collection
  • Health checks
  • Structured logging

Implementation

@AutoConfiguration
@ConditionalOnClass(MeterRegistry.class)
@EnableConfigurationProperties(ObservabilityProperties.class)
public class ObservabilityAutoConfiguration {

    /**
     * Brave tracer reporting spans to Zipkin, enabled via
     * {@code observability.tracing.enabled=true}.
     */
    @Bean
    @ConditionalOnProperty(name = "observability.tracing.enabled", havingValue = "true")
    public Tracer tracer(ObservabilityProperties properties) {
        return Tracing.newBuilder()
            .localServiceName(properties.getServiceName())
            .spanReporter(zipkinSpanReporter(properties))
            .build()
            .tracer();
    }

    // Fix: the original called zipkinSpanReporter(...) but never defined it,
    // so the example did not compile. Sends spans asynchronously to the URL
    // configured under observability.tracing.zipkin-url.
    private Reporter<Span> zipkinSpanReporter(ObservabilityProperties properties) {
        return AsyncReporter.create(
            URLConnectionSender.create(properties.getTracing().getZipkinUrl()));
    }

    /**
     * Tags every metric with the application name and environment so
     * dashboards can filter per service/stage.
     */
    @Bean
    public MeterRegistryCustomizer<MeterRegistry> metricsCommonTags(
            ObservabilityProperties properties) {
        return registry -> registry.config()
            .commonTags(
                "application", properties.getServiceName(),
                "environment", properties.getEnvironment()
            );
    }

    /**
     * Aggregates all HealthContributor beans into a registry; enabled via
     * {@code observability.health.detailed=true}.
     */
    @Bean
    @ConditionalOnProperty(name = "observability.health.detailed", havingValue = "true")
    public HealthContributorRegistry healthContributorRegistry(
            Collection<HealthContributor> contributors) {
        return new DefaultHealthContributorRegistry(contributors);
    }

    /** Structured-logging aspect configured from observability.logging.*. */
    @Bean
    public LoggingAspect loggingAspect(ObservabilityProperties properties) {
        return new LoggingAspect(properties.getLogging());
    }
}

Configuration

observability:
  service-name: my-service
  environment: production
  tracing:
    enabled: true
    zipkin-url: http://zipkin:9411
  health:
    detailed: true
  logging:
    level: INFO
    include-trace-id: true

Scenario 6: Security with OAuth2

Requirements

  • OAuth2 resource server
  • JWT token validation
  • Role-based access control
  • API key authentication fallback

Implementation

@AutoConfiguration
@ConditionalOnClass(SecurityFilterChain.class)
@EnableConfigurationProperties(SecurityProperties.class)
public class SecurityAutoConfiguration {

    /**
     * OAuth2 resource-server filter chain: the configured public paths are
     * open to everyone, all other requests require an authenticated JWT.
     * Enabled via {@code security.oauth2.enabled=true}.
     */
    @Bean
    @ConditionalOnProperty(name = "security.oauth2.enabled", havingValue = "true")
    public SecurityFilterChain oauth2FilterChain(
            HttpSecurity http,
            SecurityProperties properties) throws Exception {

        http.authorizeHttpRequests(auth -> {
            for (String publicPath : properties.getPublicPaths()) {
                auth.requestMatchers(publicPath).permitAll();
            }
            auth.anyRequest().authenticated();
        });
        http.oauth2ResourceServer(oauth2 ->
            oauth2.jwt(jwt -> jwt.jwtAuthenticationConverter(jwtConverter())));

        return http.build();
    }

    /**
     * API-key authentication fallback for /api/* endpoints, enabled via
     * {@code security.api-key.enabled=true}.
     */
    @Bean
    @ConditionalOnProperty(name = "security.api-key.enabled", havingValue = "true")
    public FilterRegistrationBean<ApiKeyFilter> apiKeyFilter(
            SecurityProperties properties) {
        FilterRegistrationBean<ApiKeyFilter> filterBean =
            new FilterRegistrationBean<>();
        filterBean.setFilter(new ApiKeyFilter(properties.getApiKey()));
        filterBean.addUrlPatterns("/api/*");
        return filterBean;
    }

    /** Maps the "roles" JWT claim to ROLE_-prefixed granted authorities. */
    private JwtAuthenticationConverter jwtConverter() {
        JwtGrantedAuthoritiesConverter rolesConverter =
            new JwtGrantedAuthoritiesConverter();
        rolesConverter.setAuthoritiesClaimName("roles");
        rolesConverter.setAuthorityPrefix("ROLE_");

        JwtAuthenticationConverter authConverter = new JwtAuthenticationConverter();
        authConverter.setJwtGrantedAuthoritiesConverter(rolesConverter);
        return authConverter;
    }
}

Configuration

security:
  oauth2:
    enabled: true
    resource-server:
      jwt:
        issuer-uri: https://auth.example.com
        jwk-set-uri: https://auth.example.com/.well-known/jwks.json
  api-key:
    enabled: true
    header-name: X-API-Key
  public-paths:
    - /health
    - /metrics
    - /public/**

Scenario 7: Batch Processing

Requirements

  • Scheduled batch jobs
  • Parallel processing
  • Job monitoring
  • Failure recovery

Implementation

@AutoConfiguration
@ConditionalOnClass(JobLauncher.class)
@EnableBatchProcessing
@EnableConfigurationProperties(BatchProperties.class)
public class BatchAutoConfiguration {

    /**
     * The data-processing job. RunIdIncrementer lets the same job be
     * relaunched with fresh parameters. The {@code properties} parameter is
     * currently unused but kept for interface compatibility.
     */
    @Bean
    public Job dataProcessingJob(
            JobRepository jobRepository,
            Step processStep,
            BatchProperties properties) {
        return new JobBuilder("dataProcessingJob", jobRepository)
            .incrementer(new RunIdIncrementer())
            .flow(processStep)
            .end()
            .listener(jobExecutionListener())
            .build();
    }

    /**
     * Chunk-oriented step: reads, processes, and writes in transactional
     * chunks of {@code batch.chunk-size}, executed on a dedicated thread pool
     * for parallel processing.
     */
    @Bean
    public Step processStep(
            JobRepository jobRepository,
            PlatformTransactionManager transactionManager,
            ItemReader<Data> reader,
            ItemProcessor<Data, ProcessedData> processor,
            ItemWriter<ProcessedData> writer,
            BatchProperties properties) {
        return new StepBuilder("processStep", jobRepository)
            .<Data, ProcessedData>chunk(properties.getChunkSize(), transactionManager)
            .reader(reader)
            .processor(processor)
            .writer(writer)
            .taskExecutor(taskExecutor(properties))
            .build();
    }

    /**
     * Fix: the original declared a void {@code @Bean} method annotated with
     * {@code @Scheduled}. A {@code @Bean} factory method must return an
     * object, and {@code @Scheduled} belongs on a method of a managed bean,
     * not on the factory method itself. The scheduling logic now lives on a
     * dedicated bean created only when {@code batch.scheduling.enabled=true}.
     */
    @Bean
    @ConditionalOnProperty(name = "batch.scheduling.enabled", havingValue = "true")
    public ScheduledJobLauncher scheduledJobLauncher(
            JobLauncher jobLauncher,
            Job job) {
        return new ScheduledJobLauncher(jobLauncher, job);
    }

    /** Launches the job on the configured cron with a unique run timestamp. */
    public static class ScheduledJobLauncher {

        private final JobLauncher jobLauncher;
        private final Job job;

        ScheduledJobLauncher(JobLauncher jobLauncher, Job job) {
            this.jobLauncher = jobLauncher;
            this.job = job;
        }

        @Scheduled(cron = "${batch.scheduling.cron}")
        public void launch() throws Exception {
            // Unique timestamp parameter so each run gets a new JobInstance.
            JobParameters params = new JobParametersBuilder()
                .addLong("time", System.currentTimeMillis())
                .toJobParameters();
            jobLauncher.run(job, params);
        }
    }

    // Fix: the original referenced jobExecutionListener() without defining
    // it. Extension point for job monitoring; no-op by default.
    private JobExecutionListener jobExecutionListener() {
        return new JobExecutionListener() {
            @Override
            public void afterJob(JobExecution jobExecution) {
                // hook for monitoring: jobExecution.getStatus() is available here
            }
        };
    }

    /** Thread pool for parallel chunk processing, sized from batch.threads. */
    private TaskExecutor taskExecutor(BatchProperties properties) {
        ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor();
        executor.setCorePoolSize(properties.getThreads());
        executor.setMaxPoolSize(properties.getThreads() * 2);
        executor.setThreadNamePrefix("batch-");
        executor.initialize();
        return executor;
    }
}

Configuration

batch:
  chunk-size: 100
  threads: 4
  scheduling:
    enabled: true
    cron: "0 0 2 * * ?"  # 2 AM daily
  retry:
    max-attempts: 3
    backoff-period: 5000

Best Practices from Real-World Usage

  1. Configuration Validation: Validate early, fail fast
  2. Environment Variables: Use for secrets and environment-specific values
  3. Health Checks: Implement comprehensive health indicators
  4. Graceful Shutdown: Configure proper shutdown timeouts
  5. Connection Pooling: Tune pool sizes based on load
  6. Monitoring: Always include metrics and tracing
  7. Documentation: Document all configuration properties
  8. Testing: Test all conditional paths

Next Steps

  • Edge Cases - Handle special scenarios
  • Custom Auto-Configurations - Build your own
  • Reference Documentation - Detailed API specs

See Also

Install with Tessl CLI

npx tessl i tessl/maven-org-springframework-boot--spring-boot-autoconfigure

docs

examples

common-patterns.md

edge-cases.md

real-world-scenarios.md

index.md

tile.json