ONNX-based Transformer models for text embeddings within the Spring AI framework
The TransformersEmbeddingModelAutoConfiguration class provides Spring Boot auto-configuration for the TransformersEmbeddingModel. This eliminates the need for manual bean creation and configuration, automatically setting up the embedding model based on application properties.
The auto-configuration automatically creates a TransformersEmbeddingModel bean when the required dependencies are present and configuration conditions are met.
/**
* Creates and configures a TransformersEmbeddingModel bean.
*
* @param properties Configuration properties from application.properties/yaml
* @param observationRegistry Optional ObservationRegistry for monitoring
* @param observationConvention Optional custom observation convention
* @return Configured TransformersEmbeddingModel bean
* @throws Exception if initialization fails
*/
@Bean
@ConditionalOnMissingBean
public TransformersEmbeddingModel embeddingModel(
TransformersEmbeddingModelProperties properties,
ObjectProvider<ObservationRegistry> observationRegistry,
ObjectProvider<EmbeddingModelObservationConvention> observationConvention
) throws Exception;

Usage:
import org.springframework.ai.transformers.TransformersEmbeddingModel;
import org.springframework.stereotype.Service;
@Service
public class MyEmbeddingService {
private final TransformersEmbeddingModel embeddingModel;
// Auto-configured bean is automatically injected
public MyEmbeddingService(TransformersEmbeddingModel embeddingModel) {
this.embeddingModel = embeddingModel;
}
public float[] generateEmbedding(String text) {
// Bean is already initialized by Spring Boot
return embeddingModel.embed(text);
}
}

Constructor Injection (Recommended):
@Component
public class EmbeddingProcessor {
private final TransformersEmbeddingModel model;
// Constructor injection - immutable and testable
public EmbeddingProcessor(TransformersEmbeddingModel model) {
this.model = model;
}
public void process(List<String> texts) {
List<float[]> embeddings = model.embed(texts);
// Process embeddings
}
}

Field Injection (Not Recommended but Supported):
@Component
public class LegacyEmbeddingService {
@Autowired
private TransformersEmbeddingModel model;
public float[] embed(String text) {
return model.embed(text);
}
}

Setter Injection:
@Component
public class ConfigurableEmbeddingService {
private TransformersEmbeddingModel model;
@Autowired
public void setEmbeddingModel(TransformersEmbeddingModel model) {
this.model = model;
}
}

The auto-configuration activates only when all conditions are met:
@AutoConfiguration
@EnableConfigurationProperties(TransformersEmbeddingModelProperties.class)
@ConditionalOnProperty(
name = "spring.ai.embedding.model",
havingValue = "transformers",
matchIfMissing = true
)
@ConditionalOnClass({
OrtSession.class,
HuggingFaceTokenizer.class,
TransformersEmbeddingModel.class
})
public class TransformersEmbeddingModelAutoConfiguration {
// Auto-configuration bean methods
}

Class Conditions - All required classes must be on classpath:
// Required dependencies:
ai.onnxruntime.OrtSession // ONNX Runtime
ai.djl.huggingface.tokenizers.HuggingFaceTokenizer // HuggingFace Tokenizers
org.springframework.ai.transformers.TransformersEmbeddingModel // Embedding model class

<!-- Maven dependencies -->
<dependencies>
<!-- ONNX Runtime -->
<dependency>
<groupId>com.microsoft.onnxruntime</groupId>
<artifactId>onnxruntime</artifactId>
<version>1.16.0</version>
</dependency>
<!-- HuggingFace Tokenizers -->
<dependency>
<groupId>ai.djl.huggingface</groupId>
<artifactId>tokenizers</artifactId>
<version>0.25.0</version>
</dependency>
<!-- Spring AI Transformers -->
<dependency>
<groupId>org.springframework.ai</groupId>
<artifactId>spring-ai-transformers</artifactId>
<version>1.1.2</version>
</dependency>
</dependencies>

Property Condition:
// Property: spring.ai.embedding.model
// - havingValue = "transformers" - activates when property equals "transformers"
// - matchIfMissing = true - activates when property is not set (default)

# Auto-configuration activates (default)
# (property not set)
# Auto-configuration activates (explicit)
spring.ai.embedding.model=transformers
# Auto-configuration does NOT activate
spring.ai.embedding.model=openai
spring.ai.embedding.model=azure

Bean Condition:
// @ConditionalOnMissingBean
// Auto-configuration only creates bean if no TransformersEmbeddingModel bean exists

// Scenario 1: No existing bean - auto-configuration creates bean
// (No custom bean definition)
// Scenario 2: Custom bean exists - auto-configuration skips
@Configuration
public class CustomConfig {
@Bean
public TransformersEmbeddingModel customEmbeddingModel() throws Exception {
TransformersEmbeddingModel model = new TransformersEmbeddingModel();
model.setGpuDeviceId(0);
model.afterPropertiesSet();
return model;
}
// Auto-configuration will NOT create another bean
}

The auto-configuration bean factory method performs these configuration steps:
@Bean
@ConditionalOnMissingBean
public TransformersEmbeddingModel embeddingModel(
TransformersEmbeddingModelProperties properties,
ObjectProvider<ObservationRegistry> observationRegistry,
ObjectProvider<EmbeddingModelObservationConvention> observationConvention
) throws Exception {
// 1. Create model instance with metadata mode and observation registry
TransformersEmbeddingModel embeddingModel = new TransformersEmbeddingModel(
properties.getMetadataMode(),
observationRegistry.getIfUnique(() -> ObservationRegistry.NOOP)
);
// 2. Configure caching
embeddingModel.setDisableCaching(!properties.getCache().isEnabled());
embeddingModel.setResourceCacheDirectory(properties.getCache().getDirectory());
// 3. Configure tokenizer
embeddingModel.setTokenizerResource(properties.getTokenizer().getUri());
embeddingModel.setTokenizerOptions(properties.getTokenizer().getOptions());
// 4. Configure ONNX model
embeddingModel.setModelResource(properties.getOnnx().getModelUri());
embeddingModel.setGpuDeviceId(properties.getOnnx().getGpuDeviceId());
embeddingModel.setModelOutputName(properties.getOnnx().getModelOutputName());
// 5. Configure observation
observationConvention.ifAvailable(embeddingModel::setObservationConvention);
// 6. Initialize model (Spring automatically calls afterPropertiesSet())
// Note: This happens automatically for InitializingBean
return embeddingModel;
}

Configuration Mapping:
// From TransformersEmbeddingModelProperties to TransformersEmbeddingModel
// Caching configuration
embeddingModel.setDisableCaching(!properties.getCache().isEnabled());
// Property: spring.ai.embedding.transformer.cache.enabled (default: true)
// Setter: setDisableCaching(boolean) - inverted logic
embeddingModel.setResourceCacheDirectory(properties.getCache().getDirectory());
// Property: spring.ai.embedding.transformer.cache.directory
// Default: {java.io.tmpdir}/spring-ai-onnx-generative
// Tokenizer configuration
embeddingModel.setTokenizerResource(properties.getTokenizer().getUri());
// Property: spring.ai.embedding.transformer.tokenizer.uri
// Default: https://raw.githubusercontent.com/.../tokenizer.json
embeddingModel.setTokenizerOptions(properties.getTokenizer().getOptions());
// Property: spring.ai.embedding.transformer.tokenizer.options.*
// Default: empty map
// ONNX model configuration
embeddingModel.setModelResource(properties.getOnnx().getModelUri());
// Property: spring.ai.embedding.transformer.onnx.model-uri
// Default: https://github.com/.../model.onnx
embeddingModel.setGpuDeviceId(properties.getOnnx().getGpuDeviceId());
// Property: spring.ai.embedding.transformer.onnx.gpu-device-id
// Default: -1 (CPU)
embeddingModel.setModelOutputName(properties.getOnnx().getModelOutputName());
// Property: spring.ai.embedding.transformer.onnx.model-output-name
// Default: "last_hidden_state"
// Observation configuration
observationConvention.ifAvailable(embeddingModel::setObservationConvention);
// Bean: EmbeddingModelObservationConvention (optional)

ObjectProvider Usage:
// ObjectProvider provides optional dependency injection
public interface ObjectProvider<T> {
/**
* Get object if available, otherwise use default supplier.
*/
T getIfUnique(Supplier<T> defaultSupplier);
/**
* Execute action if object is available.
*/
void ifAvailable(Consumer<T> action);
/**
* Get object if available, otherwise return null.
*/
T getIfAvailable();
}

// Example: ObservationRegistry with fallback
ObservationRegistry registry = observationRegistry.getIfUnique(() -> ObservationRegistry.NOOP);
// If ObservationRegistry bean exists: use it
// If no ObservationRegistry bean: use NOOP (no-op registry)
// Example: Optional configuration
observationConvention.ifAvailable(embeddingModel::setObservationConvention);
// If EmbeddingModelObservationConvention bean exists: set it
// If no bean: do nothing (use default)

To disable the auto-configuration and provide your own bean:
spring.autoconfigure.exclude=org.springframework.ai.model.transformers.autoconfigure.TransformersEmbeddingModelAutoConfiguration

Effect: Auto-configuration class is not processed at all.
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.ai.model.transformers.autoconfigure.TransformersEmbeddingModelAutoConfiguration;
@SpringBootApplication(exclude = {
TransformersEmbeddingModelAutoConfiguration.class
})
public class Application {
public static void main(String[] args) {
SpringApplication.run(Application.class, args);
}
}

Effect: Same as Option 1, but configured in Java code.
# Configure to use a different embedding model
spring.ai.embedding.model=openai

Effect: The @ConditionalOnProperty check fails, preventing auto-configuration activation.
Available Values:
# Different embedding providers
spring.ai.embedding.model=transformers # Activates auto-configuration
spring.ai.embedding.model=openai # Deactivates auto-configuration
spring.ai.embedding.model=azure # Deactivates auto-configuration
spring.ai.embedding.model=bedrock # Deactivates auto-configuration
spring.ai.embedding.model=vertex # Deactivates auto-configuration
# (any value other than "transformers" deactivates)

import org.springframework.ai.transformers.TransformersEmbeddingModel;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
@Configuration
public class CustomEmbeddingConfig {
@Bean
public TransformersEmbeddingModel embeddingModel() throws Exception {
TransformersEmbeddingModel model = new TransformersEmbeddingModel();
model.setGpuDeviceId(0);
model.afterPropertiesSet();
return model;
}
// @ConditionalOnMissingBean prevents auto-configuration from creating another bean
}

Effect: Your custom bean is used instead of auto-configured bean. No need to exclude auto-configuration.
When auto-configuration is disabled or overridden, create your own bean:
import org.springframework.ai.transformers.TransformersEmbeddingModel;
import org.springframework.ai.document.MetadataMode;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import io.micrometer.observation.ObservationRegistry;
import java.util.Map;
@Configuration
public class CustomEmbeddingConfig {
@Bean
public TransformersEmbeddingModel embeddingModel(
ObservationRegistry observationRegistry) throws Exception {
TransformersEmbeddingModel model = new TransformersEmbeddingModel(
MetadataMode.EMBED,
observationRegistry
);
// Custom model configuration
model.setModelResource("classpath:/models/custom-model.onnx");
model.setTokenizerResource("classpath:/models/custom-tokenizer.json");
// Custom tokenizer options
model.setTokenizerOptions(Map.of(
"addSpecialTokens", "true",
"modelMaxLength", "512"
));
// GPU acceleration
model.setGpuDeviceId(0);
// Custom cache directory
model.setResourceCacheDirectory("/custom/cache/path");
// Custom model output name
model.setModelOutputName("last_hidden_state");
// CRITICAL: Manual initialization required
model.afterPropertiesSet();
return model;
}
}

Important: When creating the bean manually, you must call afterPropertiesSet() before returning the bean.
@Configuration
public class RobustEmbeddingConfig {
private static final Logger logger = LoggerFactory.getLogger(RobustEmbeddingConfig.class);
/**
 * Creates a TransformersEmbeddingModel, attempting GPU initialization first
 * and transparently falling back to CPU when the failure is GPU-related.
 *
 * @param observationRegistry registry used for embedding-call observations
 * @return fully initialized embedding model (afterPropertiesSet() already called)
 * @throws Exception if initialization fails for a reason unrelated to the GPU
 */
@Bean
public TransformersEmbeddingModel embeddingModel(
        ObservationRegistry observationRegistry) throws Exception {
    TransformersEmbeddingModel model = new TransformersEmbeddingModel(
            MetadataMode.NONE,
            observationRegistry
    );
    // Try GPU first, fall back to CPU
    model.setGpuDeviceId(0);
    model.setModelResource("classpath:/models/model.onnx");
    model.setTokenizerResource("classpath:/tokenizers/tokenizer.json");
    try {
        model.afterPropertiesSet();
        logger.info("Initialized embedding model with GPU");
    }
    catch (Exception e) {
        // Throwable.getMessage() may return null, so guard before
        // substring-matching to avoid an NPE that would mask the real failure.
        String message = e.getMessage();
        if (message != null && (message.contains("CUDA") || message.contains("GPU"))) {
            logger.warn("GPU initialization failed, falling back to CPU", e);
            model = new TransformersEmbeddingModel(MetadataMode.NONE, observationRegistry);
            model.setGpuDeviceId(-1); // CPU
            model.setModelResource("classpath:/models/model.onnx");
            model.setTokenizerResource("classpath:/tokenizers/tokenizer.json");
            model.afterPropertiesSet();
            logger.info("Initialized embedding model with CPU");
        }
        else {
            throw e;
        }
    }
    return model;
}
}

@Configuration
public class ConditionalEmbeddingConfig {
@Value("${app.embedding.use-gpu:false}")
private boolean useGpu;
@Value("${app.embedding.model-path}")
private String modelPath;
@Bean
public TransformersEmbeddingModel embeddingModel() throws Exception {
TransformersEmbeddingModel model = new TransformersEmbeddingModel();
// Configure from application properties
model.setGpuDeviceId(useGpu ? 0 : -1);
model.setModelResource(modelPath);
model.afterPropertiesSet();
return model;
}
}

The auto-configuration integrates seamlessly with Spring Boot:
<dependencies>
<!-- Spring AI Transformers (includes auto-configuration) -->
<dependency>
<groupId>org.springframework.ai</groupId>
<artifactId>spring-ai-transformers</artifactId>
<version>1.1.2</version>
</dependency>
<!-- Spring Boot Starter -->
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter</artifactId>
</dependency>
<!-- ONNX Runtime (transitive dependency) -->
<!-- Included automatically by spring-ai-transformers -->
<!-- HuggingFace Tokenizers (transitive dependency) -->
<!-- Included automatically by spring-ai-transformers -->
</dependencies>

For GPU Support:
<dependencies>
<!-- Replace onnxruntime with onnxruntime-gpu -->
<dependency>
<groupId>com.microsoft.onnxruntime</groupId>
<artifactId>onnxruntime-gpu</artifactId>
<version>1.16.0</version>
</dependency>
</dependencies>

See Spring Boot Configuration for detailed property reference:
# Enable/configure auto-configuration
spring.ai.embedding.model=transformers
# Model configuration
spring.ai.embedding.transformer.onnx.model-uri=classpath:/models/model.onnx
spring.ai.embedding.transformer.onnx.gpu-device-id=0
# Tokenizer configuration
spring.ai.embedding.transformer.tokenizer.uri=classpath:/tokenizers/tokenizer.json
# Cache configuration
spring.ai.embedding.transformer.cache.directory=/var/cache/models
spring.ai.embedding.transformer.cache.enabled=true

Spring Boot discovers the auto-configuration via META-INF/spring/org.springframework.boot.autoconfigure.AutoConfiguration.imports:
org.springframework.ai.model.transformers.autoconfigure.TransformersEmbeddingModelAutoConfiguration

How Discovery Works:
META-INF/spring/org.springframework.boot.autoconfigure.AutoConfiguration.imports

Verifying Auto-Configuration:
# Enable debug logging
logging.level.org.springframework.boot.autoconfigure=DEBUG

Debug Output:
# Positive match (auto-configuration active)
TransformersEmbeddingModelAutoConfiguration matched:
- @ConditionalOnClass found required classes
- @ConditionalOnProperty matched
- @ConditionalOnMissingBean matched
# Negative match (auto-configuration inactive)
TransformersEmbeddingModelAutoConfiguration did not match:
- @ConditionalOnClass did not find required class 'OrtSession'

package org.springframework.ai.model.transformers.autoconfigure;
@AutoConfiguration
@EnableConfigurationProperties(TransformersEmbeddingModelProperties.class)
@ConditionalOnProperty(
name = "spring.ai.embedding.model",
havingValue = "transformers",
matchIfMissing = true
)
@ConditionalOnClass({
OrtSession.class,
HuggingFaceTokenizer.class,
TransformersEmbeddingModel.class
})
public class TransformersEmbeddingModelAutoConfiguration {
@Bean
@ConditionalOnMissingBean
public TransformersEmbeddingModel embeddingModel(
TransformersEmbeddingModelProperties properties,
ObjectProvider<ObservationRegistry> observationRegistry,
ObjectProvider<EmbeddingModelObservationConvention> observationConvention
) throws Exception;
}

package org.springframework.beans.factory;
/**
* Spring Framework ObjectProvider interface for optional dependency injection.
*/
public interface ObjectProvider<T> extends ObjectFactory<T>, Iterable<T> {
/**
* Get object if available, otherwise use default supplier.
* Returns non-null value even if no bean exists.
*
* @param defaultSupplier Supplier for default value
* @return Bean instance or default value
*/
T getIfUnique(Supplier<T> defaultSupplier);
/**
* Execute action if object is available.
* Does nothing if no bean exists.
*
* @param action Consumer to execute with bean
*/
void ifAvailable(Consumer<T> action);
/**
* Get object if available, otherwise return null.
*
* @return Bean instance or null
*/
T getIfAvailable();
/**
* Get object, throw exception if not available.
*
* @return Bean instance
* @throws NoSuchBeanDefinitionException if not available
*/
T getObject() throws BeansException;
}

Usage: Provides optional dependency injection. If the bean exists, it's used; otherwise, a default or no-op is used.
/**
* Indicates that a class provides auto-configuration.
*/
@AutoConfiguration
public @interface AutoConfiguration {
// Auto-configuration ordering and dependencies
}
/**
* Enables support for ConfigurationProperties beans.
*/
@EnableConfigurationProperties
public @interface EnableConfigurationProperties {
Class<?>[] value();
}
/**
* Conditional annotation based on property value.
*/
@ConditionalOnProperty
public @interface ConditionalOnProperty {
String[] name(); // Property name(s)
String havingValue(); // Expected value
boolean matchIfMissing(); // Match if property not set
}
/**
* Conditional annotation based on class presence.
*/
@ConditionalOnClass
public @interface ConditionalOnClass {
Class<?>[] value(); // Required classes
}
/**
* Conditional annotation based on missing bean.
*/
@ConditionalOnMissingBean
public @interface ConditionalOnMissingBean {
Class<?>[] value(); // Bean types that must not exist
}

Symptoms: No TransformersEmbeddingModel bean found in context
@Service
public class MyService {
@Autowired
private TransformersEmbeddingModel model; // Null or NoSuchBeanDefinitionException
}Possible Causes:
<!-- Check pom.xml includes all required dependencies -->
<dependency>
<groupId>org.springframework.ai</groupId>
<artifactId>spring-ai-transformers</artifactId>
<version>1.1.2</version>
</dependency>Verify:
mvn dependency:tree | grep -E "onnxruntime|tokenizers|spring-ai-transformers"# Check application.properties
spring.ai.embedding.model=openai # Should be "transformers" or not setFix:
spring.ai.embedding.model=transformers
# OR remove property to use default@Configuration
public class SomeConfig {
@Bean
public TransformersEmbeddingModel myCustomBean() {
// Custom bean prevents auto-configuration
}
}Solution: Remove custom bean definition or use it intentionally.
# Check application.properties
spring.autoconfigure.exclude=org.springframework.ai.model.transformers.autoconfigure.TransformersEmbeddingModelAutoConfigurationFix: Remove exclusion or add custom bean definition.
Debugging:
# Enable auto-configuration report
debug=true
# Enable detailed logging
logging.level.org.springframework.boot.autoconfigure=DEBUG
logging.level.org.springframework.ai=DEBUGCheck startup output for:
============================
CONDITIONS EVALUATION REPORT
============================
Positive matches:
-----------------
TransformersEmbeddingModelAutoConfiguration matched:
- ...
Negative matches:
-----------------
TransformersEmbeddingModelAutoConfiguration:
Did not match:
- @ConditionalOnClass did not find required class 'ai.onnxruntime.OrtSession'

Symptoms: Bean creation fails with exceptions
Error creating bean with name 'embeddingModel':
Exception during initialization: ...Possible Causes:
spring.ai.embedding.transformer.onnx.model-uri=https://invalid-url.com/model.onnxSymptoms:
Caused by: java.net.UnknownHostException: invalid-url.comSolutions:
classpath:/models/model.onnxspring.ai.embedding.transformer.cache.directory=/root/cacheSymptoms:
Caused by: java.io.IOException: Permission deniedSolutions:
${user.home}/.spring-ai/cachespring.ai.embedding.transformer.onnx.gpu-device-id=0Symptoms:
Caused by: ai.onnxruntime.OrtException: CUDA errorSolutions:
onnxruntime-gpu dependencygpu-device-id=-1spring.ai.embedding.transformer.onnx.gpu-device-id=invalidSymptoms:
Caused by: java.lang.NumberFormatExceptionSolutions:
Debugging Initialization:
@Configuration
public class DebugConfig {
@Bean
public TransformersEmbeddingModel embeddingModel(
TransformersEmbeddingModelProperties properties) {
// Log configuration
System.out.println("Model URI: " + properties.getOnnx().getModelUri());
System.out.println("GPU Device: " + properties.getOnnx().getGpuDeviceId());
System.out.println("Cache Dir: " + properties.getCache().getDirectory());
try {
TransformersEmbeddingModel model = new TransformersEmbeddingModel();
// Configure...
model.afterPropertiesSet();
System.out.println("Initialization successful");
return model;
} catch (Exception e) {
System.err.println("Initialization failed: " + e.getMessage());
e.printStackTrace();
throw new RuntimeException(e);
}
}
}

Symptoms: Bean uses defaults instead of configured values
spring.ai.embedding.transformer.onnx.gpu-device-id=0
# But model runs on CPUPossible Causes:
# ❌ WRONG
spring.ai.embedding.onnx.gpu-device-id=0
# ✅ CORRECT
spring.ai.embedding.transformer.onnx.gpu-device-id=0# ❌ WRONG
spring.ai.embedding.transformer.onnx.gpu-id=0
# ✅ CORRECT
spring.ai.embedding.transformer.onnx.gpu-device-id=0# Check which properties file is used
java -jar app.jar --spring.config.location=classpath:/application.properties
# Verify properties are loaded
java -jar app.jar --debug# application-prod.properties
spring.ai.embedding.transformer.onnx.gpu-device-id=0# Activate profile
java -jar app.jar --spring.profiles.active=prodSolutions:
src/main/resources/)logging.level.org.springframework.boot=DEBUGdebug=trueProperty Precedence:
1. Command-line arguments (highest)
--spring.ai.embedding.transformer.onnx.gpu-device-id=0
2. Java System properties
-Dspring.ai.embedding.transformer.onnx.gpu-device-id=0
3. OS environment variables
SPRING_AI_EMBEDDING_TRANSFORMER_ONNX_GPUDEVICEID=0
4. Profile-specific properties (application-{profile}.properties)
5. application.properties in current directory
6. application.properties in classpath
7. Default values (lowest)

Symptoms: Application takes long time to start
Cause: Model download on first startup
Solutions:
# 1. Use local/classpath resources
spring.ai.embedding.transformer.onnx.model-uri=classpath:/models/model.onnx
spring.ai.embedding.transformer.tokenizer.uri=classpath:/tokenizers/tokenizer.json
# 2. Pre-warm cache during build/deployment
# Download models before starting application
# 3. Use persistent cache directory
spring.ai.embedding.transformer.cache.directory=/persistent/cache
# 4. Enable lazy initialization for development
spring.main.lazy-initialization=true
# (Delays model loading until first use)

Scenario: Need multiple embedding model instances with different configurations
Solution: Use custom beans with qualifiers
/**
 * Declares two independently qualified embedding-model beans: one pinned to
 * the CPU and one targeting GPU device 0. Consumers select a specific
 * instance via the matching {@code @Qualifier}.
 */
@Configuration
public class MultiModelConfig {

    /** CPU-only embedding model (device id -1 disables GPU execution). */
    @Bean
    @Qualifier("cpuModel")
    public TransformersEmbeddingModel cpuEmbeddingModel() throws Exception {
        return initializedModel(-1);
    }

    /** GPU-backed embedding model bound to device 0. */
    @Bean
    @Qualifier("gpuModel")
    public TransformersEmbeddingModel gpuEmbeddingModel() throws Exception {
        return initializedModel(0);
    }

    // Shared construction path: set the target device and run the mandatory
    // afterPropertiesSet() initialization before handing the bean to Spring.
    private TransformersEmbeddingModel initializedModel(int gpuDeviceId) throws Exception {
        TransformersEmbeddingModel embeddingModel = new TransformersEmbeddingModel();
        embeddingModel.setGpuDeviceId(gpuDeviceId);
        embeddingModel.afterPropertiesSet();
        return embeddingModel;
    }
}
@Service
public class MultiModelService {
@Autowired
@Qualifier("cpuModel")
private TransformersEmbeddingModel cpuModel;
@Autowired
@Qualifier("gpuModel")
private TransformersEmbeddingModel gpuModel;
public float[] embedWithCpu(String text) {
return cpuModel.embed(text);
}
public float[] embedWithGpu(String text) {
return gpuModel.embed(text);
}
}

// ✅ GOOD: Let auto-configuration handle setup
@Service
public class EmbeddingService {
private final TransformersEmbeddingModel model;
public EmbeddingService(TransformersEmbeddingModel model) {
this.model = model;
}
}

# ✅ GOOD: Externalized configuration
spring.ai.embedding.transformer.onnx.gpu-device-id=${GPU_DEVICE_ID:0}
spring.ai.embedding.transformer.cache.directory=${CACHE_DIR:/var/cache/models}# application-dev.properties
spring.ai.embedding.transformer.onnx.gpu-device-id=-1
spring.ai.embedding.transformer.cache.directory=./cache
# application-prod.properties
spring.ai.embedding.transformer.onnx.gpu-device-id=0
spring.ai.embedding.transformer.cache.directory=/var/cache/spring-ai// Use custom bean only if you need:
// - Multiple model instances
// - Complex initialization logic
// - Runtime configuration
// - Special error handling
@Configuration
public class CustomConfig {
@Bean
public TransformersEmbeddingModel embeddingModel() throws Exception {
// Custom logic here
}
}tessl i tessl/maven-org-springframework-ai--spring-ai-transformers@1.1.1