CLI utility and Python library for interacting with Large Language Models from multiple providers including OpenAI, Anthropic, Google, and Meta plus locally installed models.
—
User configuration directory management, API key storage and retrieval, model aliases, and default settings with environment variable support. This module provides comprehensive configuration management for the LLM package.
The package maintains a user-specific configuration directory for storing settings, keys, and data.
def user_dir() -> pathlib.Path:
"""
Get user configuration directory path.
Returns:
Path to user configuration directory. Creates directory if it doesn't exist.
Notes:
- Uses LLM_USER_PATH environment variable if set
- Otherwise uses platform-specific app directory
- Directory is created automatically if it doesn't exist
"""Comprehensive system for storing, retrieving, and managing API keys from multiple sources.
def get_key(
input: Optional[str] = None,
*,
alias: Optional[str] = None,
env: Optional[str] = None
) -> Optional[str]:
"""
Retrieve API key from various sources with fallback hierarchy.
Args:
input: Direct key value or alias name to lookup
alias: Key alias to look up in stored keys
env: Environment variable name to check
Returns:
API key string or None if not found
Notes:
Key resolution order:
1. If input matches a stored alias, return that key
2. If input is provided and not an alias, return input directly
3. If alias matches a stored key, return that key
4. If env variable is set, return its value
5. Return None if no key found
"""
def load_keys() -> dict:
"""
Load stored API keys from configuration file.
Returns:
Dictionary of alias -> key mappings from keys.json
"""System for creating and managing model name aliases for convenience and consistency.
def set_alias(alias: str, model_id_or_alias: str):
"""
Set an alias to point to a specific model.
Args:
alias: Alias name to create
model_id_or_alias: Model ID or existing alias to point to
Notes:
- Resolves model_id_or_alias to actual model ID if possible
- Falls back to exact string if model not found
- Saves alias to aliases.json in user directory
"""
def remove_alias(alias: str):
"""
Remove an existing alias.
Args:
alias: Alias name to remove
Raises:
KeyError: If alias doesn't exist or aliases.json is invalid
"""Functions to manage default model settings for various operations.
def get_default_model() -> str:
"""
Get the current default model name.
Returns:
Default model name or DEFAULT_MODEL constant if not configured
"""
def set_default_model(model: Optional[str]):
"""
Set the default model.
Args:
model: Model name to set as default, or None to clear
Notes:
- Setting to None removes the default_model.txt file
- Uses DEFAULT_MODEL constant when no default is set
"""
def get_default_embedding_model() -> Optional[str]:
"""
Get the current default embedding model name.
Returns:
Default embedding model name or None if not configured
"""
def set_default_embedding_model(model: Optional[str]):
"""
Set the default embedding model.
Args:
model: Embedding model name to set as default, or None to clear
"""The user directory contains several configuration files:
Stores API keys mapped to aliases:
{
"openai": "sk-...",
"anthropic": "sk-ant-...",
"my_key": "custom-key-value"
}
Maps model aliases to actual model IDs:
{
"fast": "gpt-3.5-turbo",
"smart": "gpt-4",
"claude": "claude-3-sonnet-20240229"
}
Contains the name of the default model:
gpt-4
Contains the name of the default embedding model:
text-embedding-ada-002
import llm
# Get user configuration directory
config_dir = llm.user_dir()
print(f"Configuration directory: {config_dir}")
# Check what files exist
import os
config_files = os.listdir(config_dir)
print(f"Configuration files: {config_files}")
# Directory is created automatically if it doesn't exist
print(f"Directory exists: {config_dir.exists()}")import llm
# Load all stored keys
keys = llm.load_keys()
print(f"Stored key aliases: {list(keys.keys())}")
# Get key by alias (from keys.json)
openai_key = llm.get_key(alias="openai")
if openai_key:
print("OpenAI key found")
else:
print("OpenAI key not configured")
# Get key from environment variable
env_key = llm.get_key(env="OPENAI_API_KEY")
if env_key:
print("Key found in environment")
# Hierarchy: direct input -> alias -> env variable
api_key = llm.get_key(
input="my_openai_key", # First try as alias
alias="openai", # Fallback to this alias
env="OPENAI_API_KEY" # Final fallback to env var
)
# Using with model that needs a key
if api_key:
model = llm.get_model("gpt-4")
# Key is used automatically by KeyModel implementations
import llm
# Set up convenient aliases
llm.set_alias("fast", "gpt-3.5-turbo")
llm.set_alias("smart", "gpt-4")
llm.set_alias("claude", "claude-3-sonnet-20240229")
# Use aliases instead of full model names
fast_model = llm.get_model("fast")
smart_model = llm.get_model("smart")
print(f"Fast model: {fast_model.model_id}")
print(f"Smart model: {smart_model.model_id}")
# Set alias to another alias (chains)
llm.set_alias("default", "smart")
default_model = llm.get_model("default")
print(f"Default model: {default_model.model_id}")
# Remove alias when no longer needed
llm.remove_alias("default")import llm
# Check current default
current_default = llm.get_default_model()
print(f"Current default model: {current_default}")
# Set new default
llm.set_default_model("gpt-4")
print(f"New default: {llm.get_default_model()}")
# Use default model (no name specified)
model = llm.get_model() # Uses default automatically
print(f"Got model: {model.model_id}")
# Clear default (reverts to DEFAULT_MODEL constant)
llm.set_default_model(None)
print(f"After clearing: {llm.get_default_model()}")
# Embedding model defaults work similarly
llm.set_default_embedding_model("text-embedding-ada-002")
embedding_model = llm.get_embedding_model()  # Uses default if available
import llm
import os
# Set environment variables for testing
os.environ["LLM_USER_PATH"] = "/custom/config/path"
os.environ["OPENAI_API_KEY"] = "sk-test-key-from-env"
# Custom user directory
custom_dir = llm.user_dir()
print(f"Custom config directory: {custom_dir}")
# Key from environment
env_key = llm.get_key(env="OPENAI_API_KEY")
print(f"Key from environment: {env_key[:10]}...")
# Hierarchical key resolution
# Try alias first, fall back to environment
key = llm.get_key(alias="openai", env="OPENAI_API_KEY")
print("Using hierarchical key resolution")import llm
import json
# Manually inspect configuration files
config_dir = llm.user_dir()
# Check aliases
aliases_file = config_dir / "aliases.json"
if aliases_file.exists():
with open(aliases_file) as f:
aliases = json.load(f)
print(f"Current aliases: {aliases}")
# Check keys (be careful with security)
keys_file = config_dir / "keys.json"
if keys_file.exists():
keys = llm.load_keys()
# Don't print actual keys!
print(f"Configured key aliases: {list(keys.keys())}")
# Check default models
default_file = config_dir / "default_model.txt"
if default_file.exists():
with open(default_file) as f:
default_model = f.read().strip()
print(f"Default model from file: {default_model}")import llm
# Set up development vs production configurations
def setup_dev_config():
    """Register development aliases and set development defaults."""
    # Alias registrations, applied in insertion order.
    dev_aliases = {
        "dev_chat": "gpt-3.5-turbo",
        "dev_smart": "gpt-4",
    }
    for name, model_id in dev_aliases.items():
        llm.set_alias(name, model_id)
    llm.set_default_model("dev_chat")
    llm.set_default_embedding_model("text-embedding-ada-002")
def setup_prod_config():
    """Register production aliases and set production defaults."""
    # Alias registrations, applied in insertion order.
    prod_aliases = {
        "prod_chat": "gpt-4",
        "prod_embeddings": "text-embedding-3-large",
    }
    for name, model_id in prod_aliases.items():
        llm.set_alias(name, model_id)
    llm.set_default_model("prod_chat")
    llm.set_default_embedding_model("prod_embeddings")
# Environment-based configuration
import os
if os.getenv("ENV") == "production":
setup_prod_config()
else:
setup_dev_config()
# Use environment-appropriate defaults
model = llm.get_model() # Gets appropriate default
embedding_model = llm.get_embedding_model()  # Gets appropriate default
import llm
def validate_configuration():
    """Validate current configuration setup.

    Checks directory accessibility, required key aliases, and that the
    default model resolves; reports all problems found, or success.
    """
    problems = []

    # The configuration directory must be reachable and present.
    try:
        if not llm.user_dir().exists():
            problems.append("Configuration directory not found")
    except Exception as err:
        problems.append(f"Cannot access configuration directory: {err}")

    # Required key aliases must exist among the stored keys.
    stored = llm.load_keys()
    problems.extend(
        f"Missing required key alias: {name}"
        for name in ("openai", "anthropic")
        if name not in stored
    )

    # The configured default model must resolve to a real model.
    try:
        model = llm.get_model()
        print(f"Default model OK: {model.model_id}")
    except Exception as err:
        problems.append(f"Default model invalid: {err}")

    if problems:
        print("Configuration issues found:")
        for problem in problems:
            print(f"- {problem}")
    else:
        print("Configuration validation passed")
validate_configuration()
import llm
import shutil
import json
from datetime import datetime
def backup_configuration():
    """Copy the configuration directory to a timestamped sibling backup.

    Returns:
        Path to the newly created backup directory.
    """
    source = llm.user_dir()
    stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    backup_dir = source.parent / f"llm_config_backup_{stamp}"
    shutil.copytree(source, backup_dir)
    print(f"Configuration backed up to: {backup_dir}")
    return backup_dir
def export_config():
    """Export aliases and default-model settings to a portable dict.

    API keys are deliberately excluded for security. Returns the export
    payload as a plain dictionary.
    """
    # Aliases are safe to share; keys.json is never read here.
    aliases_path = llm.user_dir() / "aliases.json"
    stored_aliases = {}
    if aliases_path.exists():
        with open(aliases_path) as fh:
            stored_aliases = json.load(fh)

    export_data = {
        "aliases": stored_aliases,
        "default_model": llm.get_default_model(),
        "default_embedding_model": llm.get_default_embedding_model(),
        "export_timestamp": datetime.now().isoformat(),
    }
    print("Configuration exported (keys excluded for security)")
    return export_data
# Create backup before major changes
backup_path = backup_configuration()
# Export shareable configuration
config_export = export_config()
print(json.dumps(config_export, indent=2))
This comprehensive configuration system provides secure, flexible management of API keys, model preferences, and user settings while supporting both programmatic and file-based configuration approaches.
Install with Tessl CLI
npx tessl i tessl/pypi-llm