A CLI utility and Python library for interacting with Large Language Models from multiple providers — including OpenAI, Anthropic, Google, and Meta — as well as locally installed models.

This module provides the extensible plugin architecture: hook specifications for registering models, tools, templates, and commands. Built on Pluggy, it enables third-party extensions and custom integrations, and exposes functions to discover and work with plugins in the LLM ecosystem.
def get_plugins(all: bool = False) -> List[dict]:
    """
    Get the list of registered plugins with their metadata.

    Note: the parameter is named ``all`` (shadowing the builtin) because that
    is the library's documented public API name.

    Args:
        all: If True, include default (built-in) plugins. If False, only
            show third-party plugins.

    Returns:
        List of plugin dictionaries, each containing:
        - name: Plugin name
        - hooks: List of hook names the plugin implements
        - version: Plugin version (if available)
    """
def load_plugins():
    """Load all registered plugins from entry points."""


# The global plugin manager instance that coordinates all plugin operations.
pm: PluginManager
"""
Global plugin manager instance using the Pluggy framework.

Provides hook calling and plugin management functionality.

Available hook callers:
- pm.hook.register_models
- pm.hook.register_embedding_models
- pm.hook.register_tools
- pm.hook.register_template_loaders
- pm.hook.register_fragment_loaders
- pm.hook.register_commands
"""

# Decorator for implementing plugin hooks.
@hookimpl
def plugin_function(*args, **kwargs):
    """
    Decorator for implementing plugin hook functions.

    Used to mark functions as implementations of specific hooks.
    The function name should match the hook specification it implements.
    """


# The plugin system defines several hook specifications that plugins can
# implement.
def register_models(register):
    """
    Hook for registering LLM models.

    Args:
        register: Function to call with (model, async_model, aliases)

    Example:
        @hookimpl
        def register_models(register):
            register(MyModel("my-model"), aliases=["alias1", "alias2"])
    """
def register_embedding_models(register):
    """
    Hook for registering embedding models.

    Args:
        register: Function to call with (model, aliases)

    Example:
        @hookimpl
        def register_embedding_models(register):
            register(MyEmbeddingModel("my-embeddings"), aliases=["embed"])
    """
def register_tools(register):
    """
    Hook for registering tools and toolboxes.

    Args:
        register: Function to call with (tool_or_function, name)

    Example:
        @hookimpl
        def register_tools(register):
            register(my_function, name="my_tool")
            register(MyToolbox, name="my_toolbox")
    """
def register_template_loaders(register):
    """
    Hook for registering template loaders.

    Args:
        register: Function to call with (prefix, loader_function)

    Example:
        @hookimpl
        def register_template_loaders(register):
            register("yaml", yaml_template_loader)
    """
def register_fragment_loaders(register):
    """
    Hook for registering fragment loaders.

    Args:
        register: Function to call with (prefix, loader_function)

    Example:
        @hookimpl
        def register_fragment_loaders(register):
            register("file", file_fragment_loader)
    """
def register_commands(cli):
    """
    Hook for registering CLI commands.

    Args:
        cli: Click CLI group to add commands to

    Example:
        @hookimpl
        def register_commands(cli):
            @cli.command()
            def my_command():
                click.echo("Hello from plugin!")
    """


import llm
# Create a simple plugin file (e.g., my_plugin.py)
@llm.hookimpl
def register_models(register):
    """Register a custom model that echoes back its input."""

    class EchoModel(llm.Model):
        """A model that echoes back the input."""

        model_id = "echo"

        def prompt(self, prompt, **kwargs):
            # Simple echo implementation
            return EchoResponse(f"Echo: {prompt}")

    class EchoResponse(llm.Response):
        def __init__(self, text):
            self._text = text

        def text(self):
            return self._text

        def __iter__(self):
            yield self._text

    # Register the model with aliases
    register(EchoModel(), aliases=["echo", "test"])


# Plugin is automatically discovered and loaded.


import llm
import requests


@llm.hookimpl
def register_tools(register):
    """Register HTTP tools."""

    def http_get(url: str) -> str:
        """Make HTTP GET request and return response text."""
        try:
            response = requests.get(url, timeout=10)
            response.raise_for_status()
            return response.text[:1000]  # Truncate for safety
        except requests.RequestException as e:
            raise llm.CancelToolCall(f"HTTP request failed: {e}")

    def http_post(url: str, data: str) -> str:
        """Make HTTP POST request with data."""
        try:
            response = requests.post(url, data=data, timeout=10)
            response.raise_for_status()
            return f"POST successful: {response.status_code}"
        except requests.RequestException as e:
            raise llm.CancelToolCall(f"HTTP POST failed: {e}")

    # Register individual tools
    register(http_get, name="http_get")
    register(http_post, name="http_post")

    # Register a toolbox
    class HttpToolbox(llm.Toolbox):
        """Collection of HTTP tools."""

        def tools(self):
            return [
                llm.Tool.function(http_get),
                llm.Tool.function(http_post),
                llm.Tool.function(self.http_head),
            ]

        def http_head(self, url: str) -> dict:
            """Make HTTP HEAD request and return headers."""
            try:
                response = requests.head(url, timeout=10)
                response.raise_for_status()
                return dict(response.headers)
            except requests.RequestException as e:
                raise llm.CancelToolCall(f"HTTP HEAD failed: {e}")

    register(HttpToolbox, name="http")


# Use the registered tools
tools = llm.get_tools()
http_tools = [t for name, t in tools.items() if name.startswith("http")]


import llm
import yaml
import json


@llm.hookimpl
def register_template_loaders(register):
    """Register custom template loaders."""

    def yaml_loader(spec: str) -> llm.Template:
        """Load template from YAML specification."""
        try:
            config = yaml.safe_load(spec)
            return llm.Template(
                name=config['name'],
                prompt=config.get('prompt'),
                system=config.get('system'),
                model=config.get('model'),
                defaults=config.get('defaults', {}),
                options=config.get('options', {}),
            )
        except Exception as e:
            raise ValueError(f"Invalid YAML template: {e}")

    def json_loader(spec: str) -> llm.Template:
        """Load template from JSON specification."""
        try:
            config = json.loads(spec)
            return llm.Template(**config)
        except Exception as e:
            raise ValueError(f"Invalid JSON template: {e}")

    def file_loader(file_path: str) -> llm.Template:
        """Load template from file."""
        import os
        if not os.path.exists(file_path):
            raise FileNotFoundError(f"Template file not found: {file_path}")
        with open(file_path) as f:
            content = f.read()
        # Simple template format: first line is "# <name>", the rest is the prompt
        lines = content.strip().split('\n')
        name = lines[0].replace('# ', '')
        prompt = '\n'.join(lines[1:])
        return llm.Template(name=name, prompt=prompt)

    register("yaml", yaml_loader)
    register("json", json_loader)
    register("file", file_loader)


# Template loaders are now available
loaders = llm.get_template_loaders()
print(f"Available loaders: {list(loaders.keys())}")


import llm
import os


@llm.hookimpl
def register_fragment_loaders(register):
    """Register fragment loaders for modular content."""

    def file_fragment_loader(file_path: str) -> llm.Fragment:
        """Load fragment from file."""
        if not os.path.exists(file_path):
            raise FileNotFoundError(f"Fragment file not found: {file_path}")
        with open(file_path) as f:
            content = f.read()
        return llm.Fragment(content, source=f"file:{file_path}")

    def url_fragment_loader(url: str) -> llm.Fragment:
        """Load fragment from URL."""
        import requests
        try:
            response = requests.get(url, timeout=10)
            response.raise_for_status()
            return llm.Fragment(response.text, source=f"url:{url}")
        except requests.RequestException as e:
            raise ValueError(f"Could not load fragment from {url}: {e}")

    def env_fragment_loader(env_var: str) -> llm.Fragment:
        """Load fragment from environment variable."""
        value = os.getenv(env_var)
        if value is None:
            raise ValueError(f"Environment variable not set: {env_var}")
        return llm.Fragment(value, source=f"env:{env_var}")

    register("file", file_fragment_loader)
    register("url", url_fragment_loader)
    register("env", env_fragment_loader)


# Fragment loaders enable modular content
fragment_loaders = llm.get_fragment_loaders()
print(f"Fragment loaders: {list(fragment_loaders.keys())}")


import llm
import click


@llm.hookimpl
def register_commands(cli):
    """Register custom CLI commands."""

    @cli.group()
    def analyze():
        """Text analysis commands."""
        pass

    @analyze.command()
    @click.argument("text")
    @click.option("--model", "-m", default="gpt-3.5-turbo")
    def sentiment(text, model):
        """Analyze sentiment of text."""
        model_obj = llm.get_model(model)
        response = model_obj.prompt(f"Analyze the sentiment of this text: {text}")
        click.echo(response.text())

    @analyze.command()
    @click.argument("text")
    @click.option("--model", "-m", default="gpt-3.5-turbo")
    def summarize(text, model):
        """Summarize text."""
        model_obj = llm.get_model(model)
        response = model_obj.prompt(f"Summarize this text in one sentence: {text}")
        click.echo(response.text())

    @cli.command()
    @click.option("--format", type=click.Choice(["json", "yaml", "table"]), default="table")
    def plugin_info(format):
        """Show information about loaded plugins."""
        plugins = llm.get_plugins(all=True)
        if format == "json":
            import json
            click.echo(json.dumps(plugins, indent=2))
        elif format == "yaml":
            import yaml
            click.echo(yaml.dump(plugins))
        else:
            # Table format
            for plugin in plugins:
                click.echo(f"Plugin: {plugin['name']}")
                click.echo(f"  Hooks: {', '.join(plugin['hooks'])}")
                if 'version' in plugin:
                    click.echo(f"  Version: {plugin['version']}")
                click.echo()


# Commands are now available: llm analyze sentiment "I love this!"

# File: llm_weather_plugin.py
import llm
import requests
import json
import click  # Required by register_commands below; was missing from the original example.


class WeatherModel(llm.KeyModel):
    """Model that provides weather information."""

    model_id = "weather"
    needs_key = "weather"
    key_env_var = "WEATHER_API_KEY"

    def prompt(self, prompt, **kwargs):
        # Extract location from prompt (simplified)
        location = prompt.text() if hasattr(prompt, 'text') else str(prompt)
        # Get weather data
        api_key = self.get_key()
        url = "http://api.openweathermap.org/data/2.5/weather"
        params = {"q": location, "appid": api_key, "units": "metric"}
        response = requests.get(url, params=params, timeout=10)
        if response.status_code == 200:
            # Only parse the body on success: error responses may not be JSON.
            weather_data = response.json()
            temp = weather_data["main"]["temp"]
            desc = weather_data["weather"][0]["description"]
            result = f"Weather in {location}: {temp}°C, {desc}"
        else:
            result = f"Could not get weather for {location}"
        return WeatherResponse(result)


class WeatherResponse(llm.Response):
    def __init__(self, text):
        self._text = text

    def text(self):
        return self._text

    def __iter__(self):
        yield self._text


@llm.hookimpl
def register_models(register):
    """Register weather model."""
    register(WeatherModel(), aliases=["weather", "forecast"])


@llm.hookimpl
def register_tools(register):
    """Register weather tools."""

    def current_weather(location: str) -> str:
        """Get current weather for a location."""
        # Implementation would use weather API
        return f"Current weather in {location}: 22°C, sunny"

    def weather_forecast(location: str, days: int = 5) -> str:
        """Get weather forecast for a location."""
        return f"{days}-day forecast for {location}: Mostly sunny"

    register(current_weather, name="weather")
    register(weather_forecast, name="forecast")


@llm.hookimpl
def register_commands(cli):
    """Register weather CLI commands."""

    @cli.command()
    @click.argument("location")
    def weather(location):
        """Get weather for a location."""
        model = llm.get_model("weather")
        response = model.prompt(location)
        click.echo(response.text())


# Entry point in setup.py or pyproject.toml:
# [project.entry-points."llm"]
# weather = "llm_weather_plugin"


import llm
# Load all plugins
llm.load_plugins()

# Get plugin information
plugins = llm.get_plugins(all=True)
print("All plugins:")
for plugin in plugins:
    print(f"- {plugin['name']}: {plugin['hooks']}")
    if 'version' in plugin:
        print(f"  Version: {plugin['version']}")

# Get only third-party plugins
third_party = llm.get_plugins(all=False)
print(f"\nThird-party plugins: {len(third_party)}")

# Inspect available extensions
tools = llm.get_tools()
print(f"Available tools: {len(tools)}")

template_loaders = llm.get_template_loaders()
print(f"Template loaders: {list(template_loaders.keys())}")

fragment_loaders = llm.get_fragment_loaders()
print(f"Fragment loaders: {list(fragment_loaders.keys())}")

# Direct plugin manager access
plugin_names = [llm.pm.get_name(plugin) for plugin in llm.pm.get_plugins()]
print(f"Plugin manager has: {plugin_names}")


import llm
# Example of a well-structured plugin
class MyPlugin:
    """Example plugin demonstrating best practices."""

    def __init__(self):
        self.initialized = False

    def ensure_initialized(self):
        # Lazy initialization: set up resources only on first use.
        if not self.initialized:
            self.setup_resources()
            self.initialized = True

    def setup_resources(self):
        # Initialize any required resources
        pass


# Global plugin instance
plugin_instance = MyPlugin()


@llm.hookimpl
def register_models(register):
    """Register models with proper error handling."""
    try:
        plugin_instance.ensure_initialized()
        # Register models
        pass
    except Exception as e:
        # Log the error but don't crash plugin loading
        print(f"Failed to register models: {e}")


@llm.hookimpl
def register_tools(register):
    """Register tools with validation."""
    plugin_instance.ensure_initialized()

    def validated_tool(input_data: str) -> str:
        """Tool with input validation."""
        if not input_data.strip():
            raise llm.CancelToolCall("Input cannot be empty")
        # Process input
        return f"Processed: {input_data}"

    register(validated_tool, name="validated_tool")


# Plugin metadata (for entry points)
__version__ = "1.0.0"
__author__ = "Plugin Developer"
__description__ = "Example plugin for LLM"

# This comprehensive plugin system enables extensive customization and
# integration of the LLM package with external services, custom models,
# specialized tools, and domain-specific functionality while maintaining a
# clean and consistent API.
Install with the Tessl CLI:

    npx tessl i tessl/pypi-llm