Standard tests for LangChain implementations
---
Lightweight test classes for basic functionality verification, focusing on object creation, parameter validation, and method availability without requiring external API calls or network dependencies. Unit tests provide fast feedback during development and ensure proper integration configuration.
Unit testing for chat model implementations with initialization, environment variable configuration, and basic functionality verification.
from langchain_tests.unit_tests import ChatModelUnitTests
class ChatModelUnitTests(ChatModelTests):
    """Unit tests for chat models.

    Subclass this and implement the required abstract properties; the
    inherited ``test_*`` methods then run automatically under pytest.
    """

    # --- Required abstract properties ---

    @property
    def chat_model_class(self):
        """Chat model class to test."""

    @property
    def chat_model_params(self) -> dict:
        """Model initialization parameters."""

    # --- Optional configuration ---

    @property
    def init_from_env_params(self) -> dict:
        """Parameters for environment variable initialization test."""

    # --- Standard tests (do not override) ---

    def test_init(self) -> None:
        """Test model initialization with provided parameters."""

    def test_init_from_env(self) -> None:
        """Test initialization from environment variables."""

    def test_init_streaming(self) -> None:
        """Test streaming initialization."""

    def test_bind_tool_pydantic(self) -> None:
        """Test Pydantic tool binding."""

    def test_with_structured_output(self) -> None:
        """Test structured output configuration."""

    def test_standard_params(self) -> None:
        """Test standard parameter handling."""

    def test_serdes(self) -> None:
        """Test serialization and deserialization."""

    def test_init_time(self) -> None:
        """Benchmark initialization performance."""

from langchain_tests.unit_tests import ChatModelUnitTests
from my_integration import MyChatModel
class TestMyChatModelUnit(ChatModelUnitTests):
@property
def chat_model_class(self):
return MyChatModel
@property
def chat_model_params(self):
return {
"api_key": "test-api-key",
"model": "test-model-name",
"temperature": 0.7
}
@property
def init_from_env_params(self):
return {
"model": "test-model-from-env"
}Unit testing for embeddings model implementations with initialization and configuration verification.
from langchain_tests.unit_tests import EmbeddingsUnitTests
class EmbeddingsUnitTests(EmbeddingsTests):
    """Unit tests for embeddings models.

    Subclass this and implement the required abstract properties; the
    inherited ``test_*`` methods then run automatically under pytest.
    """

    # --- Required abstract properties ---

    @property
    def embeddings_class(self):
        """Embeddings class to test."""

    @property
    def embedding_model_params(self) -> dict:
        """Model parameters for initialization."""

    # --- Optional configuration ---

    @property
    def init_from_env_params(self) -> dict:
        """Parameters for environment variable initialization test."""

    # --- Standard tests (do not override) ---

    def test_init(self) -> None:
        """Test model initialization with provided parameters."""

    def test_init_from_env(self) -> None:
        """Test initialization from environment variables."""

from langchain_tests.unit_tests import EmbeddingsUnitTests
from my_integration import MyEmbeddings
class TestMyEmbeddingsUnit(EmbeddingsUnitTests):
@property
def embeddings_class(self):
return MyEmbeddings
@property
def embedding_model_params(self):
return {
"api_key": "test-api-key",
"model": "text-embedding-3-small"
}Unit testing for tool implementations with schema validation and parameter verification.
from langchain_tests.unit_tests import ToolsUnitTests
class ToolsUnitTests(ToolsTests):
    """Unit tests for tools.

    Subclass this and implement the required abstract properties; the
    inherited ``test_*`` methods then run automatically under pytest.
    """

    # --- Required abstract properties ---

    @property
    def tool_constructor(self):
        """Tool class or instance to test."""

    @property
    def tool_constructor_params(self) -> dict:
        """Constructor parameters for tool initialization."""

    @property
    def tool_invoke_params_example(self) -> dict:
        """Example parameters for tool invocation."""

    # --- Optional configuration ---

    @property
    def init_from_env_params(self) -> dict:
        """Parameters for environment variable initialization test."""

    # --- Standard tests (do not override) ---

    def test_init(self) -> None:
        """Test tool initialization."""

    def test_init_from_env(self) -> None:
        """Test initialization from environment variables."""

    def test_has_name(self) -> None:
        """Test that tool has a name."""

    def test_has_input_schema(self) -> None:
        """Test that tool has an input schema."""

    def test_input_schema_matches_invoke_params(self) -> None:
        """Test that input schema matches example invoke parameters."""

from langchain_tests.unit_tests import ToolsUnitTests
from my_integration import MyTool
class TestMyToolUnit(ToolsUnitTests):
@property
def tool_constructor(self):
return MyTool
@property
def tool_constructor_params(self):
return {
"api_key": "test-api-key",
"base_url": "https://api.example.com"
}
@property
def tool_invoke_params_example(self):
return {
"query": "test query",
"limit": 10
}Base abstract class for chat model testing with extensive configuration options.
from langchain_tests.unit_tests.chat_models import ChatModelTests
class ChatModelTests(BaseStandardTests):
"""Base class for chat model testing."""
# Required abstract properties
@property
def chat_model_class(self):
"""Chat model class to test."""
@property
def chat_model_params(self) -> dict:
"""Model initialization parameters."""
# Feature support configuration
@property
def has_tool_calling(self) -> bool:
"""Whether the model supports tool calling. Default: False."""
@property
def tool_choice_value(self):
"""Tool choice parameter value. Default: None."""
@property
def has_tool_choice(self) -> bool:
"""Whether the model supports tool choice. Default: False."""
@property
def has_structured_output(self) -> bool:
"""Whether the model supports structured output. Default: False."""
@property
def structured_output_kwargs(self) -> dict:
"""Additional structured output parameters. Default: {}."""
@property
def supports_json_mode(self) -> bool:
"""Whether the model supports JSON mode. Default: False."""
# Multimodal input support
@property
def supports_image_inputs(self) -> bool:
"""Whether the model supports image inputs. Default: False."""
@property
def supports_image_urls(self) -> bool:
"""Whether the model supports image URLs. Default: False."""
@property
def supports_pdf_inputs(self) -> bool:
"""Whether the model supports PDF inputs. Default: False."""
@property
def supports_audio_inputs(self) -> bool:
"""Whether the model supports audio inputs. Default: False."""
@property
def supports_video_inputs(self) -> bool:
"""Whether the model supports video inputs. Default: False."""
# Performance and metadata
@property
def returns_usage_metadata(self) -> bool:
"""Whether the model returns usage metadata. Default: False."""
@property
def supported_usage_metadata_details(self) -> dict:
"""Usage metadata details configuration. Default: {}."""
# Input format support
@property
def supports_anthropic_inputs(self) -> bool:
"""Whether the model supports Anthropic-style inputs. Default: False."""
@property
def supports_image_tool_message(self) -> bool:
"""Whether the model supports image tool messages. Default: False."""
# Testing configuration
@property
def enable_vcr_tests(self) -> bool:
"""Whether to enable VCR (HTTP recording) tests. Default: True."""
# Fixtures
@pytest.fixture
def model(self):
"""Chat model fixture for testing."""
@pytest.fixture
def my_adder_tool(self):
"""Tool fixture for testing tool calling functionality."""Base abstract class for embeddings model testing.
from langchain_tests.unit_tests.embeddings import EmbeddingsTests
class EmbeddingsTests(BaseStandardTests):
"""Base class for embeddings testing."""
# Required abstract properties
@property
def embeddings_class(self):
"""Embeddings class to test."""
@property
def embedding_model_params(self) -> dict:
"""Model parameters for initialization."""
# Fixtures
@pytest.fixture
def model(self):
"""Embeddings model fixture for testing."""Base abstract class for tool testing.
from langchain_tests.unit_tests.tools import ToolsTests
class ToolsTests(BaseStandardTests):
"""Base class for tool testing."""
# Required abstract properties
@property
def tool_constructor(self):
"""Tool class or instance to test."""
@property
def tool_constructor_params(self) -> dict:
"""Constructor parameters for tool initialization."""
@property
def tool_invoke_params_example(self) -> dict:
"""Example parameters for tool invocation."""
# Fixtures
@pytest.fixture
def tool(self):
"""Tool fixture for testing."""Utility functions for generating test schemas and models.
from langchain_tests.unit_tests.chat_models import (
generate_schema_pydantic,
generate_schema_pydantic_v1_from_2
)
def generate_schema_pydantic():
"""Generate a Pydantic model for testing structured output."""
def generate_schema_pydantic_v1_from_2():
"""Generate a Pydantic V1 model from V2 for compatibility testing."""Foundation class providing override protection for all test classes.
from langchain_tests.base import BaseStandardTests
class BaseStandardTests:
"""Base class for all standard tests with override protection."""
def test_no_overrides_DO_NOT_OVERRIDE(self) -> None:
"""Ensures standard tests aren't overridden by implementation classes."""Unit tests include support for testing initialization from environment variables, which is crucial for production deployments where API keys and configuration are provided via environment variables rather than hardcoded parameters.
class TestMyModelUnit(ChatModelUnitTests):
@property
def init_from_env_params(self):
# Parameters to use when testing environment variable initialization
# These should NOT include sensitive data like API keys
return {
"model": "gpt-3.5-turbo",
"temperature": 0.5,
"max_tokens": 1000
}The test_init_from_env() method will attempt to initialize your model class with the specified parameters, expecting that sensitive configuration like API keys will be read from environment variables rather than passed as parameters.
Unit tests include basic performance benchmarking through the test_init_time() method, which measures model initialization time. This helps identify performance regressions and ensures initialization stays within acceptable bounds.
The benchmark uses pytest-benchmark integration and can be configured through pytest command line options for detailed performance analysis.
Install with Tessl CLI
npx tessl i tessl/pypi-langchain-tests