tessl install tessl/pypi-langsmith@0.6.1

Python SDK for LangSmith Observability and Evaluation Platform.

Operations for managing examples — individual records within datasets.
def create_example(
    self,
    inputs: dict,
    *,
    dataset_id: Optional[Union[str, UUID]] = None,
    dataset_name: Optional[str] = None,
    outputs: Optional[dict] = None,
    metadata: Optional[dict] = None,
    split: Optional[Union[str, list[str]]] = None,
    example_id: Optional[Union[str, UUID]] = None,
    created_at: Optional[datetime] = None,
) -> Example:
    """Create a single example in a dataset.

    The target dataset is identified by ``dataset_id`` or, alternatively,
    by ``dataset_name``.

    Args:
        inputs: Input data for the example.
        dataset_id: ID of the dataset.
        dataset_name: Name of the dataset (alternative to ``dataset_id``).
        outputs: Expected output data.
        metadata: Arbitrary example metadata.
        split: Dataset split(s) the example belongs to (e.g. "train", "test").
        example_id: Preset ID for the new example.
        created_at: Creation timestamp.

    Returns:
        The created Example object.
    """


def create_examples(
    self,
    *,
    inputs: Optional[Sequence[dict]] = None,
    outputs: Optional[Sequence[Optional[dict]]] = None,
    metadata: Optional[Sequence[Optional[dict]]] = None,
    splits: Optional[Sequence[Optional[Union[str, list[str]]]]] = None,
    source_run_ids: Optional[Sequence[Optional[Union[str, UUID]]]] = None,
    example_ids: Optional[Sequence[Optional[Union[str, UUID]]]] = None,
    dataset_id: Optional[Union[str, UUID]] = None,
    dataset_name: Optional[str] = None,
) -> list[Example]:
    """Create multiple examples in a dataset.

    The per-example sequences (``inputs``, ``outputs``, ``metadata``,
    ``splits``, ``source_run_ids``, ``example_ids``) are presumably
    parallel, with index ``i`` of each describing the i-th example.
    # NOTE(review): parallel-sequence assumption — confirm against implementation.

    Args:
        inputs: List of input data dicts.
        outputs: List of expected output data.
        metadata: List of per-example metadata.
        splits: List of split assignments.
        source_run_ids: List of source run IDs.
        example_ids: List of preset example IDs.
        dataset_id: ID of the dataset.
        dataset_name: Name of the dataset (alternative to ``dataset_id``).

    Returns:
        List of created Example objects.
    """
def read_example(
    self,
    example_id: Union[str, UUID]
) -> Example:
    """Read a single example by its ID.

    Args:
        example_id: ID of the example to fetch.

    Returns:
        The Example object.
    """
def list_examples(
    self,
    *,
    dataset_id: Optional[Union[str, UUID]] = None,
    dataset_name: Optional[str] = None,
    example_ids: Optional[list[Union[str, UUID]]] = None,
    splits: Optional[list[str]] = None,
    metadata: Optional[dict] = None,
    filter: Optional[str] = None,
    offset: Optional[int] = None,
    limit: Optional[int] = None,
    order: Optional[str] = None,
    random_seed: Optional[float] = None,
) -> Iterator[Example]:
    """List examples, optionally filtered.

    Args:
        dataset_id: Filter by dataset ID.
        dataset_name: Filter by dataset name.
        example_ids: Filter to these specific example IDs.
        splits: Filter by dataset splits.
        metadata: Filter by metadata.
        filter: Advanced filter string.
        offset: Number of examples to skip.
        limit: Maximum number of examples to return.
        order: Sort order.
        random_seed: Seed used for random ordering.

    Returns:
        Iterator of Example objects.
    """


def update_example(
    self,
    example_id: Union[str, UUID],
    *,
    inputs: Optional[dict] = None,
    outputs: Optional[dict] = None,
    metadata: Optional[dict] = None,
    split: Optional[Union[str, list[str]]] = None,
) -> Example:
    """Update an existing example.

    Only the fields passed as non-None are updated.
    # NOTE(review): partial-update semantics presumed from Optional params — confirm.

    Args:
        example_id: ID of the example to update.
        inputs: New input data.
        outputs: New output data.
        metadata: New metadata.
        split: New split assignment.

    Returns:
        The updated Example object.
    """
def delete_example(
    self,
    example_id: Union[str, UUID]
) -> None:
    """Delete an example.

    Args:
        example_id: ID of the example to delete.
    """
def create_llm_example(
    self,
    input: str,
    generation: Optional[str] = None,
    *,
    dataset_id: Optional[Union[str, UUID]] = None,
    dataset_name: Optional[str] = None,
    created_at: Optional[datetime] = None,
    example_id: Optional[Union[str, UUID]] = None,
    metadata: Optional[dict] = None,
    split: Optional[Union[str, list[str]]] = None,
) -> Example:
    """Create an LLM-formatted example (single prompt string in, string out).

    Args:
        input: Input prompt string.
        generation: Expected generation (completion) text.
        dataset_id: ID of the dataset.
        dataset_name: Name of the dataset (alternative to ``dataset_id``).
        created_at: Creation timestamp.
        example_id: Preset ID for the new example.
        metadata: Arbitrary example metadata.
        split: Dataset split assignment.

    Returns:
        The created Example object.
    """
def create_chat_example(
    self,
    messages: list[Union[dict, tuple]],
    *,
    generations: Optional[Union[str, dict, list[dict]]] = None,
    dataset_id: Optional[Union[str, UUID]] = None,
    dataset_name: Optional[str] = None,
    created_at: Optional[datetime] = None,
    example_id: Optional[Union[str, UUID]] = None,
    metadata: Optional[dict] = None,
    split: Optional[Union[str, list[str]]] = None,
) -> Example:
    """Create a chat-formatted example (list of chat messages in).

    Args:
        messages: List of chat messages, each a dict or tuple.
        generations: Expected generation(s) for the conversation.
        dataset_id: ID of the dataset.
        dataset_name: Name of the dataset (alternative to ``dataset_id``).
        created_at: Creation timestamp.
        example_id: Preset ID for the new example.
        metadata: Arbitrary example metadata.
        split: Dataset split assignment.

    Returns:
        The created Example object.
    """
def create_example_from_run(
    self,
    run: Union[Run, RunTree, dict],
    *,
    dataset_id: Optional[Union[str, UUID]] = None,
    dataset_name: Optional[str] = None,
    split: Optional[Union[str, list[str]]] = None,
    metadata: Optional[dict] = None,
) -> Example:
    """Create an example from an existing run.

    Args:
        run: Run object, RunTree, or run dictionary to convert.
        dataset_id: ID of the dataset.
        dataset_name: Name of the dataset (alternative to ``dataset_id``).
        split: Dataset split assignment.
        metadata: Arbitrary example metadata.

    Returns:
        The created Example object.
    """
def upload_examples_multipart(
    self,
    dataset_id: Union[str, UUID],
    *,
    csv_file: Optional[Union[str, Path]] = None,
    csv_url: Optional[str] = None,
    input_keys: Sequence[str],
    output_keys: Sequence[str],
    metadata_keys: Optional[Sequence[str]] = None,
) -> None:
    """Batch-upload examples from CSV via multipart request.

    The CSV source is given as either a local path (``csv_file``) or a URL
    (``csv_url``); the ``*_keys`` arguments map CSV columns to example fields.
    # NOTE(review): presumably exactly one of csv_file/csv_url is required — confirm.

    Args:
        dataset_id: ID of the target dataset.
        csv_file: Path to a local CSV file.
        csv_url: URL of a CSV file.
        input_keys: CSV column names to use as inputs (required, keyword-only).
        output_keys: CSV column names to use as outputs (required, keyword-only).
        metadata_keys: CSV column names to use as metadata.
    """
def update_examples_multipart(
    self,
    dataset_id: Union[str, UUID],
    *,
    csv_file: Optional[Union[str, Path]] = None,
    csv_url: Optional[str] = None,
    input_keys: Optional[Sequence[str]] = None,
    output_keys: Optional[Sequence[str]] = None,
    metadata_keys: Optional[Sequence[str]] = None,
) -> None:
    """Batch-update examples from CSV via multipart request.

    Unlike ``upload_examples_multipart``, all column-mapping arguments are
    optional here, presumably so a partial update can touch only some fields.
    # NOTE(review): partial-update semantics inferred from signature — confirm.

    Args:
        dataset_id: ID of the target dataset.
        csv_file: Path to a local CSV file.
        csv_url: URL of a CSV file.
        input_keys: CSV column names to use as inputs.
        output_keys: CSV column names to use as outputs.
        metadata_keys: CSV column names to use as metadata.
    """