Synchronous client for interacting with the LangSmith API. The Client class provides full CRUD operations for runs, projects, datasets, examples, feedback, annotation queues, and prompts. It includes built-in tracing support with auto-batching, caching, and data anonymization capabilities.
class Client:
def __init__(
self,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
timeout_ms: Optional[Union[int, tuple[Optional[int], Optional[int], Optional[int], Optional[int]]]] = None,
web_url: Optional[str] = None,
session: Optional[requests.Session] = None,
auto_batch_tracing: bool = True,
info: Optional[ls_schemas.LangSmithInfo] = None,
api_version: str = "v1",
hide_inputs: Optional[Union[Callable[[dict], dict], bool]] = None,
hide_outputs: Optional[Union[Callable[[dict], dict], bool]] = None,
anonymizer: Optional[Callable[[Any], Any]] = None,
hide_metadata: Optional[Union[Callable[[dict], dict], bool]] = None,
cache: Union[Cache, bool] = False,
tracing_error_callback: Optional[Callable[[Exception], None]] = None,
workspace_id: Optional[str] = None,
):
"""
Create a LangSmith client.
Parameters:
- api_url: URL of the LangSmith API (defaults to LANGSMITH_ENDPOINT env var or https://api.smith.langchain.com)
- api_key: API key for authentication (defaults to LANGSMITH_API_KEY env var)
- timeout_ms: Timeout in milliseconds, or tuple of (connect, read, write, pool) timeouts
- web_url: URL of the LangSmith web UI
- session: Custom requests Session to use
- auto_batch_tracing: Whether to automatically batch run submissions
- info: Pre-fetched server information
- api_version: API version to use (default: "v1")
- hide_inputs: Function or bool to hide/redact inputs from traces
- hide_outputs: Function or bool to hide/redact outputs from traces
- anonymizer: Function to anonymize sensitive data in traces
- hide_metadata: Function or bool to hide/redact metadata from traces
- cache: Cache instance for prompt caching, or True to use default cache
- tracing_error_callback: Callback for handling tracing errors
- workspace_id: Workspace ID to use for multi-workspace accounts
"""Operations for creating, reading, updating, and deleting runs (trace spans).
def create_run(
self,
name: str,
inputs: dict,
run_type: str,
*,
execution_order: Optional[int] = None,
child_runs: Optional[Sequence[Union[RunTree, dict]]] = None,
parent_run_id: Optional[Union[str, UUID]] = None,
project_name: Optional[str] = None,
revision_id: Optional[Union[str, UUID]] = None,
outputs: Optional[dict] = None,
error: Optional[str] = None,
reference_example_id: Optional[Union[str, UUID]] = None,
start_time: Optional[datetime] = None,
end_time: Optional[datetime] = None,
tags: Optional[list[str]] = None,
extra: Optional[dict] = None,
trace_id: Optional[Union[str, UUID]] = None,
dotted_order: Optional[str] = None,
run_id: Optional[Union[str, UUID]] = None,
session_name: Optional[str] = None,
**kwargs
) -> None:
"""Create a new run in LangSmith.

Args:
    name: Name of the run.
    inputs: Input data for the run.
    run_type: Type of run (e.g., "chain", "llm", "tool", "retriever",
        "prompt").
    execution_order: Order of execution in parent.
    child_runs: Child runs to include.
    parent_run_id: ID of parent run.
    project_name: Project/session to log to.
    revision_id: Revision/version identifier.
    outputs: Output data.
    error: Error message if run failed.
    reference_example_id: Dataset example ID for evaluation.
    start_time: When the run started.
    end_time: When the run ended.
    tags: List of tags.
    extra: Extra metadata.
    trace_id: Root trace ID.
    dotted_order: Dotted order string for tree positioning.
    run_id: Preset run ID.
    session_name: Alias for project_name.
"""
def update_run(
self,
run_id: Union[str, UUID],
*,
end_time: Optional[datetime] = None,
error: Optional[str] = None,
outputs: Optional[dict] = None,
events: Optional[Sequence[dict]] = None,
tags: Optional[list[str]] = None,
extra: Optional[dict] = None,
input_attachments: Optional[dict[str, Attachment]] = None,
output_attachments: Optional[dict[str, Attachment]] = None,
session_id: Optional[Union[str, UUID]] = None,
metadata: Optional[dict] = None,
**kwargs
) -> None:
"""Update an existing run.

Args:
    run_id: ID of the run to update.
    end_time: When the run ended.
    error: Error message if run failed.
    outputs: Output data.
    events: List of events.
    tags: Tags to add.
    extra: Extra metadata.
    input_attachments: Input file attachments.
    output_attachments: Output file attachments.
    session_id: Project/session ID.
    metadata: Metadata to update.
"""
def read_run(
self,
run_id: Union[str, UUID],
load_child_runs: bool = False
) -> Run:
"""Read a run by ID.

Args:
    run_id: ID of the run.
    load_child_runs: Whether to load child runs.

Returns:
    Run object.
"""
def list_runs(
self,
*,
project_name: Optional[str] = None,
project_id: Optional[Union[str, UUID]] = None,
run_type: Optional[str] = None,
dataset_name: Optional[str] = None,
dataset_id: Optional[Union[str, UUID]] = None,
reference_example_id: Optional[Union[str, UUID]] = None,
trace_id: Optional[Union[str, UUID]] = None,
parent_run_id: Optional[Union[str, UUID]] = None,
start_time: Optional[datetime] = None,
end_time: Optional[datetime] = None,
error: Optional[bool] = None,
run_ids: Optional[list[Union[str, UUID]]] = None,
filter: Optional[str] = None,
trace_filter: Optional[str] = None,
tree_filter: Optional[str] = None,
is_root: Optional[bool] = None,
select: Optional[list[str]] = None,
query: Optional[str] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
**kwargs
) -> Iterator[Run]:
"""List runs with filtering.

Args:
    project_name: Filter by project name.
    project_id: Filter by project ID.
    run_type: Filter by run type.
    dataset_name: Filter by associated dataset.
    dataset_id: Filter by dataset ID.
    reference_example_id: Filter by example ID.
    trace_id: Filter by trace ID.
    parent_run_id: Filter by parent run.
    start_time: Filter runs after this time.
    end_time: Filter runs before this time.
    error: Filter by error status (True for errors only, False for
        non-errors).
    run_ids: Filter by specific run IDs.
    filter: Advanced filter string.
    trace_filter: Filter for root trace properties.
    tree_filter: Filter for any run in trace tree.
    is_root: Filter for root runs only.
    select: Fields to include in response.
    query: Full-text search query.
    limit: Maximum number of runs to return.
    offset: Number of runs to skip.

Returns:
    Iterator of Run objects.
"""
def share_run(
self,
run_id: Union[str, UUID],
*,
share_id: Optional[Union[str, UUID]] = None
) -> str:
"""Share a run and get a shareable URL.

Args:
    run_id: ID of the run to share.
    share_id: Custom share ID.

Returns:
    Shareable URL.
"""
def unshare_run(
self,
run_id: Union[str, UUID]
) -> None:
"""Unshare a previously shared run.

Args:
    run_id: ID of the run to unshare.
"""
def read_run_shared_link(
self,
run_id: Union[str, UUID]
) -> Optional[str]:
"""Get the shared link for a run if it exists.

Args:
    run_id: ID of the run.

Returns:
    Shared URL, or None if the run is not shared.
"""
def run_is_shared(
self,
run_id: Union[str, UUID]
) -> bool:
"""Check if a run is currently shared.

Args:
    run_id: ID of the run.

Returns:
    True if shared, False otherwise.
"""
def get_run_url(
self,
run_id: Union[str, UUID],
*,
project_name: Optional[str] = None,
project_id: Optional[Union[str, UUID]] = None
) -> str:
"""Get the web URL for a run.

Args:
    run_id: ID of the run.
    project_name: Project name (for context).
    project_id: Project ID (for context).

Returns:
    Web UI URL for the run.
"""
def batch_ingest_runs(
self,
*,
create: Optional[Sequence[Union[RunTree, dict]]] = None,
update: Optional[Sequence[Union[RunTree, dict]]] = None,
trace_id: Optional[Union[str, UUID]] = None,
post_to_batch_endpoint: bool = False,
**kwargs
) -> None:
"""Batch ingest multiple runs.

Args:
    create: Runs to create.
    update: Runs to update.
    trace_id: Trace ID for the batch.
    post_to_batch_endpoint: Whether to use the batch endpoint.
"""
def multipart_ingest(
self,
create: Optional[Sequence[Union[RunTree, dict]]] = None,
update: Optional[Sequence[Union[RunTree, dict]]] = None,
**kwargs
) -> None:
"""Ingest runs via multipart upload.

Args:
    create: Runs to create.
    update: Runs to update.
"""
def flush(self) -> None:
"""
Flush any pending run operations.
Waits for all queued runs to be submitted to the server.
"""Operations for managing projects (also called sessions or trace groups).
def create_project(
self,
project_name: str,
*,
description: Optional[str] = None,
metadata: Optional[dict] = None,
tags: Optional[list[str]] = None,
reference_dataset_id: Optional[Union[str, UUID]] = None,
) -> TracerSession:
"""Create a new project.

Args:
    project_name: Name of the project.
    description: Project description.
    metadata: Project metadata.
    tags: Project tags.
    reference_dataset_id: Associated dataset ID.

Returns:
    Created TracerSession object.
"""
def read_project(
self,
project_name: Optional[str] = None,
project_id: Optional[Union[str, UUID]] = None
) -> TracerSession:
"""Read a project by name or ID.

Args:
    project_name: Name of the project.
    project_id: ID of the project (alternative to name).

Returns:
    TracerSession object.
"""
def list_projects(
self,
*,
project_ids: Optional[list[Union[str, UUID]]] = None,
name: Optional[str] = None,
name_contains: Optional[str] = None,
reference_dataset_id: Optional[Union[str, UUID]] = None,
reference_dataset_name: Optional[str] = None,
reference_free: Optional[bool] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
) -> Iterator[TracerSession]:
"""List all projects with optional filtering.

Args:
    project_ids: Filter by specific project IDs.
    name: Filter by exact name.
    name_contains: Filter by name substring.
    reference_dataset_id: Filter by reference dataset ID.
    reference_dataset_name: Filter by reference dataset name.
    reference_free: Filter projects without a dataset reference.
    limit: Maximum number of projects.
    offset: Number of projects to skip.

Returns:
    Iterator of TracerSession objects.
"""
def delete_project(
self,
project_name: Optional[str] = None,
project_id: Optional[Union[str, UUID]] = None
) -> None:
"""Delete a project.

Args:
    project_name: Name of the project.
    project_id: ID of the project (alternative to name).
"""
def update_project(
self,
project_id: Union[str, UUID],
*,
name: Optional[str] = None,
description: Optional[str] = None,
metadata: Optional[dict] = None,
tags: Optional[list[str]] = None,
end_time: Optional[datetime] = None,
) -> TracerSession:
"""Update a project.

Args:
    project_id: ID of the project.
    name: New name.
    description: New description.
    metadata: New metadata.
    tags: New tags.
    end_time: Mark project as ended at this time.

Returns:
    Updated TracerSession object.
"""
def has_project(
self,
project_name: str
) -> bool:
"""Check if a project exists.

Args:
    project_name: Name of the project.

Returns:
    True if the project exists.
"""
def get_test_results(
self,
project_name: str,
*,
dataset_id: Optional[Union[str, UUID]] = None
) -> TestResults:
"""
Get test results for a project.
Parameters:
- project_name: Name of the project
- dataset_id: Optional dataset ID filter
Returns:
TestResults object
"""Operations for managing datasets (collections of examples).
def create_dataset(
self,
dataset_name: str,
*,
description: Optional[str] = None,
data_type: Optional[DataType] = None,
inputs_schema: Optional[dict] = None,
outputs_schema: Optional[dict] = None,
metadata: Optional[dict] = None,
tags: Optional[list[str]] = None,
) -> Dataset:
"""Create a new dataset.

Args:
    dataset_name: Name of the dataset.
    description: Dataset description.
    data_type: Type of data (e.g., "kv", "llm", "chat").
    inputs_schema: JSON schema for inputs.
    outputs_schema: JSON schema for outputs.
    metadata: Dataset metadata.
    tags: Dataset tags.

Returns:
    Created Dataset object.
"""
def read_dataset(
self,
dataset_name: Optional[str] = None,
dataset_id: Optional[Union[str, UUID]] = None
) -> Dataset:
"""Read a dataset by name or ID.

Args:
    dataset_name: Name of the dataset.
    dataset_id: ID of the dataset (alternative to name).

Returns:
    Dataset object.
"""
def list_datasets(
self,
*,
dataset_ids: Optional[list[Union[str, UUID]]] = None,
dataset_name: Optional[str] = None,
dataset_name_contains: Optional[str] = None,
data_type: Optional[str] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
tags: Optional[list[str]] = None,
) -> Iterator[Dataset]:
"""List all datasets with optional filtering.

Args:
    dataset_ids: Filter by specific dataset IDs.
    dataset_name: Filter by exact name.
    dataset_name_contains: Filter by name substring.
    data_type: Filter by data type.
    limit: Maximum number of datasets.
    offset: Number of datasets to skip.
    tags: Filter by tags.

Returns:
    Iterator of Dataset objects.
"""
def delete_dataset(
self,
dataset_id: Optional[Union[str, UUID]] = None,
dataset_name: Optional[str] = None
) -> None:
"""Delete a dataset.

Args:
    dataset_id: ID of the dataset.
    dataset_name: Name of the dataset (alternative to ID).
"""
def has_dataset(
self,
dataset_name: str
) -> bool:
"""Check if a dataset exists.

Args:
    dataset_name: Name of the dataset.

Returns:
    True if the dataset exists.
"""
def update_dataset_tag(
self,
dataset_id: Union[str, UUID],
tag: str,
*,
as_of: Optional[Union[datetime, str]] = None,
) -> None:
"""Update a dataset version tag.

Args:
    dataset_id: ID of the dataset.
    tag: Tag name for this version.
    as_of: Timestamp for this version.
"""
def list_dataset_versions(
self,
dataset_id: Union[str, UUID],
*,
limit: Optional[int] = None,
offset: Optional[int] = None,
search: Optional[str] = None,
) -> Iterator[dict]:
"""List versions of a dataset.

Args:
    dataset_id: ID of the dataset.
    limit: Maximum number of versions.
    offset: Number of versions to skip.
    search: Search string for tags.

Returns:
    Iterator of version info dictionaries.
"""
def read_dataset_version(
self,
dataset_id: Union[str, UUID],
version: Union[str, datetime],
*,
include_examples: bool = False,
) -> dict:
"""Read a specific dataset version.

Args:
    dataset_id: ID of the dataset.
    version: Version tag or timestamp.
    include_examples: Whether to include examples.

Returns:
    Version info dictionary.
"""
def diff_dataset_versions(
self,
dataset_id: Union[str, UUID],
from_version: Union[str, datetime],
to_version: Union[str, datetime],
) -> dict:
"""Diff two dataset versions.

Args:
    dataset_id: ID of the dataset.
    from_version: Starting version.
    to_version: Ending version.

Returns:
    Diff information dictionary.
"""
def clone_public_dataset(
self,
token: str,
*,
source_api_url: Optional[str] = None,
dataset_name: Optional[str] = None,
) -> Dataset:
"""Clone a public dataset.

Args:
    token: Public dataset token.
    source_api_url: Source API URL.
    dataset_name: Custom name for the cloned dataset.

Returns:
    Cloned Dataset object.
"""
def upload_dataframe(
self,
df: Any,
*,
name: str,
description: Optional[str] = None,
input_keys: Sequence[str],
output_keys: Sequence[str],
data_type: Optional[DataType] = None,
) -> Dataset:
"""Upload a pandas DataFrame as a dataset.

Args:
    df: pandas DataFrame.
    name: Dataset name.
    description: Dataset description.
    input_keys: Column names to use as inputs.
    output_keys: Column names to use as outputs.
    data_type: Type of data.

Returns:
    Created Dataset object.
"""
def upload_csv(
self,
csv_file: Union[str, Path],
*,
name: str,
description: Optional[str] = None,
input_keys: Sequence[str],
output_keys: Sequence[str],
data_type: Optional[DataType] = None,
) -> Dataset:
"""
Upload a CSV file as a dataset.
Parameters:
- csv_file: Path to CSV file
- name: Dataset name
- description: Dataset description
- input_keys: Column names to use as inputs
- output_keys: Column names to use as outputs
- data_type: Type of data
Returns:
Created Dataset object
"""Operations for managing examples (records in datasets).
def create_example(
self,
inputs: dict,
*,
dataset_id: Optional[Union[str, UUID]] = None,
dataset_name: Optional[str] = None,
outputs: Optional[dict] = None,
metadata: Optional[dict] = None,
split: Optional[Union[str, list[str]]] = None,
example_id: Optional[Union[str, UUID]] = None,
created_at: Optional[datetime] = None,
) -> Example:
"""Create a single example in a dataset.

Args:
    inputs: Input data.
    dataset_id: ID of the dataset.
    dataset_name: Name of the dataset (alternative to ID).
    outputs: Expected output data.
    metadata: Example metadata.
    split: Dataset split(s) (e.g., "train", "test").
    example_id: Preset example ID.
    created_at: Creation timestamp.

Returns:
    Created Example object.
"""
def create_examples(
self,
*,
inputs: Optional[Sequence[dict]] = None,
outputs: Optional[Sequence[Optional[dict]]] = None,
metadata: Optional[Sequence[Optional[dict]]] = None,
splits: Optional[Sequence[Optional[Union[str, list[str]]]]] = None,
source_run_ids: Optional[Sequence[Optional[Union[str, UUID]]]] = None,
example_ids: Optional[Sequence[Optional[Union[str, UUID]]]] = None,
dataset_id: Optional[Union[str, UUID]] = None,
dataset_name: Optional[str] = None,
) -> list[Example]:
"""Create multiple examples in a dataset.

Args:
    inputs: List of input data.
    outputs: List of output data.
    metadata: List of metadata.
    splits: List of split assignments.
    source_run_ids: List of source run IDs.
    example_ids: List of preset example IDs.
    dataset_id: ID of the dataset.
    dataset_name: Name of the dataset (alternative to ID).

Returns:
    List of created Example objects.
"""
def read_example(
self,
example_id: Union[str, UUID]
) -> Example:
"""Read an example by ID.

Args:
    example_id: ID of the example.

Returns:
    Example object.
"""
def list_examples(
self,
*,
dataset_id: Optional[Union[str, UUID]] = None,
dataset_name: Optional[str] = None,
example_ids: Optional[list[Union[str, UUID]]] = None,
splits: Optional[list[str]] = None,
metadata: Optional[dict] = None,
filter: Optional[str] = None,
offset: Optional[int] = None,
limit: Optional[int] = None,
order: Optional[str] = None,
random_seed: Optional[float] = None,
) -> Iterator[Example]:
"""List examples with filtering.

Args:
    dataset_id: Filter by dataset ID.
    dataset_name: Filter by dataset name.
    example_ids: Filter by specific example IDs.
    splits: Filter by splits.
    metadata: Filter by metadata.
    filter: Advanced filter string.
    offset: Number of examples to skip.
    limit: Maximum number of examples.
    order: Sort order.
    random_seed: Seed for random ordering.

Returns:
    Iterator of Example objects.
"""
def update_example(
self,
example_id: Union[str, UUID],
*,
inputs: Optional[dict] = None,
outputs: Optional[dict] = None,
metadata: Optional[dict] = None,
split: Optional[Union[str, list[str]]] = None,
) -> Example:
"""Update an example.

Args:
    example_id: ID of the example.
    inputs: New input data.
    outputs: New output data.
    metadata: New metadata.
    split: New split assignment.

Returns:
    Updated Example object.
"""
def delete_example(
self,
example_id: Union[str, UUID]
) -> None:
"""Delete an example.

Args:
    example_id: ID of the example.
"""
def create_llm_example(
self,
input: str,
generation: Optional[str] = None,
*,
dataset_id: Optional[Union[str, UUID]] = None,
dataset_name: Optional[str] = None,
created_at: Optional[datetime] = None,
example_id: Optional[Union[str, UUID]] = None,
metadata: Optional[dict] = None,
split: Optional[Union[str, list[str]]] = None,
) -> Example:
"""Create an LLM-formatted example.

Args:
    input: Input prompt string.
    generation: Expected generation.
    dataset_id: ID of the dataset.
    dataset_name: Name of the dataset.
    created_at: Creation timestamp.
    example_id: Preset example ID.
    metadata: Example metadata.
    split: Dataset split.

Returns:
    Created Example object.
"""
def create_chat_example(
self,
messages: list[Union[dict, tuple]],
*,
generations: Optional[Union[str, dict, list[dict]]] = None,
dataset_id: Optional[Union[str, UUID]] = None,
dataset_name: Optional[str] = None,
created_at: Optional[datetime] = None,
example_id: Optional[Union[str, UUID]] = None,
metadata: Optional[dict] = None,
split: Optional[Union[str, list[str]]] = None,
) -> Example:
"""Create a chat-formatted example.

Args:
    messages: List of chat messages.
    generations: Expected generations.
    dataset_id: ID of the dataset.
    dataset_name: Name of the dataset.
    created_at: Creation timestamp.
    example_id: Preset example ID.
    metadata: Example metadata.
    split: Dataset split.

Returns:
    Created Example object.
"""
def create_example_from_run(
self,
run: Union[Run, RunTree, dict],
*,
dataset_id: Optional[Union[str, UUID]] = None,
dataset_name: Optional[str] = None,
split: Optional[Union[str, list[str]]] = None,
metadata: Optional[dict] = None,
) -> Example:
"""Create an example from a run.

Args:
    run: Run object or dictionary.
    dataset_id: ID of the dataset.
    dataset_name: Name of the dataset.
    split: Dataset split.
    metadata: Example metadata.

Returns:
    Created Example object.
"""
def upload_examples_multipart(
self,
dataset_id: Union[str, UUID],
*,
csv_file: Optional[Union[str, Path]] = None,
csv_url: Optional[str] = None,
input_keys: Sequence[str],
output_keys: Sequence[str],
metadata_keys: Optional[Sequence[str]] = None,
) -> None:
"""Batch upload examples via multipart.

Args:
    dataset_id: ID of the dataset.
    csv_file: Path to CSV file.
    csv_url: URL of CSV file.
    input_keys: Column names for inputs.
    output_keys: Column names for outputs.
    metadata_keys: Column names for metadata.
"""
def update_examples_multipart(
self,
dataset_id: Union[str, UUID],
*,
csv_file: Optional[Union[str, Path]] = None,
csv_url: Optional[str] = None,
input_keys: Optional[Sequence[str]] = None,
output_keys: Optional[Sequence[str]] = None,
metadata_keys: Optional[Sequence[str]] = None,
) -> None:
"""
Batch update examples via multipart.
Parameters:
- dataset_id: ID of the dataset
- csv_file: Path to CSV file
- csv_url: URL of CSV file
- input_keys: Column names for inputs
- output_keys: Column names for outputs
- metadata_keys: Column names for metadata
"""Operations for creating and managing feedback (metrics and annotations on runs).
def create_feedback(
self,
run_id: Union[str, UUID],
key: str,
*,
score: Optional[Union[int, float, bool]] = None,
value: Optional[Union[int, float, bool, str, dict, list]] = None,
correction: Optional[dict] = None,
comment: Optional[str] = None,
source_info: Optional[dict] = None,
feedback_source_type: Optional[FeedbackSourceType] = None,
source_run_id: Optional[Union[str, UUID]] = None,
feedback_id: Optional[Union[str, UUID]] = None,
feedback_config: Optional[FeedbackConfig] = None,
**kwargs
) -> Feedback:
"""Create feedback for a run.

Args:
    run_id: ID of the run to give feedback on.
    key: Feedback key/metric name.
    score: Numeric score.
    value: Non-numeric value.
    correction: Correction data.
    comment: Text comment.
    source_info: Information about the feedback source.
    feedback_source_type: Type of feedback source.
    source_run_id: ID of the evaluator run that created this feedback.
    feedback_id: Preset feedback ID.
    feedback_config: Feedback configuration.

Returns:
    Created Feedback object.
"""
def read_feedback(
self,
feedback_id: Union[str, UUID]
) -> Feedback:
"""Read feedback by ID.

Args:
    feedback_id: ID of the feedback.

Returns:
    Feedback object.
"""
def list_feedback(
self,
*,
run_ids: Optional[list[Union[str, UUID]]] = None,
feedback_keys: Optional[list[str]] = None,
feedback_source_types: Optional[list[FeedbackSourceType]] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
**kwargs
) -> Iterator[Feedback]:
"""List feedback with filtering.

Args:
    run_ids: Filter by run IDs.
    feedback_keys: Filter by feedback keys.
    feedback_source_types: Filter by source types.
    limit: Maximum number of feedback items.
    offset: Number of feedback items to skip.

Returns:
    Iterator of Feedback objects.
"""
def delete_feedback(
self,
feedback_id: Union[str, UUID]
) -> None:
"""Delete feedback.

Args:
    feedback_id: ID of the feedback.
"""
def create_presigned_feedback_token(
self,
run_id: Union[str, UUID],
feedback_key: str,
*,
expiration: Optional[datetime] = None,
feedback_config: Optional[FeedbackConfig] = None,
) -> FeedbackIngestToken:
"""Create a presigned token for public feedback submission.

Args:
    run_id: ID of the run.
    feedback_key: Feedback key/metric name.
    expiration: When the token expires.
    feedback_config: Feedback configuration.

Returns:
    FeedbackIngestToken object with token and URL.
"""
def create_feedback_from_token(
self,
token: Union[str, UUID],
*,
score: Optional[Union[int, float, bool]] = None,
value: Optional[Union[int, float, bool, str, dict, list]] = None,
correction: Optional[dict] = None,
comment: Optional[str] = None,
**kwargs
) -> None:
"""
Create feedback using a presigned token.
Parameters:
- token: Presigned feedback token
- score: Numeric score
- value: Non-numeric value
- correction: Correction data
- comment: Text comment
"""Operations for managing annotation queues for human review.
def create_annotation_queue(
self,
*,
name: str,
description: Optional[str] = None,
queue_id: Optional[Union[str, UUID]] = None,
) -> AnnotationQueue:
"""Create an annotation queue.

Args:
    name: Name of the queue.
    description: Queue description.
    queue_id: Preset queue ID.

Returns:
    Created AnnotationQueue object.
"""
def read_annotation_queue(
self,
queue_id: Union[str, UUID],
*,
queue_name: Optional[str] = None,
) -> AnnotationQueue:
"""Read an annotation queue.

Args:
    queue_id: ID of the queue.
    queue_name: Name of the queue (alternative).

Returns:
    AnnotationQueue object.
"""
def list_annotation_queues(
self,
*,
queue_ids: Optional[list[Union[str, UUID]]] = None,
name: Optional[str] = None,
name_contains: Optional[str] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
) -> Iterator[AnnotationQueue]:
"""List annotation queues.

Args:
    queue_ids: Filter by queue IDs.
    name: Filter by exact name.
    name_contains: Filter by name substring.
    limit: Maximum number of queues.
    offset: Number of queues to skip.

Returns:
    Iterator of AnnotationQueue objects.
"""
def delete_annotation_queue(
self,
queue_id: Union[str, UUID]
) -> None:
"""Delete an annotation queue.

Args:
    queue_id: ID of the queue.
"""
def add_runs_to_annotation_queue(
self,
queue_id: Union[str, UUID],
run_ids: list[Union[str, UUID]]
) -> None:
"""Add runs to an annotation queue.

Args:
    queue_id: ID of the queue.
    run_ids: List of run IDs to add.
"""
def get_run_from_annotation_queue(
self,
queue_id: Union[str, UUID],
*,
offset: int = 0,
) -> RunWithAnnotationQueueInfo:
"""Get the next run from an annotation queue.

Args:
    queue_id: ID of the queue.
    offset: Offset in the queue.

Returns:
    RunWithAnnotationQueueInfo object.
"""
def update_run_in_annotation_queue(
self,
queue_id: Union[str, UUID],
queue_run_id: Union[str, UUID],
*,
added_at: Optional[datetime] = None,
last_reviewed_time: Optional[datetime] = None,
) -> None:
"""
Update run status in annotation queue.
Parameters:
- queue_id: ID of the queue
- queue_run_id: Queue run ID
- added_at: When added to queue
- last_reviewed_time: Last review time
"""Operations for managing and versioning prompts.
def push_prompt(
self,
prompt_name: str,
*,
object: Optional[Any] = None,
description: Optional[str] = None,
readme: Optional[str] = None,
tags: Optional[list[str]] = None,
is_public: Optional[bool] = None,
) -> str:
"""Push a prompt to the hub.

Args:
    prompt_name: Name of the prompt.
    object: Prompt object (ChatPromptTemplate, etc.).
    description: Prompt description.
    readme: Markdown readme.
    tags: Prompt tags.
    is_public: Whether the prompt is public.

Returns:
    Prompt URL.
"""
def pull_prompt(
self,
prompt_name: str,
*,
include_model: bool = False,
) -> Any:
"""Pull a prompt from the hub.

Args:
    prompt_name: Name of the prompt.
    include_model: Whether to include model config.

Returns:
    Prompt object.
"""
def list_prompts(
self,
*,
limit: Optional[int] = None,
offset: Optional[int] = None,
query: Optional[str] = None,
is_public: Optional[bool] = None,
is_archived: Optional[bool] = None,
sort_field: Optional[str] = None,
sort_direction: Optional[str] = None,
) -> list[Prompt]:
"""List available prompts.

Args:
    limit: Maximum number of prompts.
    offset: Number of prompts to skip.
    query: Search query.
    is_public: Filter by public status.
    is_archived: Filter by archived status.
    sort_field: Field to sort by.
    sort_direction: Sort direction ("asc" or "desc").

Returns:
    List of Prompt objects.
"""
def get_prompt(
self,
prompt_identifier: str
) -> Prompt:
"""Get a specific prompt.

Args:
    prompt_identifier: Prompt name or identifier.

Returns:
    Prompt object.
"""
def create_prompt(
self,
prompt_name: str,
*,
description: Optional[str] = None,
readme: Optional[str] = None,
tags: Optional[list[str]] = None,
is_public: Optional[bool] = None,
) -> Prompt:
"""Create a new prompt.

Args:
    prompt_name: Name of the prompt.
    description: Prompt description.
    readme: Markdown readme.
    tags: Prompt tags.
    is_public: Whether the prompt is public.

Returns:
    Created Prompt object.
"""
def update_prompt(
self,
prompt_identifier: str,
*,
description: Optional[str] = None,
readme: Optional[str] = None,
tags: Optional[list[str]] = None,
is_public: Optional[bool] = None,
is_archived: Optional[bool] = None,
) -> Prompt:
"""Update a prompt.

Args:
    prompt_identifier: Prompt name or identifier.
    description: New description.
    readme: New readme.
    tags: New tags.
    is_public: New public status.
    is_archived: New archived status.

Returns:
    Updated Prompt object.
"""
def delete_prompt(
self,
prompt_identifier: str
) -> None:
"""Delete a prompt.

Args:
    prompt_identifier: Prompt name or identifier.
"""
def like_prompt(
self,
prompt_identifier: str
) -> None:
"""Like a prompt.

Args:
    prompt_identifier: Prompt name or identifier.
"""
def unlike_prompt(
self,
prompt_identifier: str
) -> None:
"""
Unlike a prompt.
Parameters:
- prompt_identifier: Prompt name or identifier
"""@property
def info(self) -> ls_schemas.LangSmithInfo:
"""Get LangSmith server information.

Returns:
    LangSmithInfo object with server version and capabilities.
"""
@property
def api_key(self) -> Optional[str]:
"""Get the current API key.

Returns:
    API key string, or None if not set.
"""
@api_key.setter
def api_key(self, value: Optional[str]) -> None:
"""Set the API key.

Args:
    value: New API key.
"""
@property
def workspace_id(self) -> Optional[str]:
"""Get the current workspace ID.

Returns:
    Workspace ID, or None if not set.
"""
@workspace_id.setter
def workspace_id(self, value: Optional[str]) -> None:
"""Set the workspace ID.

Args:
    value: New workspace ID.
"""