or run

tessl search
Log in

Version

Workspace
tessl
Visibility
Public
Created
Last updated
Describes
pkg:pypi/langsmith@0.6.x

docs

index.md
tile.json

tessl/pypi-langsmith

tessl install tessl/pypi-langsmith@0.6.1

Python SDK for LangSmith Observability and Evaluation Platform

docs/data/feedback.md

Feedback Management

Operations for creating and managing feedback - metrics and annotations on runs.

Creating Feedback

Feedback Operations

Operations for creating and managing feedback (metrics and annotations on runs).

def create_feedback(
    self,
    run_id: Union[str, UUID],
    key: str,
    *,
    score: Optional[Union[int, float, bool]] = None,
    value: Optional[Union[int, float, bool, str, dict, list]] = None,
    correction: Optional[dict] = None,
    comment: Optional[str] = None,
    source_info: Optional[dict] = None,
    feedback_source_type: Optional[FeedbackSourceType] = None,
    source_run_id: Optional[Union[str, UUID]] = None,
    feedback_id: Optional[Union[str, UUID]] = None,
    feedback_config: Optional[FeedbackConfig] = None,
    **kwargs
) -> Feedback:
    """
    Create feedback (a metric or annotation) attached to a run.

    All fields except ``run_id`` and ``key`` are keyword-only.

    Parameters:
    - run_id: ID of the run to give feedback on (str or UUID accepted)
    - key: Feedback key / metric name the feedback is recorded under
    - score: Numeric score (int, float, or bool)
    - value: Non-numeric or structured feedback value
    - correction: Correction data for the run's output
    - comment: Free-form text comment
    - source_info: Additional metadata about the feedback source
    - feedback_source_type: Type of the feedback source
    - source_run_id: ID of the evaluator run that created this feedback
      (used when feedback is produced programmatically by an evaluator)
    - feedback_id: Preset feedback ID; presumably assigned by the server
      when omitted — confirm against the implementation
    - feedback_config: Configuration describing how to interpret the
      feedback (e.g. value type); see FeedbackConfig
    - kwargs: Additional fields forwarded to the API (exact semantics
      not visible from this stub)

    Returns:
    Created Feedback object
    """

Reading Feedback

def read_feedback(
    self,
    feedback_id: Union[str, UUID]
) -> Feedback:
    """
    Fetch a single feedback record by its ID.

    Parameters:
    - feedback_id: ID of the feedback to retrieve (str or UUID accepted)

    Returns:
    The matching Feedback object

    NOTE(review): behavior when the ID does not exist (exception vs.
    None) is not visible from this stub — confirm against the
    implementation.
    """

Listing Feedback

def list_feedback(
    self,
    *,
    run_ids: Optional[list[Union[str, UUID]]] = None,
    feedback_keys: Optional[list[str]] = None,
    feedback_source_types: Optional[list[FeedbackSourceType]] = None,
    limit: Optional[int] = None,
    offset: Optional[int] = None,
    **kwargs
) -> Iterator[Feedback]:
    """
    List feedback records, optionally filtered.

    All parameters are keyword-only. Omitted filters match everything.

    Parameters:
    - run_ids: Only return feedback attached to these run IDs
    - feedback_keys: Only return feedback with these keys
    - feedback_source_types: Only return feedback from these source types
    - limit: Maximum number of feedback items to return
    - offset: Number of feedback items to skip before returning results
    - kwargs: Additional filter fields forwarded to the API (exact
      semantics not visible from this stub)

    Returns:
    Iterator of Feedback objects — presumably paginated lazily; confirm
    against the implementation before relying on laziness.
    """

def delete_feedback(
    self,
    feedback_id: Union[str, UUID]
) -> None:
    """
    Delete a feedback record by its ID.

    Parameters:
    - feedback_id: ID of the feedback to delete (str or UUID accepted)

    Returns:
    None
    """

Public Feedback Tokens

def create_presigned_feedback_token(
    self,
    run_id: Union[str, UUID],
    feedback_key: str,
    *,
    expiration: Optional[datetime] = None,
    feedback_config: Optional[FeedbackConfig] = None,
) -> FeedbackIngestToken:
    """
    Create a presigned token that allows feedback to be submitted for a
    run without authenticating — e.g. from an end-user-facing surface.

    Parameters:
    - run_id: ID of the run the token will accept feedback for
    - feedback_key: Feedback key / metric name the token is bound to
    - expiration: When the token expires; default behavior when omitted
      is not visible from this stub — confirm against the implementation
    - feedback_config: Configuration describing how to interpret the
      feedback submitted through this token

    Returns:
    FeedbackIngestToken object carrying the token and its submission URL
    """
def create_feedback_from_token(
    self,
    token: Union[str, UUID],
    *,
    score: Optional[Union[int, float, bool]] = None,
    value: Optional[Union[int, float, bool, str, dict, list]] = None,
    correction: Optional[dict] = None,
    comment: Optional[str] = None,
    **kwargs
) -> None:
    """
    Submit feedback using a presigned token (see
    create_presigned_feedback_token); the run and feedback key are
    determined by the token, so only the feedback payload is supplied.

    Parameters:
    - token: Presigned feedback token (str or UUID accepted)
    - score: Numeric score (int, float, or bool)
    - value: Non-numeric or structured feedback value
    - correction: Correction data for the run's output
    - comment: Free-form text comment
    - kwargs: Additional fields forwarded to the API (exact semantics
      not visible from this stub)

    Returns:
    None — the created feedback is not returned to the caller
    """