Interface between LLMs and your data
Comprehensive evaluation capabilities for RAG systems, including retrieval metrics, response quality assessment, and dataset generation. The evaluation framework provides tools for measuring system performance, generating synthetic test data, and running systematic evaluations of LLM applications.
Foundation interfaces for evaluation operations with standardized result structures and metrics computation.
class BaseEvaluator:
"""
Base interface for evaluation implementations.
Evaluators assess different aspects of system performance including
response quality, retrieval accuracy, and adherence to guidelines.
"""
def __init__(self, **kwargs): ...
def evaluate(
self,
query: Optional[str] = None,
contexts: Optional[Sequence[str]] = None,
response: Optional[str] = None,
**kwargs
) -> EvaluationResult:
"""
Evaluate system performance on given inputs.
Parameters:
- query: Optional[str], input query or question
- contexts: Optional[Sequence[str]], retrieved context passages
- response: Optional[str], system response to evaluate
- **kwargs: additional evaluation parameters
Returns:
- EvaluationResult, evaluation result with score and feedback
"""
async def aevaluate(
self,
query: Optional[str] = None,
contexts: Optional[Sequence[str]] = None,
response: Optional[str] = None,
**kwargs
) -> EvaluationResult:
"""Asynchronous evaluation method."""
class EvaluationResult:
"""
Result of evaluation with score and detailed feedback.
Parameters:
- score: Optional[float], numerical evaluation score
- passing: Optional[bool], whether evaluation passed threshold
- feedback: Optional[str], detailed feedback and explanation
- metadata: Optional[dict], additional evaluation metadata
"""
def __init__(
self,
score: Optional[float] = None,
passing: Optional[bool] = None,
feedback: Optional[str] = None,
metadata: Optional[dict] = None,
**kwargs
): ...
def __str__(self) -> str:
"""String representation of evaluation result."""Evaluators for assessing the quality, relevance, and accuracy of system responses.
class FaithfulnessEvaluator(BaseEvaluator):
"""
Evaluator for response faithfulness to retrieved contexts.
Assesses whether the response is grounded in and consistent with
the provided context information without hallucination.
Parameters:
- llm: Optional[LLM], language model for faithfulness assessment
- raise_error: bool, whether to raise errors on evaluation failure
- eval_template: Optional[BasePromptTemplate], evaluation prompt template
"""
def __init__(
self,
llm: Optional[LLM] = None,
raise_error: bool = False,
eval_template: Optional[BasePromptTemplate] = None,
**kwargs
): ...
class AnswerRelevancyEvaluator(BaseEvaluator):
"""
Evaluator for answer relevancy to the input query.
Measures how well the response addresses the specific question asked
and whether it provides relevant information.
Parameters:
- llm: Optional[LLM], language model for relevancy assessment
- embed_model: Optional[BaseEmbedding], embedding model for similarity
- raise_error: bool, whether to raise errors on evaluation failure
"""
def __init__(
self,
llm: Optional[LLM] = None,
embed_model: Optional[BaseEmbedding] = None,
raise_error: bool = False,
**kwargs
): ...
class ContextRelevancyEvaluator(BaseEvaluator):
"""
Evaluator for context relevancy to the query.
Assesses whether the retrieved context passages are relevant
to answering the input query.
Parameters:
- llm: Optional[LLM], language model for relevancy assessment
- raise_error: bool, whether to raise errors on evaluation failure
"""
def __init__(
self,
llm: Optional[LLM] = None,
raise_error: bool = False,
**kwargs
): ...
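A minimal sketch of context relevancy checking, reusing the MockLLM stand-in from the usage examples later on this page; with a mock model the score and feedback are placeholders rather than real judgments.

from llama_index.core.evaluation import ContextRelevancyEvaluator
from llama_index.core.llms import MockLLM

context_evaluator = ContextRelevancyEvaluator(llm=MockLLM())
context_result = context_evaluator.evaluate(
    query="What is machine learning?",
    contexts=["Machine learning is a subset of AI that learns from data."],
)
print(context_result.score, context_result.feedback)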
class CorrectnessEvaluator(BaseEvaluator):
"""
Evaluator for response correctness against reference answers.
Compares system responses to ground truth answers to assess
factual accuracy and completeness.
Parameters:
- llm: Optional[LLM], language model for correctness assessment
- raise_error: bool, whether to raise errors on evaluation failure
- eval_template: Optional[BasePromptTemplate], evaluation prompt template
"""
def __init__(
self,
llm: Optional[LLM] = None,
raise_error: bool = False,
eval_template: Optional[BasePromptTemplate] = None,
**kwargs
): ...
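A minimal correctness sketch. The ground-truth answer is forwarded through **kwargs as a reference keyword here; with a mock model the parsed score may be empty, so substitute a real LLM for meaningful results.

from llama_index.core.evaluation import CorrectnessEvaluator
from llama_index.core.llms import MockLLM

correctness_evaluator = CorrectnessEvaluator(llm=MockLLM())
correctness_result = correctness_evaluator.evaluate(
    query="What is machine learning?",
    response="Machine learning is a branch of AI that learns patterns from data.",
    # Ground-truth answer, forwarded via **kwargs.
    reference="Machine learning is a subfield of AI in which systems learn from data.",
)
print(correctness_result.score, correctness_result.passing)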
class SemanticSimilarityEvaluator(BaseEvaluator):
"""
Evaluator for semantic similarity between responses and references.
Uses embedding models to assess semantic similarity between
generated responses and reference answers.
Parameters:
- embed_model: Optional[BaseEmbedding], embedding model for similarity
- similarity_threshold: float, threshold for similarity scoring
"""
def __init__(
self,
embed_model: Optional[BaseEmbedding] = None,
similarity_threshold: float = 0.8,
**kwargs
): ...
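A minimal similarity sketch using MockEmbedding as a stand-in embedding model; MockEmbedding returns constant vectors, so the score here is trivially high. The reference answer is forwarded via **kwargs.

from llama_index.core.evaluation import SemanticSimilarityEvaluator
from llama_index.core.embeddings import MockEmbedding

similarity_evaluator = SemanticSimilarityEvaluator(
    embed_model=MockEmbedding(embed_dim=8),
    similarity_threshold=0.8,
)
similarity_result = similarity_evaluator.evaluate(
    response="ML systems learn patterns from data.",
    reference="Machine learning finds patterns in data.",  # forwarded via **kwargs
)
print(similarity_result.score, similarity_result.passing)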
class RelevancyEvaluator(BaseEvaluator):
"""
General relevancy evaluator for response-query alignment.
Provides flexible relevancy assessment with customizable
evaluation criteria and scoring methods.
Parameters:
- llm: Optional[LLM], language model for relevancy assessment
- raise_error: bool, whether to raise errors on evaluation failure
"""
def __init__(
self,
llm: Optional[LLM] = None,
raise_error: bool = False,
**kwargs
): ...

Evaluators for adherence to specific guidelines, criteria, or custom evaluation frameworks.
class GuidelineEvaluator(BaseEvaluator):
"""
Evaluator for adherence to specific guidelines or criteria.
Assesses whether responses follow specified guidelines,
style requirements, or content policies.
Parameters:
- guidelines: str, guidelines text or criteria description
- llm: Optional[LLM], language model for guideline assessment
- eval_template: Optional[BasePromptTemplate], evaluation prompt template
"""
def __init__(
self,
guidelines: str,
llm: Optional[LLM] = None,
eval_template: Optional[BasePromptTemplate] = None,
**kwargs
): ...
class PairwiseComparisonEvaluator(BaseEvaluator):
"""
Evaluator for pairwise comparison between responses.
Compares two responses to determine which better satisfies
the evaluation criteria or user requirements.
Parameters:
- llm: Optional[LLM], language model for comparison assessment
- eval_template: Optional[BasePromptTemplate], comparison prompt template
"""
def __init__(
self,
llm: Optional[LLM] = None,
eval_template: Optional[BasePromptTemplate] = None,
**kwargs
): ...
def evaluate(
self,
query: str,
response_a: str,
response_b: str,
contexts: Optional[Sequence[str]] = None,
**kwargs
) -> EvaluationResult:
"""
Compare two responses for the same query.
Parameters:
- query: str, input query for comparison
- response_a: str, first response to compare
- response_b: str, second response to compare
- contexts: Optional[Sequence[str]], context for evaluation
Returns:
- EvaluationResult, comparison result indicating preferred response
"""Specialized evaluation framework for retrieval system performance and accuracy.
class BaseRetrievalEvaluator:
"""
Base interface for retrieval evaluation implementations.
Retrieval evaluators assess the quality of information retrieval
systems including accuracy, coverage, and ranking quality.
"""
def __init__(self, **kwargs): ...
def evaluate(
self,
query: str,
expected_ids: List[str],
retrieved_ids: List[str],
**kwargs
) -> RetrievalEvalResult:
"""
Evaluate retrieval performance.
Parameters:
- query: str, input query
- expected_ids: List[str], expected relevant document IDs
- retrieved_ids: List[str], actually retrieved document IDs
Returns:
- RetrievalEvalResult, retrieval evaluation result
"""
class RetrievalEvalResult:
"""
Result of retrieval evaluation with multiple metrics.
Parameters:
- query: str, evaluated query
- expected_ids: List[str], expected relevant IDs
- retrieved_ids: List[str], retrieved IDs
- metric_vals_dict: Dict[str, float], metric name to value mapping
"""
def __init__(
self,
query: str,
expected_ids: List[str],
retrieved_ids: List[str],
metric_vals_dict: Dict[str, float],
**kwargs
): ...
class RetrieverEvaluator(BaseRetrievalEvaluator):
"""
Comprehensive retriever evaluation with multiple metrics.
Parameters:
- metrics: Optional[List[BaseMetric]], metrics to compute
- retriever: Optional[BaseRetriever], retriever to evaluate
- node_postprocessors: Optional[List[BaseNodePostprocessor]], postprocessors
"""
def __init__(
self,
metrics: Optional[List[BaseMetric]] = None,
retriever: Optional[BaseRetriever] = None,
node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
**kwargs
): ...
async def aevaluate_dataset(
self,
dataset: Any,
workers: int = 2,
show_progress: bool = False,
**kwargs
) -> List[RetrievalEvalResult]:
"""Asynchronously evaluate entire dataset."""
class MultiModalRetrieverEvaluator(BaseRetrievalEvaluator):
"""
Evaluator for multi-modal retrieval systems.
Assesses retrieval performance for systems handling multiple
content modalities including text, images, and other media.
Parameters:
- metrics: Optional[List[BaseMetric]], evaluation metrics
- retriever: Optional[BaseRetriever], multi-modal retriever to evaluate
"""
def __init__(
self,
metrics: Optional[List[BaseMetric]] = None,
retriever: Optional[BaseRetriever] = None,
**kwargs
): ...

Specific metrics for measuring retrieval system performance and quality.
class RetrievalMetricResult:
"""
Result container for individual retrieval metrics.
Parameters:
- metric_name: str, name of the computed metric
- score: float, metric score value
- metadata: Optional[dict], additional metric metadata
"""
def __init__(
self,
metric_name: str,
score: float,
metadata: Optional[dict] = None
): ...
class HitRate:
"""
Hit rate metric for retrieval evaluation.
Measures the fraction of queries for which at least one
relevant document was retrieved in the top-k results.
"""
def __init__(self): ...
def compute(
self,
query: str,
expected_ids: List[str],
retrieved_ids: List[str]
) -> RetrievalMetricResult:
"""
Compute hit rate for single query.
Parameters:
- query: str, input query
- expected_ids: List[str], relevant document IDs
- retrieved_ids: List[str], retrieved document IDs
Returns:
- RetrievalMetricResult, hit rate result (1.0 if hit, 0.0 if miss)
"""
class MRR:
"""
Mean Reciprocal Rank (MRR) metric for retrieval evaluation.
Measures the quality of ranking by computing the reciprocal
of the rank of the first relevant document retrieved.
"""
def __init__(self): ...
def compute(
self,
query: str,
expected_ids: List[str],
retrieved_ids: List[str]
) -> RetrievalMetricResult:
"""
Compute MRR for single query.
Parameters:
- query: str, input query
- expected_ids: List[str], relevant document IDs
- retrieved_ids: List[str], retrieved document IDs
Returns:
- RetrievalMetricResult, MRR score (1/rank of first relevant doc)
"""
def resolve_metrics(metric_names: List[str]) -> List[BaseMetric]:
"""
Resolve metric instances from metric names.
Parameters:
- metric_names: List[str], names of metrics to resolve
Returns:
- List[BaseMetric], resolved metric instances
"""Tools for generating synthetic evaluation datasets and test cases for system validation.
class DatasetGenerator:
"""
Generator for synthetic evaluation datasets.
Creates question-answer pairs, retrieval test cases, and other
evaluation data from source documents and knowledge bases.
Parameters:
- nodes: List[BaseNode], source nodes for dataset generation
- llm: Optional[LLM], language model for question generation
- num_questions_per_chunk: int, questions to generate per text chunk
- text_question_template: Optional[BasePromptTemplate], question generation prompt
- text_qa_template: Optional[BasePromptTemplate], QA pair generation prompt
"""
def __init__(
self,
nodes: List[BaseNode],
llm: Optional[LLM] = None,
num_questions_per_chunk: int = 2,
text_question_template: Optional[BasePromptTemplate] = None,
text_qa_template: Optional[BasePromptTemplate] = None,
**kwargs
): ...
def generate_questions_from_nodes(self, num: Optional[int] = None) -> List[str]:
"""
Generate questions from source nodes.
Parameters:
- num: Optional[int], number of questions to generate
Returns:
- List[str], generated questions
"""
def generate_dataset_from_nodes(self, num: Optional[int] = None) -> "QueryResponseDataset":
"""
Generate complete QA dataset from nodes.
Parameters:
- num: Optional[int], number of QA pairs to generate
Returns:
- QueryResponseDataset, generated dataset with questions and answers
"""
async def agenerate_questions_from_nodes(self, num: Optional[int] = None) -> List[str]:
"""Async version of question generation."""
async def agenerate_dataset_from_nodes(self, num: Optional[int] = None) -> "QueryResponseDataset":
"""Async version of dataset generation."""
class QueryResponseDataset:
"""
Dataset container for query-response evaluation pairs.
Parameters:
- queries: List[str], evaluation queries
- responses: List[str], expected responses
- relevant_docs: Optional[List[List[str]]], relevant document IDs per query
"""
def __init__(
self,
queries: List[str],
responses: List[str],
relevant_docs: Optional[List[List[str]]] = None,
**kwargs
): ...
def save_json(self, path: str) -> None:
"""Save dataset to JSON file."""
@classmethod
def from_json(cls, path: str) -> "QueryResponseDataset":
"""Load dataset from JSON file."""
def __len__(self) -> int:
"""Get dataset size."""
def __getitem__(self, idx: int) -> Dict[str, Any]:
"""Get dataset item by index."""
class EmbeddingQAFinetuneDataset:
"""
Dataset for embedding model fine-tuning with query-context pairs.
Parameters:
- queries: List[str], training queries
- corpus: List[str], text corpus for contexts
- relevant_docs: List[List[str]], relevant document mapping
"""
def __init__(
self,
queries: List[str],
corpus: List[str],
relevant_docs: List[List[str]],
**kwargs
): ...
def save_json(self, path: str) -> None:
"""Save fine-tuning dataset to JSON file."""
@classmethod
def from_json(cls, path: str) -> "EmbeddingQAFinetuneDataset":
"""Load fine-tuning dataset from JSON file."""Tools for running large-scale evaluations across multiple queries and systems.
class BatchEvalRunner:
"""
Batch evaluation runner for systematic evaluation across datasets.
Coordinates evaluation of multiple queries, responses, and systems
with parallel processing and result aggregation.
Parameters:
- evaluators: Dict[str, BaseEvaluator], named evaluators to run
- workers: int, number of worker processes for parallel evaluation
- show_progress: bool, whether to show evaluation progress
"""
def __init__(
self,
evaluators: Dict[str, BaseEvaluator],
workers: int = 2,
show_progress: bool = True,
**kwargs
): ...
def evaluate_queries(
self,
queries: List[str],
responses: List[str],
contexts_list: Optional[List[List[str]]] = None,
**kwargs
) -> Dict[str, List[EvaluationResult]]:
"""
Evaluate multiple queries in batch.
Parameters:
- queries: List[str], evaluation queries
- responses: List[str], system responses to evaluate
- contexts_list: Optional[List[List[str]]], contexts per query
Returns:
- Dict[str, List[EvaluationResult]], evaluator name to results mapping
"""
async def aevaluate_queries(
self,
queries: List[str],
responses: List[str],
contexts_list: Optional[List[List[str]]] = None,
**kwargs
) -> Dict[str, List[EvaluationResult]]:
"""Async batch evaluation."""Helper functions for evaluation dataset generation and result processing.
def generate_qa_embedding_pairs(
nodes: List[BaseNode],
llm: LLM,
qa_generate_prompt_tmpl: str,
num_questions_per_chunk: int = 2
) -> EmbeddingQAFinetuneDataset:
"""
Generate QA pairs for embedding fine-tuning.
Parameters:
- nodes: List[BaseNode], source nodes for generation
- llm: LLM, language model for question generation
- qa_generate_prompt_tmpl: str, prompt template for QA generation
- num_questions_per_chunk: int, questions per text chunk
Returns:
- EmbeddingQAFinetuneDataset, generated QA dataset for fine-tuning
"""
def generate_question_context_pairs(
nodes: List[BaseNode],
llm: LLM,
num_questions_per_chunk: int = 2,
question_gen_query: str = "Generate questions from context"
) -> List[Tuple[str, str]]:
"""
Generate question-context pairs for evaluation.
Parameters:
- nodes: List[BaseNode], source nodes
- llm: LLM, language model for generation
- num_questions_per_chunk: int, questions per chunk
- question_gen_query: str, query for question generation
Returns:
- List[Tuple[str, str]], question-context pairs
"""
def get_retrieval_results_df(
names: List[str],
results_arr: List[List[RetrievalEvalResult]]
) -> "pd.DataFrame":
"""
Convert retrieval results to pandas DataFrame for analysis.
Parameters:
- names: List[str], names for result sets
- results_arr: List[List[RetrievalEvalResult]], evaluation results
Returns:
- pd.DataFrame, results formatted as DataFrame
"""Legacy evaluator interfaces maintained for backward compatibility.
# Legacy aliases and classes maintained for compatibility
QueryResponseEvaluator = BaseEvaluator
ResponseEvaluator = BaseEvaluator
LabelledQADataset = QueryResponseDataset

from llama_index.core.evaluation import FaithfulnessEvaluator, RelevancyEvaluator
from llama_index.core.llms import MockLLM
# Initialize evaluators
llm = MockLLM()
faithfulness_evaluator = FaithfulnessEvaluator(llm=llm)
relevancy_evaluator = RelevancyEvaluator(llm=llm)
# Sample data for evaluation
query = "What is machine learning?"
contexts = [
"Machine learning is a subset of artificial intelligence that enables computers to learn from data.",
"Deep learning uses neural networks with multiple layers to process complex patterns."
]
response = "Machine learning is a branch of AI that allows systems to automatically learn from data without being explicitly programmed."
# Evaluate faithfulness
faithfulness_result = faithfulness_evaluator.evaluate(
query=query,
contexts=contexts,
response=response
)
print(f"Faithfulness Score: {faithfulness_result.score}")
print(f"Faithfulness Feedback: {faithfulness_result.feedback}")
# Evaluate relevancy
relevancy_result = relevancy_evaluator.evaluate(
query=query,
response=response
)
print(f"Relevancy Score: {relevancy_result.score}")
print(f"Relevancy Feedback: {relevancy_result.feedback}")from llama_index.core.evaluation import RetrieverEvaluator, HitRate, MRR
from llama_index.core import VectorStoreIndex, Document
# Create test index and retriever
documents = [
Document(text="Machine learning algorithms learn patterns from data.", metadata={"doc_id": "doc1"}),
Document(text="Deep learning uses neural networks for complex tasks.", metadata={"doc_id": "doc2"}),
Document(text="Natural language processing handles text understanding.", metadata={"doc_id": "doc3"})
]
index = VectorStoreIndex.from_documents(documents)
retriever = index.as_retriever(similarity_top_k=2)
# Initialize retrieval evaluator
metrics = [HitRate(), MRR()]
retrieval_evaluator = RetrieverEvaluator(
metrics=metrics,
retriever=retriever
)
# Evaluate single query
query = "What are neural networks used for?"
expected_ids = ["doc2"] # Ground truth relevant documents
# Get retrieved results
retrieved_nodes = retriever.retrieve(query)
retrieved_ids = [node.node.metadata["doc_id"] for node in retrieved_nodes]
# Evaluate retrieval performance
eval_result = retrieval_evaluator.evaluate(
query=query,
expected_ids=expected_ids,
retrieved_ids=retrieved_ids
)
print(f"Hit Rate: {eval_result.metric_vals_dict.get('hit_rate', 0)}")
print(f"MRR: {eval_result.metric_vals_dict.get('mrr', 0)}")from llama_index.core.evaluation import DatasetGenerator
from llama_index.core.node_parser import SentenceSplitter
# Parse documents into nodes
parser = SentenceSplitter(chunk_size=512)
nodes = parser.get_nodes_from_documents(documents)
# Generate evaluation dataset
dataset_generator = DatasetGenerator(
nodes=nodes,
llm=llm,
num_questions_per_chunk=3
)
# Generate questions and QA dataset
questions = dataset_generator.generate_questions_from_nodes(num=5)
print("Generated Questions:")
for i, question in enumerate(questions, 1):
print(f"{i}. {question}")
# Generate complete QA dataset
qa_dataset = dataset_generator.generate_dataset_from_nodes(num=5)
print(f"\\nGenerated {len(qa_dataset)} QA pairs")
for i in range(min(2, len(qa_dataset))):
item = qa_dataset[i]
print(f"Q{i+1}: {item['query']}")
print(f"A{i+1}: {item['response']}")from llama_index.core.evaluation import BatchEvalRunner
# Setup multiple evaluators
evaluators = {
"faithfulness": FaithfulnessEvaluator(llm=llm),
"relevancy": RelevancyEvaluator(llm=llm),
"answer_relevancy": AnswerRelevancyEvaluator(llm=llm)
}
# Create batch runner
batch_runner = BatchEvalRunner(
evaluators=evaluators,
workers=2,
show_progress=True
)
# Prepare evaluation data
eval_queries = [
"What is machine learning?",
"How do neural networks work?",
"What is natural language processing?"
]
eval_responses = [
"Machine learning is AI that learns from data automatically.",
"Neural networks are computing systems inspired by biological neural networks.",
"NLP is a field focused on interaction between computers and human language."
]
eval_contexts = [
["Machine learning enables computers to learn from data without explicit programming."],
["Neural networks consist of interconnected nodes that process information."],
["Natural language processing combines linguistics and computer science."]
]
# Run batch evaluation
batch_results = batch_runner.evaluate_queries(
queries=eval_queries,
responses=eval_responses,
contexts_list=eval_contexts
)
# Process results
for evaluator_name, results in batch_results.items():
avg_score = sum(r.score or 0 for r in results) / len(results)
print(f"{evaluator_name.title()} - Average Score: {avg_score:.3f}")from llama_index.core.evaluation import GuidelineEvaluator
# Define evaluation guidelines
guidelines = """
Response Quality Guidelines:
1. Answers should be concise and directly address the question
2. Technical terms should be explained simply
3. Responses should be factual and avoid speculation
4. Include examples when helpful
5. Maintain a helpful and professional tone
"""
# Create guideline evaluator
guideline_evaluator = GuidelineEvaluator(
guidelines=guidelines,
llm=llm
)
# Evaluate response against guidelines
response_to_evaluate = "Machine learning is super complicated stuff that uses math and computers and data and things."
guideline_result = guideline_evaluator.evaluate(
query="What is machine learning?",
response=response_to_evaluate
)
print(f"Guideline Adherence Score: {guideline_result.score}")
print(f"Guideline Feedback: {guideline_result.feedback}")from llama_index.core.evaluation import PairwiseComparisonEvaluator
# Create pairwise evaluator
pairwise_evaluator = PairwiseComparisonEvaluator(llm=llm)
# Compare two different responses
response_a = "Machine learning is a subset of AI that learns from data."
response_b = "Machine learning uses algorithms to find patterns in data and make predictions automatically."
comparison_result = pairwise_evaluator.evaluate(
query="What is machine learning?",
response_a=response_a,
response_b=response_b
)
print(f"Preferred Response: {comparison_result.feedback}")
print(f"Comparison Score: {comparison_result.score}")from llama_index.core.evaluation import RetrievalMetricResult
class Precision:
"""Custom precision metric for retrieval evaluation."""
def __init__(self, k: int = 10):
self.k = k
def compute(
self,
query: str,
expected_ids: List[str],
retrieved_ids: List[str]
) -> RetrievalMetricResult:
"""Compute precision@k."""
# Take top k retrieved documents
top_k_retrieved = retrieved_ids[:self.k]
# Count relevant documents in top k
relevant_in_top_k = len(set(top_k_retrieved) & set(expected_ids))
# Calculate precision
precision = relevant_in_top_k / len(top_k_retrieved) if top_k_retrieved else 0.0
return RetrievalMetricResult(
metric_name=f"precision_at_{self.k}",
score=precision
)
# Use custom metric
precision_metric = Precision(k=5)
precision_result = precision_metric.compute(
query="test query",
expected_ids=["doc1", "doc3"],
retrieved_ids=["doc1", "doc2", "doc3", "doc4", "doc5"]
)
print(f"Precision@5: {precision_result.score}")# Collect evaluation results across different queries
all_results = []
for query, response, contexts in zip(eval_queries, eval_responses, eval_contexts):
    faithfulness = faithfulness_evaluator.evaluate(
        query=query,
        contexts=contexts,
        response=response
    )
    relevancy = relevancy_evaluator.evaluate(
        query=query,
        response=response
    )
    # Guard against None scores (score is Optional[float]) before averaging.
    faithfulness_score = faithfulness.score or 0.0
    relevancy_score = relevancy.score or 0.0
    all_results.append({
        "query": query,
        "faithfulness_score": faithfulness_score,
        "relevancy_score": relevancy_score,
        "average_score": (faithfulness_score + relevancy_score) / 2
    })
# Analyze results
for result in all_results:
print(f"Query: {result['query']}")
print(f" Faithfulness: {result['faithfulness_score']:.3f}")
print(f" Relevancy: {result['relevancy_score']:.3f}")
print(f" Average: {result['average_score']:.3f}")
print()

from enum import Enum
from typing import Any, Union

# Evaluation configuration
DEFAULT_EVAL_BATCH_SIZE = 20
DEFAULT_WORKERS = 2
DEFAULT_SIMILARITY_THRESHOLD = 0.8
# Evaluation modes
class EvaluationMode(str, Enum):
SINGLE = "single"
BATCH = "batch"
STREAMING = "streaming"
# Metric types
BaseMetric = Union[HitRate, MRR, Any]
# Dataset formats
SUPPORTED_DATASET_FORMATS = ["json", "csv", "jsonl"]
# Evaluation result types
EvalResultType = Union[EvaluationResult, RetrievalEvalResult]

Install with Tessl CLI
npx tessl i tessl/pypi-llama-index-core