CtrlK
Blog · Docs · Log in · Get started
Tessl Logo

tessl/pypi-groq

The official Python library for the groq API

Pending
Overview
Eval results
Files

docs/embeddings.md

Text Embeddings

Generate high-quality vector embeddings for text inputs, supporting both single strings and batch processing. The embeddings API converts text into dense vector representations that can be used for semantic search, clustering, and other machine learning tasks.

Capabilities

Create Embeddings

Generate embeddings for single text strings or batch process multiple texts simultaneously.

def create(
    input: Union[str, List[str]],
    model: Union[str, Literal["nomic-embed-text-v1_5"]],
    encoding_format: Literal["float", "base64"] | NotGiven = NOT_GIVEN,
    user: Optional[str] | NotGiven = NOT_GIVEN,
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN
) -> CreateEmbeddingResponse:
    """
    Create embeddings for the given input text(s).

    Parameters:
    - input: Text string or list of text strings to embed
    - model: Model identifier to use for embeddings
    - encoding_format: Format for the embedding vectors ("float" or "base64");
      when NOT_GIVEN, the server-side default format is used
    - user: Unique identifier representing your end-user
    - extra_headers: Extra HTTP headers to send with this request
    - extra_query: Extra query parameters to append to the request URL
    - extra_body: Extra JSON properties to merge into the request body
    - timeout: Per-request timeout override, in seconds or as an httpx.Timeout

    Returns:
    CreateEmbeddingResponse containing embedding vectors and usage information
    """

Async Create Embeddings

Asynchronous version of embedding creation with identical parameters and functionality.

async def create(
    input: Union[str, List[str]],
    model: Union[str, Literal["nomic-embed-text-v1_5"]],
    encoding_format: Literal["float", "base64"] | NotGiven = NOT_GIVEN,
    user: Optional[str] | NotGiven = NOT_GIVEN,
    **kwargs
) -> CreateEmbeddingResponse:
    """
    Async version of create() with identical parameters.

    **kwargs: presumably the same extra request options as the sync
    create() (extra_headers, extra_query, extra_body, timeout) —
    TODO confirm against the client implementation.
    """

Usage Examples

Single Text Embedding

from groq import Groq

client = Groq()

# Embed one sentence; for a single-string input, response.data holds one entry.
response = client.embeddings.create(
    input="The quick brown fox jumps over the lazy dog",
    model="nomic-embed-text-v1_5"
)

# Each entry exposes the raw vector on its .embedding attribute.
embedding = response.data[0].embedding
print(f"Embedding dimension: {len(embedding)}")
print(f"First few values: {embedding[:5]}")

Batch Text Embeddings

from groq import Groq

client = Groq()

# Passing a list embeds every string in a single API call.
texts = [
    "Machine learning is a subset of artificial intelligence.",
    "Deep learning uses neural networks with multiple layers.",
    "Natural language processing helps computers understand text.",
    "Computer vision enables machines to interpret visual information."
]

response = client.embeddings.create(
    input=texts,
    model="nomic-embed-text-v1_5"
)

# response.data yields one embedding object per input text.
for i, embedding_obj in enumerate(response.data):
    print(f"Text {i+1} embedding dimension: {len(embedding_obj.embedding)}")

Async Usage

import asyncio
from groq import AsyncGroq

async def main():
    """Generate a single embedding using the asynchronous client."""
    client = AsyncGroq()

    # The async client mirrors the sync API; the call just needs awaiting.
    response = await client.embeddings.create(
        input="Async embedding generation example",
        model="nomic-embed-text-v1_5",
    )

    vector = response.data[0].embedding
    print(f"Generated embedding with {len(vector)} dimensions")

asyncio.run(main())

Semantic Search Example

import numpy as np
from groq import Groq

client = Groq()

# Documents to search through
documents = [
    "Python is a high-level programming language.",
    "JavaScript is used for web development.",
    "Machine learning algorithms can predict outcomes.",
    "Databases store and organize information.",
    "APIs enable communication between applications.",
]

# Query to search for
query = "programming languages for software development"

# Generate embeddings for all documents and query in one batched call.
all_texts = documents + [query]
response = client.embeddings.create(
    input=all_texts,
    model="nomic-embed-text-v1_5"
)

# Extract embeddings. The query was appended last, so the slicing below
# assumes response.data preserves input order — verify against the API docs.
doc_embeddings = [resp.embedding for resp in response.data[:-1]]
query_embedding = response.data[-1].embedding

# Calculate cosine similarity
def cosine_similarity(vec_a, vec_b):
    """Return the cosine of the angle between vec_a and vec_b (1.0 = same direction)."""
    dot_product = np.dot(vec_a, vec_b)
    norm_product = np.linalg.norm(vec_a) * np.linalg.norm(vec_b)
    return dot_product / norm_product

# Find most similar document: score the query against every document vector
# and take the index of the highest similarity.
similarities = [cosine_similarity(query_embedding, doc_emb) for doc_emb in doc_embeddings]
best_match_idx = np.argmax(similarities)

print(f"Query: {query}")
print(f"Most similar document: {documents[best_match_idx]}")
print(f"Similarity score: {similarities[best_match_idx]:.4f}")

Types

Request Types

class EmbeddingCreateParams:
    """Request parameters accepted by embeddings.create()."""
    input: Union[str, List[str]]  # single text or batch of texts to embed
    model: Union[str, Literal["nomic-embed-text-v1_5"]]  # embedding model identifier
    encoding_format: Literal["float", "base64"] | NotGiven  # wire format of the returned vectors
    user: Optional[str] | NotGiven  # opaque end-user identifier

Response Types

class CreateEmbeddingResponse:
    """Top-level response returned by embeddings.create()."""
    data: List[Embedding]  # one Embedding per input text
    model: str  # model that produced the embeddings
    object: Literal["list"]  # response type discriminator
    usage: EmbeddingUsage  # token accounting for the request

class Embedding:
    """A single embedding result within CreateEmbeddingResponse.data."""
    embedding: List[float]  # the dense vector itself
    index: int  # position of the corresponding text in the request input
    object: Literal["embedding"]  # object type discriminator

class EmbeddingUsage:
    """Token usage reported for an embeddings request."""
    prompt_tokens: int  # tokens consumed by the input text(s)
    total_tokens: int  # total tokens billed for the request

Install with Tessl CLI

npx tessl i tessl/pypi-groq

docs

audio.md

batches.md

chat-completions.md

embeddings.md

files.md

index.md

models.md

tile.json