- Spec files
pypi-openai
Describes: pkg:pypi/openai@1.106.x
- Description
- Official Python library for the OpenAI API providing chat completions, embeddings, audio, images, and more
- Author
- tessl
- Last updated
docs/other-apis.md
1# Other APIs23Additional functionality including models management, content moderation, vector stores, webhooks, and experimental features.45## Capabilities67### Models API89List, retrieve, and manage available OpenAI models including base models and fine-tuned variants.1011```python { .api }12def list(13self,14*,15extra_headers: Headers | None = None,16extra_query: Query | None = None,17extra_body: Body | None = None,18timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN19) -> SyncPage[Model]: ...2021def retrieve(22self,23model: str,24*,25extra_headers: Headers | None = None,26extra_query: Query | None = None,27extra_body: Body | None = None,28timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN29) -> Model: ...3031def delete(32self,33model: str,34*,35extra_headers: Headers | None = None,36extra_query: Query | None = None,37extra_body: Body | None = None,38timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN39) -> ModelDeleted: ...40```4142Usage examples:4344```python45from openai import OpenAI4647client = OpenAI()4849# List all available models50models = client.models.list()5152print("Available models:")53for model in models:54print(f" {model.id}: owned by {model.owned_by}")5556# Filter models by type57base_models = []58fine_tuned_models = []5960for model in models:61if "ft:" in model.id:62fine_tuned_models.append(model)63else:64base_models.append(model)6566print(f"\nBase models: {len(base_models)}")67print(f"Fine-tuned models: {len(fine_tuned_models)}")6869# Get specific model details70model_info = client.models.retrieve("gpt-3.5-turbo")7172print(f"\nModel details for gpt-3.5-turbo:")73print(f" ID: {model_info.id}")74print(f" Created: {model_info.created}")75print(f" Owned by: {model_info.owned_by}")7677# List fine-tuned models only78print("\nYour fine-tuned models:")79for model in fine_tuned_models:80print(f" {model.id} (created: {model.created})")8182# Delete fine-tuned model (if needed)83# Note: Only fine-tuned models can be 
deleted84# if fine_tuned_models:85# model_to_delete = fine_tuned_models[0].id86# deletion_result = client.models.delete(model_to_delete)87# print(f"Deleted model: {deletion_result.deleted}")8889# Model capabilities lookup90def get_model_capabilities(model_id: str):91"""Get model capabilities and specifications"""9293capabilities = {94# Chat models95"gpt-4": {"type": "chat", "context": 8192, "training_data": "Sep 2021"},96"gpt-4-turbo": {"type": "chat", "context": 128000, "training_data": "Dec 2023"},97"gpt-4o": {"type": "chat", "context": 128000, "training_data": "Oct 2023"},98"gpt-3.5-turbo": {"type": "chat", "context": 16385, "training_data": "Sep 2021"},99100# Embedding models101"text-embedding-3-small": {"type": "embedding", "dimensions": 1536, "max_input": 8191},102"text-embedding-3-large": {"type": "embedding", "dimensions": 3072, "max_input": 8191},103"text-embedding-ada-002": {"type": "embedding", "dimensions": 1536, "max_input": 8191},104105# Image models106"dall-e-3": {"type": "image", "max_size": "1792x1024", "styles": ["vivid", "natural"]},107"dall-e-2": {"type": "image", "max_size": "1024x1024", "variations": True},108109# Audio models110"whisper-1": {"type": "audio", "capabilities": ["transcription", "translation"]},111"tts-1": {"type": "tts", "voices": 6, "formats": ["mp3", "opus", "aac", "flac"]},112"tts-1-hd": {"type": "tts", "voices": 6, "formats": ["mp3", "opus", "aac", "flac"], "quality": "hd"},113114# Legacy models115"gpt-3.5-turbo-instruct": {"type": "completion", "context": 4097, "training_data": "Sep 2021"}116}117118return capabilities.get(model_id, {"type": "unknown"})119120# Check model capabilities121test_models = ["gpt-4", "text-embedding-3-small", "dall-e-3"]122123for model_id in test_models:124caps = get_model_capabilities(model_id)125print(f"\n{model_id} capabilities:")126for key, value in caps.items():127print(f" {key}: {value}")128```129130### Moderations API131132Analyze content for policy violations and safety concerns using 
OpenAI's moderation models.133134```python { .api }135def create(136self,137*,138input: Union[str, List[str], List[ModerationMultiModalInputParam]],139model: Union[str, ModerationModel] | NotGiven = NOT_GIVEN,140extra_headers: Headers | None = None,141extra_query: Query | None = None,142extra_body: Body | None = None,143timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN144) -> ModerationCreateResponse: ...145```146147Usage examples:148149```python150# Basic content moderation151text_to_check = "This is a sample text to check for policy violations."152153moderation_result = client.moderations.create(154input=text_to_check155)156157result = moderation_result.results[0]158159print(f"Flagged: {result.flagged}")160print(f"Categories: {result.categories}")161print(f"Category scores: {result.category_scores}")162163# Check multiple texts164texts_to_check = [165"Hello, how are you today?",166"This is normal conversation.",167"I love programming and AI technology."168]169170batch_moderation = client.moderations.create(171input=texts_to_check172)173174for i, result in enumerate(batch_moderation.results):175text = texts_to_check[i]176flagged = result.flagged177178print(f"Text {i+1}: {'⚠️ FLAGGED' if flagged else '✅ CLEAN'}")179print(f" Content: {text[:50]}...")180181if flagged:182# Show which categories were flagged183flagged_categories = [cat for cat, flagged in result.categories.model_dump().items() if flagged]184print(f" Flagged for: {', '.join(flagged_categories)}")185186# Advanced moderation with multimodal input187multimodal_input = [188{189"type": "text",190"text": "Please review this content for safety"191},192{193"type": "image_url",194"image_url": {"url": "https://example.com/image.jpg"}195}196]197198# Note: Multimodal moderation may require specific model199multimodal_result = client.moderations.create(200input=multimodal_input,201model="omni-moderation-latest"202)203204# Content filtering function205def content_filter(text: str, threshold: float = 
0.5):206"""Filter content based on moderation scores"""207208moderation = client.moderations.create(input=text)209result = moderation.results[0]210211if result.flagged:212return {213"allowed": False,214"reason": "Content flagged by moderation",215"categories": [cat for cat, flagged in result.categories.model_dump().items() if flagged]216}217218# Check individual category scores against threshold219high_risk_categories = []220for category, score in result.category_scores.model_dump().items():221if score > threshold:222high_risk_categories.append(category)223224if high_risk_categories:225return {226"allowed": False,227"reason": f"High risk scores (>{threshold})",228"categories": high_risk_categories229}230231return {"allowed": True, "reason": "Content passed moderation"}232233# Test content filter234test_content = "This is educational content about AI safety."235filter_result = content_filter(test_content)236237print(f"Content filter result: {filter_result}")238239# Batch content moderation for user-generated content240def moderate_user_content(contents: list):241"""Moderate multiple pieces of user content"""242243# Process in batches to handle API limits244batch_size = 20245all_results = []246247for i in range(0, len(contents), batch_size):248batch = contents[i:i + batch_size]249250moderation = client.moderations.create(input=batch)251252for j, result in enumerate(moderation.results):253content_idx = i + j254all_results.append({255"content_id": content_idx,256"content": batch[j][:100] + "..." 
if len(batch[j]) > 100 else batch[j],257"flagged": result.flagged,258"categories": result.categories.model_dump(),259"scores": result.category_scores.model_dump()260})261262return all_results263264# Example user content moderation265user_posts = [266"Just had an amazing coffee this morning!",267"Check out this cool AI project I'm working on.",268"Programming is such a fun hobby.",269] * 10 # 30 posts270271moderation_results = moderate_user_content(user_posts)272273flagged_count = sum(1 for r in moderation_results if r["flagged"])274print(f"Moderated {len(moderation_results)} posts, {flagged_count} flagged")275```276277### Vector Stores API278279Create and manage vector stores for efficient similarity search and retrieval operations.280281```python { .api }282def create(283self,284*,285file_ids: List[str] | NotGiven = NOT_GIVEN,286name: str | NotGiven = NOT_GIVEN,287expires_after: VectorStoreExpiresAfter | NotGiven = NOT_GIVEN,288chunking_strategy: ChunkingStrategyParam | NotGiven = NOT_GIVEN,289metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN290) -> VectorStore: ...291292def list(293self,294*,295after: str | NotGiven = NOT_GIVEN,296before: str | NotGiven = NOT_GIVEN,297limit: int | NotGiven = NOT_GIVEN,298order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN299) -> SyncCursorPage[VectorStore]: ...300301def retrieve(302self,303vector_store_id: str304) -> VectorStore: ...305306def update(307self,308vector_store_id: str,309*,310name: str | NotGiven = NOT_GIVEN,311expires_after: VectorStoreExpiresAfter | NotGiven = NOT_GIVEN,312metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN313) -> VectorStore: ...314315def delete(316self,317vector_store_id: str318) -> VectorStoreDeleted: ...319```320321Usage examples:322323```python324# Upload files for vector store325documents = ["doc1.txt", "doc2.pdf", "doc3.md"]326file_ids = []327328for doc in documents:329with open(doc, "rb") as f:330file_obj = 
client.files.create(331file=f,332purpose="assistants"333)334file_ids.append(file_obj.id)335336# Create vector store337vector_store = client.vector_stores.create(338name="Knowledge Base",339file_ids=file_ids,340expires_after={341"anchor": "last_active_at",342"days": 30343},344metadata={"project": "documentation", "version": "1.0"}345)346347print(f"Created vector store: {vector_store.id}")348print(f"Status: {vector_store.status}")349print(f"File counts: {vector_store.file_counts}")350351# List vector stores352vector_stores = client.vector_stores.list()353354print("Your vector stores:")355for vs in vector_stores:356print(f" {vs.id}: {vs.name} ({vs.file_counts.total} files)")357358# Update vector store359updated_store = client.vector_stores.update(360vector_store.id,361name="Updated Knowledge Base",362metadata={"project": "documentation", "version": "2.0", "updated": "true"}363)364365# Vector store file management366# Add files to existing vector store367additional_files = ["doc4.txt", "doc5.pdf"]368369for doc in additional_files:370with open(doc, "rb") as f:371file_obj = client.files.create(file=f, purpose="assistants")372373# Add to vector store374vs_file = client.vector_stores.files.create(375vector_store_id=vector_store.id,376file_id=file_obj.id377)378379print(f"Added file {file_obj.id} to vector store")380381# List files in vector store382vs_files = client.vector_stores.files.list(vector_store.id)383384print(f"Files in vector store {vector_store.id}:")385for vs_file in vs_files:386print(f" {vs_file.id}: {vs_file.status}")387388# Delete file from vector store389if vs_files.data:390file_to_remove = vs_files.data[0].id391392deletion_result = client.vector_stores.files.delete(393vector_store_id=vector_store.id,394file_id=file_to_remove395)396397print(f"Removed file {file_to_remove}: {deletion_result.deleted}")398399# Batch file operations400batch_files = ["batch1.txt", "batch2.txt", "batch3.txt"]401batch_file_ids = []402403for doc in 
batch_files:404with open(doc, "rb") as f:405file_obj = client.files.create(file=f, purpose="assistants")406batch_file_ids.append(file_obj.id)407408# Create file batch for vector store409file_batch = client.vector_stores.file_batches.create(410vector_store_id=vector_store.id,411file_ids=batch_file_ids412)413414print(f"Created file batch: {file_batch.id}")415print(f"Status: {file_batch.status}")416417# Monitor batch progress418import time419420def monitor_file_batch(vector_store_id: str, batch_id: str):421"""Monitor file batch processing"""422423while True:424batch = client.vector_stores.file_batches.retrieve(425vector_store_id=vector_store_id,426batch_id=batch_id427)428429print(f"Batch status: {batch.status}")430print(f"File counts: {batch.file_counts}")431432if batch.status in ["completed", "failed", "cancelled"]:433return batch434435time.sleep(5)436437# Monitor the batch438# final_batch = monitor_file_batch(vector_store.id, file_batch.id)439440# Delete vector store441# deletion_result = client.vector_stores.delete(vector_store.id)442# print(f"Vector store deleted: {deletion_result.deleted}")443```444445### Webhooks API446447Set up and manage webhooks for real-time notifications of API events and job completions.448449```python { .api }450# Webhook verification utilities451def verify_webhook_signature(452payload: bytes,453signature: str,454secret: str455) -> bool: ...456457def construct_webhook_payload(458timestamp: int,459payload: str,460secret: str461) -> str: ...462```463464Usage examples:465466```python467import hmac468import hashlib469import time470471# Webhook signature verification472def verify_webhook_signature(payload: bytes, signature: str, secret: str) -> bool:473"""Verify webhook signature for security"""474475# Extract timestamp and signature from header476elements = signature.split(',')477timestamp = None478sig = None479480for element in elements:481if element.startswith('t='):482timestamp = element[2:]483elif 
element.startswith('v1='):484sig = element[3:]485486if not timestamp or not sig:487return False488489# Create expected signature490signed_payload = f"{timestamp}.{payload.decode()}"491expected_sig = hmac.new(492secret.encode(),493signed_payload.encode(),494hashlib.sha256495).hexdigest()496497return hmac.compare_digest(sig, expected_sig)498499# Webhook event handler example500class WebhookHandler:501"""Handle OpenAI webhook events"""502503def __init__(self, webhook_secret: str):504self.secret = webhook_secret505self.handlers = {}506507def register_handler(self, event_type: str, handler_func):508"""Register handler for specific event type"""509self.handlers[event_type] = handler_func510511def handle_webhook(self, payload: bytes, signature: str):512"""Process incoming webhook"""513514# Verify signature515if not verify_webhook_signature(payload, signature, self.secret):516raise ValueError("Invalid webhook signature")517518# Parse event519import json520event_data = json.loads(payload)521522event_type = event_data.get('type')523524if event_type in self.handlers:525self.handlers[event_type](event_data)526else:527print(f"No handler for event type: {event_type}")528529def handle_fine_tuning_job_completed(self, event):530"""Handle fine-tuning job completion"""531job_id = event['data']['id']532status = event['data']['status']533model = event['data'].get('fine_tuned_model')534535print(f"Fine-tuning job {job_id} completed with status: {status}")536if model:537print(f"New model available: {model}")538539def handle_batch_completed(self, event):540"""Handle batch job completion"""541batch_id = event['data']['id']542status = event['data']['status']543544print(f"Batch {batch_id} completed with status: {status}")545546# Download results if successful547if status == 'completed':548output_file_id = event['data'].get('output_file_id')549if output_file_id:550print(f"Results available in file: {output_file_id}")551552# Set up webhook handler553webhook_handler = 
WebhookHandler("your_webhook_secret")554555# Register event handlers556webhook_handler.register_handler(557"fine_tuning.job.completed",558webhook_handler.handle_fine_tuning_job_completed559)560561webhook_handler.register_handler(562"batch.completed",563webhook_handler.handle_batch_completed564)565566# Example Flask webhook endpoint567"""568from flask import Flask, request569570app = Flask(__name__)571572@app.route('/webhooks/openai', methods=['POST'])573def handle_openai_webhook():574payload = request.get_data()575signature = request.headers.get('OpenAI-Signature')576577try:578webhook_handler.handle_webhook(payload, signature)579return {'status': 'success'}, 200580except ValueError as e:581return {'error': str(e)}, 400582except Exception as e:583return {'error': 'Internal server error'}, 500584585if __name__ == '__main__':586app.run(port=8000)587"""588589# Webhook testing utility590def create_test_webhook_payload(event_type: str, data: dict, secret: str):591"""Create test webhook payload for testing"""592593import json594595timestamp = int(time.time())596597event = {598"type": event_type,599"data": data,600"created_at": timestamp601}602603payload = json.dumps(event)604605# Create signature606signed_payload = f"{timestamp}.{payload}"607signature = hmac.new(608secret.encode(),609signed_payload.encode(),610hashlib.sha256611).hexdigest()612613webhook_signature = f"t={timestamp},v1={signature}"614615return payload.encode(), webhook_signature616617# Test webhook handling618test_event_data = {619"id": "ftjob-test123",620"status": "succeeded",621"fine_tuned_model": "ft:gpt-3.5-turbo-0125:org:model:abc123"622}623624test_payload, test_signature = create_test_webhook_payload(625"fine_tuning.job.completed",626test_event_data,627"your_webhook_secret"628)629630# Process test webhook631webhook_handler.handle_webhook(test_payload, test_signature)632```633634### Upload Management635636Handle large file uploads with chunked upload support for efficient data transfer.637638```python 
{ .api }639def create(640self,641*,642bytes: int,643filename: str,644mime_type: str,645purpose: Literal["assistants", "batch", "fine-tune", "vision"]646) -> Upload: ...647648def cancel(649self,650upload_id: str651) -> Upload: ...652653def complete(654self,655upload_id: str,656*,657part_ids: List[str],658md5: str | NotGiven = NOT_GIVEN659) -> Upload: ...660```661662Usage examples:663664```python665import os666import hashlib667668# Large file upload with chunking669def upload_large_file(file_path: str, purpose: str, chunk_size: int = 8 * 1024 * 1024):670"""Upload large file using chunked upload"""671672file_size = os.path.getsize(file_path)673filename = os.path.basename(file_path)674675print(f"Uploading {filename} ({file_size} bytes)")676677# Create upload678upload = client.uploads.create(679bytes=file_size,680filename=filename,681mime_type="application/octet-stream",682purpose=purpose683)684685print(f"Created upload: {upload.id}")686687# Upload parts688part_ids = []689690with open(file_path, 'rb') as f:691part_number = 0692693while True:694chunk = f.read(chunk_size)695if not chunk:696break697698part_number += 1699700# Upload part701part = client.uploads.parts.create(702upload_id=upload.id,703data=chunk704)705706part_ids.append(part.id)707708progress = (part_number * chunk_size) / file_size * 100709print(f"Uploaded part {part_number}: {min(progress, 100):.1f}%")710711# Calculate MD5 hash712with open(file_path, 'rb') as f:713file_hash = hashlib.md5(f.read()).hexdigest()714715# Complete upload716completed_upload = client.uploads.complete(717upload_id=upload.id,718part_ids=part_ids,719md5=file_hash720)721722print(f"Upload completed: {completed_upload.file.id}")723return completed_upload.file724725# Example large file upload726# large_file = upload_large_file("large_dataset.jsonl", "fine-tune")727728# Upload with error handling729def robust_file_upload(file_path: str, purpose: str, max_retries: int = 3):730"""Upload file with retry logic"""731732for attempt in 
range(max_retries):733try:734if os.path.getsize(file_path) > 100 * 1024 * 1024: # > 100MB735# Use chunked upload736return upload_large_file(file_path, purpose)737else:738# Use regular upload739with open(file_path, "rb") as f:740return client.files.create(file=f, purpose=purpose)741742except Exception as e:743print(f"Upload attempt {attempt + 1} failed: {e}")744745if attempt == max_retries - 1:746raise747748time.sleep(2 ** attempt) # Exponential backoff749750# Upload with progress tracking751class UploadProgressTracker:752"""Track upload progress for multiple files"""753754def __init__(self):755self.uploads = {}756757def track_upload(self, file_path: str, purpose: str):758"""Track file upload with progress"""759760filename = os.path.basename(file_path)761file_size = os.path.getsize(file_path)762763self.uploads[filename] = {764"size": file_size,765"uploaded": 0,766"status": "starting"767}768769try:770if file_size > 100 * 1024 * 1024: # Large file771file_obj = self._upload_large_with_progress(file_path, purpose)772else:773file_obj = self._upload_regular_with_progress(file_path, purpose)774775self.uploads[filename]["status"] = "completed"776self.uploads[filename]["file_id"] = file_obj.id777778return file_obj779780except Exception as e:781self.uploads[filename]["status"] = "failed"782self.uploads[filename]["error"] = str(e)783raise784785def _upload_regular_with_progress(self, file_path: str, purpose: str):786"""Upload regular file with progress tracking"""787788filename = os.path.basename(file_path)789790with open(file_path, "rb") as f:791self.uploads[filename]["status"] = "uploading"792793file_obj = client.files.create(file=f, purpose=purpose)794795self.uploads[filename]["uploaded"] = self.uploads[filename]["size"]796797return file_obj798799def _upload_large_with_progress(self, file_path: str, purpose: str):800"""Upload large file with detailed progress tracking"""801802# Implementation would track chunk-by-chunk progress803# Similar to upload_large_file but with 
progress updates804return upload_large_file(file_path, purpose)805806def get_progress(self):807"""Get upload progress summary"""808809total_files = len(self.uploads)810completed_files = sum(1 for u in self.uploads.values() if u["status"] == "completed")811failed_files = sum(1 for u in self.uploads.values() if u["status"] == "failed")812813return {814"total": total_files,815"completed": completed_files,816"failed": failed_files,817"in_progress": total_files - completed_files - failed_files818}819820# Example usage821tracker = UploadProgressTracker()822823files_to_upload = ["dataset1.jsonl", "dataset2.jsonl", "dataset3.jsonl"]824825for file_path in files_to_upload:826try:827file_obj = tracker.track_upload(file_path, "fine-tune")828print(f"✅ Uploaded {file_path}: {file_obj.id}")829except Exception as e:830print(f"❌ Failed to upload {file_path}: {e}")831832progress = tracker.get_progress()833print(f"Upload summary: {progress}")834```835836## Types837838### Models API Types839840```python { .api }841class Model(BaseModel):842id: str843created: int844object: Literal["model"]845owned_by: str846847class ModelDeleted(BaseModel):848id: str849deleted: bool850object: Literal["model"]851```852853### Moderations API Types854855```python { .api }856class ModerationCreateResponse(BaseModel):857id: str858model: str859results: List[ModerationResult]860861class ModerationResult(BaseModel):862categories: ModerationCategories863category_scores: ModerationCategoryScores864flagged: bool865866class ModerationCategories(BaseModel):867harassment: bool868harassment_threatening: bool869hate: bool870hate_threatening: bool871illicit: bool872illicit_violent: bool873self_harm: bool874self_harm_instructions: bool875self_harm_intent: bool876sexual: bool877sexual_minors: bool878violence: bool879violence_graphic: bool880881class ModerationCategoryScores(BaseModel):882harassment: float883harassment_threatening: float884hate: float885hate_threatening: float886illicit: float887illicit_violent: 
float888self_harm: float889self_harm_instructions: float890self_harm_intent: float891sexual: float892sexual_minors: float893violence: float894violence_graphic: float895896ModerationModel = Literal[897"omni-moderation-latest",898"omni-moderation-2024-09-26",899"text-moderation-latest",900"text-moderation-stable"901]902903# Multimodal input types904ModerationMultiModalInputParam = Union[905ModerationImageURLInputParam,906ModerationTextInputParam907]908909class ModerationTextInputParam(TypedDict, total=False):910type: Required[Literal["text"]]911text: Required[str]912913class ModerationImageURLInputParam(TypedDict, total=False):914type: Required[Literal["image_url"]]915image_url: Required[ModerationImageURL]916```917918### Vector Stores Types919920```python { .api }921class VectorStore(BaseModel):922id: str923created_at: int924file_counts: VectorStoreFileCounts925last_active_at: Optional[int]926metadata: Optional[Dict[str, str]]927name: str928object: Literal["vector_store"]929status: Literal["expired", "in_progress", "completed"]930usage_bytes: int931expires_after: Optional[VectorStoreExpiresAfter]932expires_at: Optional[int]933934class VectorStoreFileCounts(BaseModel):935cancelled: int936completed: int937failed: int938in_progress: int939total: int940941class VectorStoreExpiresAfter(BaseModel):942anchor: Literal["last_active_at"]943days: int944945class VectorStoreDeleted(BaseModel):946id: str947deleted: bool948object: Literal["vector_store.deleted"]949950# Chunking strategy types951ChunkingStrategyParam = Union[952AutoChunkingStrategyParam,953StaticChunkingStrategyParam954]955956class AutoChunkingStrategyParam(TypedDict, total=False):957type: Required[Literal["auto"]]958959class StaticChunkingStrategyParam(TypedDict, total=False):960type: Required[Literal["static"]]961static: Required[StaticChunkingStrategy]962963class StaticChunkingStrategy(TypedDict, total=False):964max_chunk_size_tokens: Required[int]965chunk_overlap_tokens: Required[int]966```967968### Upload 
Types969970```python { .api }971class Upload(BaseModel):972id: str973bytes: int974created_at: int975expires_at: int976filename: str977object: Literal["upload"]978purpose: str979status: Literal["pending", "completed", "cancelled", "expired"]980file: Optional[FileObject]981982class UploadPart(BaseModel):983id: str984created_at: int985object: Literal["upload.part"]986upload_id: str987988# Upload parameters989class UploadCreateParams(TypedDict, total=False):990bytes: Required[int]991filename: Required[str]992mime_type: Required[str]993purpose: Required[Literal["assistants", "batch", "fine-tune", "vision"]]994995class UploadCompleteParams(TypedDict, total=False):996part_ids: Required[List[str]]997md5: NotRequired[str]998```9991000## Best Practices10011002### Models Management10031004- Regularly check for new model releases and capabilities1005- Monitor fine-tuned model performance and update as needed1006- Keep track of model deprecation schedules1007- Use appropriate models for specific tasks to optimize costs10081009### Content Moderation10101011- Implement moderation for all user-generated content1012- Use appropriate moderation models for your content type1013- Set reasonable thresholds for different risk categories1014- Regularly review and update moderation policies10151016### Vector Stores10171018- Organize documents logically within vector stores1019- Use meaningful names and metadata for easy management1020- Monitor storage usage and implement cleanup procedures1021- Keep vector stores updated when source documents change10221023### Webhooks10241025- Always verify webhook signatures for security1026- Implement proper error handling for webhook events1027- Use webhooks to trigger automated workflows1028- Monitor webhook delivery and implement retry logic for failures10291030### File Uploads10311032- Use chunked uploads for large files (>100MB)1033- Implement progress tracking for better user experience1034- Add retry logic for upload failures1035- Validate file 
integrity with checksums when possible