or run

npx @tessl/cli init
Log in

Version

Tile

Overview

Evals

Files

Files

docs

audio-processing.mdconversational-ai.mddubbing.mdindex.mdmusic.mdrealtime.mdstudio.mdtext-to-speech.mdtranscription.mdvoices.mdworkspace.md

voices.mddocs/

0

# Voice Management

1

2

## Voice Discovery

3

4

### voices.getAll()

5

6

```typescript { .api }

7

getAll(

8

request?: { showLegacy?: boolean },

9

options?: RequestOptions

10

): Promise<GetVoicesResponse>

11

12

interface GetVoicesResponse {

13

voices: Voice[];

14

}

15

```

16

17

### voices.search()

18

19

```typescript { .api }

20

search(

21

request?: {

22

nextPageToken?: string;

23

pageSize?: number; // Max 100, default 10

24

search?: string; // Searches name, description, labels, category

25

sort?: "created_at_unix" | "name";

26

sortDirection?: "asc" | "desc";

27

voiceType?: "personal" | "community" | "default" | "workspace" | "non-default";

28

category?: "premade" | "cloned" | "generated" | "professional";

29

fineTuningState?: "draft" | "not_verified" | "not_started" | "queued" | "fine_tuning" | "fine_tuned" | "failed" | "delayed";

30

collectionId?: string;

31

includeTotalCount?: boolean; // Performance cost

32

voiceIds?: string | string[]; // Max 100

33

},

34

options?: RequestOptions

35

): Promise<GetVoicesV2Response>

36

37

interface GetVoicesV2Response {

38

voices: Voice[];

39

hasMore: boolean;

40

totalCount: number;

41

nextPageToken?: string;

42

}

43

```

44

45

## Voice CRUD

46

47

### voices.get()

48

49

```typescript { .api }

50

get(

51

voiceId: string,

52

request?: { withSettings?: boolean }, // Deprecated, ignored

53

options?: RequestOptions

54

): Promise<Voice>

55

```

56

57

### voices.delete()

58

59

```typescript { .api }

60

delete(

61

voiceId: string,

62

options?: RequestOptions

63

): Promise<DeleteVoiceResponseModel>

64

```

65

66

### voices.update()

67

68

```typescript { .api }

69

update(

70

voiceId: string,

71

request: {

72

name: string;

73

files?: File[];

74

removeBackgroundNoise?: boolean;

75

description?: string;

76

labels?: string; // Serialized JSON

77

},

78

options?: RequestOptions

79

): Promise<EditVoiceResponseModel>

80

```

81

82

## Voice Types

83

84

### Voice

85

86

```typescript { .api }

87

interface Voice {

88

voiceId: string;

89

name?: string;

90

samples?: VoiceSample[];

91

category?: "premade" | "cloned" | "generated" | "professional";

92

fineTuning?: FineTuningResponse;

93

labels?: Record<string, string>;

94

description?: string;

95

previewUrl?: string;

96

availableForTiers?: string[];

97

settings?: VoiceSettings;

98

sharing?: VoiceSharingResponse;

99

highQualityBaseModelIds?: string[];

100

verifiedLanguages?: VerifiedVoiceLanguageResponseModel[];

101

safetyControl?: string;

102

voiceVerification?: VoiceVerificationResponse;

103

permissionOnResource?: string;

104

isOwner?: boolean;

105

isLegacy?: boolean;

106

isMixed?: boolean;

107

favoritedAtUnix?: number;

108

createdAtUnix?: number;

109

}

110

111

interface VoiceSample {

112

sampleId?: string;

113

fileName?: string;

114

mimeType?: string;

115

sizeBytes?: number;

116

hash?: string;

117

durationSecs?: number;

118

removeBackgroundNoise?: boolean;

119

hasIsolatedAudio?: boolean;

120

hasIsolatedAudioPreview?: boolean;

121

speakerSeparation?: SpeakerSeparationResponseModel;

122

trimStart?: number;

123

trimEnd?: number;

124

}

125

126

interface VoiceSettings {

127

stability?: number; // 0-1

128

similarityBoost?: number; // 0-1

129

style?: number; // 0-1

130

useSpeakerBoost?: boolean;

131

}

132

```

133

134

## Instant Voice Cloning (IVC)

135

136

### voices.ivc.create()

137

138

```typescript { .api }

139

ivc.create(

140

request: {

141

name: string;

142

files: File[];

143

removeBackgroundNoise?: boolean;

144

description?: string;

145

labels?: string; // Serialized JSON

146

},

147

options?: RequestOptions

148

): Promise<AddVoiceIvcResponseModel>

149

```

150

151

## Professional Voice Cloning (PVC)

152

153

### voices.pvc.create()

154

155

```typescript { .api }

156

pvc.create(

157

request: {

158

name: string;

159

language: string;

160

description?: string;

161

labels?: Record<string, string | undefined>;

162

},

163

options?: RequestOptions

164

): Promise<AddVoiceResponseModel>

165

```

166

167

### voices.pvc.update()

168

169

```typescript { .api }

170

pvc.update(

171

voiceId: string,

172

request?: {

173

name?: string;

174

language?: string;

175

description?: string;

176

labels?: Record<string, string | undefined>;

177

},

178

options?: RequestOptions

179

): Promise<AddVoiceResponseModel>

180

```

181

182

### voices.pvc.train()

183

184

```typescript { .api }

185

pvc.train(

186

voiceId: string,

187

request?: { modelId?: string },

188

options?: RequestOptions

189

): Promise<StartPvcVoiceTrainingResponseModel>

190

```

191

192

## PVC Sample Management

193

194

### voices.pvc.samples.create()

195

196

```typescript { .api }

197

pvc.samples.create(

198

voiceId: string,

199

request: {

200

files: File[];

201

removeBackgroundNoise?: boolean;

202

},

203

options?: RequestOptions

204

): Promise<VoiceSample[]>

205

```

206

207

### voices.pvc.samples.update()

208

209

```typescript { .api }

210

pvc.samples.update(

211

voiceId: string,

212

sampleId: string,

213

request?: {

214

fileName?: string;

215

removeBackgroundNoise?: boolean;

216

speakerId?: string;

217

trimStart?: number;

218

trimEnd?: number;

219

},

220

options?: RequestOptions

221

): Promise<AddVoiceResponseModel>

222

```

223

224

### voices.pvc.samples.delete()

225

226

```typescript { .api }

227

pvc.samples.delete(

228

voiceId: string,

229

sampleId: string,

230

options?: RequestOptions

231

): Promise<DeleteVoiceSampleResponseModel>

232

```

233

234

### voices.pvc.samples.audio.get()

235

236

```typescript { .api }

237

pvc.samples.audio.get(

238

voiceId: string,

239

sampleId: string,

240

request?: { removeBackgroundNoise?: boolean },

241

options?: RequestOptions

242

): Promise<VoiceSamplePreviewResponseModel>

243

```

244

245

### voices.pvc.samples.waveform.get()

246

247

```typescript { .api }

248

pvc.samples.waveform.get(

249

voiceId: string,

250

sampleId: string,

251

options?: RequestOptions

252

): Promise<VoiceSampleVisualWaveformResponseModel>

253

```

254

255

## Speaker Separation

256

257

### voices.pvc.samples.speakers.separate()

258

259

```typescript { .api }

260

pvc.samples.speakers.separate(

261

voiceId: string,

262

sampleId: string,

263

options?: RequestOptions

264

): Promise<StartSpeakerSeparationResponseModel>

265

```

266

267

### voices.pvc.samples.speakers.get()

268

269

```typescript { .api }

270

pvc.samples.speakers.get(

271

voiceId: string,

272

sampleId: string,

273

options?: RequestOptions

274

): Promise<SpeakerSeparationResponseModel>

275

```

276

277

### voices.pvc.samples.speakers.audio.get()

278

279

```typescript { .api }

280

pvc.samples.speakers.audio.get(

281

voiceId: string,

282

sampleId: string,

283

speakerId: string,

284

options?: RequestOptions

285

): Promise<SpeakerAudioResponseModel>

286

```

287

288

## PVC Verification

289

290

### voices.pvc.verification.request()

291

292

```typescript { .api }

293

pvc.verification.request(

294

voiceId: string,

295

request: {

296

files: File[]; // ID documents, consent forms

297

extraText?: string;

298

},

299

options?: RequestOptions

300

): Promise<RequestPvcManualVerificationResponseModel>

301

```

302

303

### voices.pvc.verification.captcha.get()

304

305

```typescript { .api }

306

pvc.verification.captcha.get(

307

voiceId: string,

308

options?: RequestOptions

309

): Promise<void>

310

```

311

312

### voices.pvc.verification.captcha.verify()

313

314

```typescript { .api }

315

pvc.verification.captcha.verify(

316

voiceId: string,

317

request: { recording: File | fs.ReadStream },

318

options?: RequestOptions

319

): Promise<VerifyPvcVoiceCaptchaResponseModel>

320

```

321

322

## Voice Design (Text-to-Voice)

323

324

### textToVoice.design()

325

326

```typescript { .api }

327

design(

328

request: {

329

voiceDescription: string;

330

outputFormat?: OutputFormat;

331

modelId?: "eleven_multilingual_ttv_v2" | "eleven_ttv_v3";

332

text?: string; // 100-1000 chars

333

autoGenerateText?: boolean;

334

loudness?: number; // -1 to 1, 0 = ~-24 LUFS

335

seed?: number;

336

guidanceScale?: number; // Lower = more creative, higher = more literal

337

streamPreviews?: boolean;

338

remixingSessionId?: string;

339

remixingSessionIterationId?: string;

340

quality?: number; // Higher = better quality, less variety

341

referenceAudioBase64?: string; // eleven_ttv_v3 only

342

promptStrength?: number; // 0-1, eleven_ttv_v3 with reference audio

343

},

344

options?: RequestOptions

345

): Promise<VoiceDesignPreviewResponse>

346

347

interface VoiceDesignPreviewResponse {

348

previews: VoicePreviewResponseModel[];

349

text: string;

350

}

351

352

interface VoicePreviewResponseModel {

353

audioBase64: string;

354

generatedVoiceId: string;

355

mediaType: string;

356

durationSecs: number;

357

language?: string;

358

}

359

```

360

361

### textToVoice.create()

362

363

```typescript { .api }

364

create(

365

request: {

366

voiceName: string;

367

voiceDescription: string;

368

generatedVoiceId: string;

369

labels?: Record<string, string>;

370

},

371

options?: RequestOptions

372

): Promise<Voice>

373

```

374

375

### textToVoice.remix()

376

377

```typescript { .api }

378

remix(

379

request: {

380

voiceId: string;

381

voiceModification: string;

382

outputFormat?: OutputFormat;

383

modelId?: "eleven_multilingual_ttv_v2" | "eleven_ttv_v3";

384

text?: string;

385

autoGenerateText?: boolean;

386

loudness?: number;

387

seed?: number;

388

guidanceScale?: number;

389

streamPreviews?: boolean;

390

remixingSessionId?: string;

391

remixingSessionIterationId?: string;

392

},

393

options?: RequestOptions

394

): Promise<VoiceDesignPreviewResponse>

395

```

396

397

## Voice Settings

398

399

### voices.getSettings()

400

401

```typescript { .api }

402

getSettings(

403

voiceId: string,

404

options?: RequestOptions

405

): Promise<VoiceSettings>

406

```

407

408

### voices.updateSettings()

409

410

```typescript { .api }

411

updateSettings(

412

voiceId: string,

413

request: VoiceSettings,

414

options?: RequestOptions

415

): Promise<VoiceSettings>

416

```

417

418

## Voice Sharing

419

420

### voices.getSharingSettings()

421

422

```typescript { .api }

423

getSharingSettings(

424

voiceId: string,

425

options?: RequestOptions

426

): Promise<VoiceSharingResponse>

427

```

428

429

### voices.updateSharingSettings()

430

431

```typescript { .api }

432

updateSharingSettings(

433

voiceId: string,

434

request: {

435

category?: "personal" | "high_quality" | "professional";

436

reviewStatus?: string;

437

reviewMessage?: string;

438

enablePublicUsage?: boolean;

439

enabledInLibrary?: boolean;

440

publicOwnerId?: string;

441

originalVoiceId?: string;

442

financialRewardsEnabled?: boolean;

443

freeUsersAllowed?: boolean;

444

liveModerationEnabled?: boolean;

445

voiceMixing?: boolean;

446

featuredWeight?: number;

447

noticeType?: string;

448

noticePeriod?: number;

449

bannedReasons?: string[];

450

instagramUsername?: string;

451

twitterUsername?: string;

452

youtubeUsername?: string;

453

tiktokUsername?: string;

454

},

455

options?: RequestOptions

456

): Promise<VoiceSharingResponse>

457

```

458

459

## Library Voices

460

461

### voices.getLibraryVoices()

462

463

```typescript { .api }

464

getLibraryVoices(

465

request?: {

466

pageSize?: number;

467

category?: string;

468

gender?: string;

469

age?: string;

470

accent?: string;

471

language?: string;

472

search?: string;

473

useCases?: string[];

474

descriptives?: string[];

475

featuredVoicesOnlyBoolean?: boolean;

476

readersPerSampleMin?: number;

477

readersPerSampleMax?: number;

478

sortBy?: string;

479

page?: number;

480

},

481

options?: RequestOptions

482

): Promise<GetLibraryVoicesResponse>

483

```

484

485

### voices.getSharedVoices()

486

487

```typescript { .api }

488

getSharedVoices(

489

request?: {

490

pageSize?: number;

491

page?: number;

492

},

493

options?: RequestOptions

494

): Promise<GetSharedVoicesResponse>

495

```

496

497

## Usage Examples

498

499

### Search and List Voices

500

501

```typescript

502

import { ElevenLabsClient } from "@elevenlabs/elevenlabs-js";

503

504

const client = new ElevenLabsClient({ apiKey: process.env.ELEVENLABS_API_KEY });

505

506

// Get all voices

507

const all = await client.voices.getAll();

508

console.log(`Found ${all.voices.length} voices`);

509

510

// Search with filters

511

const results = await client.voices.search({

512

search: "professional",

513

category: "cloned",

514

pageSize: 20,

515

includeTotalCount: true

516

});

517

518

// Paginate

519

if (results.hasMore) {

520

const next = await client.voices.search({

521

nextPageToken: results.nextPageToken

522

});

523

}

524

525

// Get specific voice

526

const voice = await client.voices.get("21m00Tcm4TlvDq8ikWAM");

527

```

528

529

### Instant Voice Cloning

530

531

```typescript

532

import { createReadStream } from "fs";

533

534

const voice = await client.voices.ivc.create({

535

name: "John's Voice",

536

files: [

537

createReadStream("/path/sample1.mp3"),

538

createReadStream("/path/sample2.mp3"),

539

createReadStream("/path/sample3.mp3")

540

],

541

removeBackgroundNoise: true,

542

description: "Professional male voice",

543

labels: JSON.stringify({ accent: "american", gender: "male" })

544

});

545

546

console.log(`Created voice: ${voice.voiceId}`);

547

```

548

549

### Professional Voice Cloning Workflow

550

551

```typescript

552

import { createReadStream } from "fs";

553

554

// 1. Create PVC voice

555

const pvc = await client.voices.pvc.create({

556

name: "Pro Voice",

557

language: "en",

558

description: "High-quality voice for audiobooks"

559

});

560

561

// 2. Add samples

562

await client.voices.pvc.samples.create(pvc.voiceId, {

563

files: [

564

createReadStream("/path/sample1.wav"),

565

createReadStream("/path/sample2.wav"),

566

createReadStream("/path/sample3.wav")

567

],

568

removeBackgroundNoise: true

569

});

570

571

// 3. Start training

572

await client.voices.pvc.train(pvc.voiceId);

573

574

// 4. Check status

575

const status = await client.voices.get(pvc.voiceId);

576

console.log(`Fine-tuning: ${status.fineTuning?.fineTuningState}`);

577

```

578

579

### Speaker Separation

580

581

```typescript

582

// Start separation

583

await client.voices.pvc.samples.speakers.separate("voiceId", "sampleId");

584

585

// Check status

586

const sep = await client.voices.pvc.samples.speakers.get("voiceId", "sampleId");

587

588

if (sep.status === "completed" && sep.speakers) {

589

for (const speaker of sep.speakers) {

590

const audio = await client.voices.pvc.samples.speakers.audio.get(

591

"voiceId",

592

"sampleId",

593

speaker.speakerId

594

);

595

// Process speaker audio

596

}

597

}

598

```

599

600

### Voice Design

601

602

```typescript

603

// Design voice from description

604

const design = await client.textToVoice.design({

605

voiceDescription: "Deep authoritative male voice with British accent",

606

autoGenerateText: true,

607

modelId: "eleven_ttv_v3",

608

guidanceScale: 3.0,

609

quality: 0.8

610

});

611

612

// Create voice from preview

613

const voice = await client.textToVoice.create({

614

voiceName: "Documentary Narrator",

615

voiceDescription: "Deep authoritative voice",

616

generatedVoiceId: design.previews[0].generatedVoiceId,

617

labels: { use_case: "documentary", accent: "british" }

618

});

619

```

620

621

### Voice Remix

622

623

```typescript

624

// Modify existing voice

625

const remixed = await client.textToVoice.remix({

626

voiceId: "existing_voice_id",

627

voiceModification: "Make the voice sound younger and more energetic",

628

autoGenerateText: true,

629

modelId: "eleven_ttv_v3"

630

});

631

632

// Create from remixed preview

633

const newVoice = await client.textToVoice.create({

634

voiceName: "Energetic Version",

635

voiceDescription: "Younger, more energetic voice",

636

generatedVoiceId: remixed.previews[0].generatedVoiceId

637

});

638

```

639

640

### Update Voice Settings

641

642

```typescript

643

const settings = await client.voices.updateSettings("voiceId", {

644

stability: 0.7,

645

similarityBoost: 0.8,

646

style: 0.3,

647

useSpeakerBoost: true

648

});

649

```

650

651

### Update Voice

652

653

```typescript

654

// Update name and description

655

await client.voices.update("voiceId", {

656

name: "Updated Name",

657

description: "Updated description"

658

});

659

660

// Add new samples

661

await client.voices.update("voiceId", {

662

name: "My Voice",

663

files: [createReadStream("/path/new_sample.mp3")],

664

removeBackgroundNoise: true

665

});

666

```

667

668

## Error Handling

669

670

```typescript

671

import { ElevenLabsClient, ElevenLabsError } from "@elevenlabs/elevenlabs-js";

672

673

try {

674

const voice = await client.voices.get("voiceId");

675

} catch (error) {

676

if (error instanceof ElevenLabsError) {

677

console.error(`API error ${error.statusCode}: ${error.message}`);

678

}

679

throw error;

680

}

681

```

682

683

## Edge Cases and Important Notes

684

685

### Voice Search Constraints

686

- Voice IDs in search: max 100 per request

687

- Page size: max 100, default 10

688

- `includeTotalCount`: incurs performance cost, use sparingly

689

- Search queries match: name, description, labels, category

690

691

### Voice Cloning Limits

692

- IVC: 1-25 audio samples required

693

- Sample file size: typically max 10MB per file

694

- Total sample duration: minimum varies by voice type

695

- PVC: requires more samples (typically 10+) for best quality

696

- Background noise removal: only use if samples actually have noise (can degrade quality if not)

697

698

### Voice Design Constraints

699

- Text length: 100-1000 characters

700

- Labels: IVC create and `voices.update()` take labels as a serialized JSON string; `textToVoice.create()` takes a key-value record (`Record<string, string>`)

701

- Reference audio: only supported with `eleven_ttv_v3` model

702

- Guidance scale: lower (2-3) with longer prompts works better

703

- Quality vs variety: higher quality = less variety in generated voices

704

705

### PVC Training Workflow

706

- Training is async: check `fineTuning.fineTuningState` on Voice object for status

707

- States: `draft`, `not_verified`, `not_started`, `queued`, `fine_tuning`, `fine_tuned`, `failed`, `delayed`

708

- Training can take hours depending on sample count and quality

709

- Monitor status by polling `voices.get()` periodically

710

- Failed training: check sample quality and quantity

711

712

### Speaker Separation

713

- Async operation: poll status with `speakers.get()`

714

- Multi-speaker samples: separation required before using in PVC

715

- Separation can take several minutes for long samples

716

- Status: `pending`, `processing`, `completed`, `failed`

717

718

### Error Scenarios

719

720

```typescript

721

import { ElevenLabsClient, ElevenLabsError } from "@elevenlabs/elevenlabs-js";

722

723

// Handle invalid voice ID

724

try {

725

const voice = await client.voices.get("invalid_id");

726

} catch (error) {

727

if (error instanceof ElevenLabsError && error.statusCode === 404) {

728

console.error("Voice not found");

729

// Search for similar voices

730

const results = await client.voices.search({ search: "similar name" });

731

}

732

}

733

734

// Handle sample upload errors

735

try {

736

const voice = await client.voices.ivc.create({

737

name: "Test Voice",

738

files: [/* samples */]

739

});

740

} catch (error) {

741

if (error instanceof ElevenLabsError) {

742

if (error.statusCode === 400) {

743

console.error("Invalid sample format or quality");

744

} else if (error.statusCode === 413) {

745

console.error("Sample files too large");

746

} else if (error.statusCode === 422) {

747

console.error("Validation error:", error.body);

748

// Check: sample count, file format, duration

749

}

750

}

751

}

752

753

// Handle PVC training failures

754

async function monitorPvcTraining(voiceId: string) {

755

const maxWaitTime = 3600000; // 1 hour

756

const startTime = Date.now();

757

const pollInterval = 30000; // 30 seconds

758

759

while (Date.now() - startTime < maxWaitTime) {

760

const voice = await client.voices.get(voiceId);

761

const state = voice.fineTuning?.fineTuningState;

762

763

if (state === "fine_tuned") {

764

console.log("Training completed successfully!");

765

return voice;

766

} else if (state === "failed") {

767

throw new Error("PVC training failed - check sample quality");

768

} else if (state === "fine_tuning") {

769

console.log("Training in progress...");

770

}

771

772

await new Promise(resolve => setTimeout(resolve, pollInterval));

773

}

774

775

throw new Error("Training timeout");

776

}

777

```

778

779

## Comprehensive Examples

780

781

### Complete PVC Workflow with Error Handling

782

783

```typescript

784

import { createReadStream } from "fs";

785

import * as fs from "fs";

786

import { ElevenLabsClient, ElevenLabsError } from "@elevenlabs/elevenlabs-js";

787

788

async function createProfessionalVoice(

789

name: string,

790

language: string,

791

samplePaths: string[]

792

) {

793

const client = new ElevenLabsClient({

794

apiKey: process.env.ELEVENLABS_API_KEY

795

});

796

797

try {

798

// Step 1: Validate samples before creating voice

799

const validatedSamples = await Promise.all(

800

samplePaths.map(async (path) => {

801

const stats = fs.statSync(path);

802

if (stats.size > 10 * 1024 * 1024) {

803

throw new Error(`Sample ${path} exceeds 10MB limit`);

804

}

805

806

// Check file extension

807

const ext = path.split('.').pop()?.toLowerCase();

808

if (!['wav', 'mp3', 'm4a'].includes(ext || '')) {

809

throw new Error(`Unsupported format: ${ext}`);

810

}

811

812

return createReadStream(path);

813

})

814

);

815

816

if (validatedSamples.length < 10) {

817

throw new Error("PVC requires at least 10 samples for best quality");

818

}

819

820

// Step 2: Create PVC voice

821

const voice = await client.voices.pvc.create({

822

name,

823

language,

824

description: `Professional voice clone for ${language}`,

825

labels: {

826

created_via: "sdk",

827

sample_count: validatedSamples.length.toString(),

828

language

829

}

830

});

831

832

console.log(`Created PVC voice: ${voice.voiceId}`);

833

834

// Step 3: Add samples with error handling

835

let addedSamples = 0;

836

for (const sample of validatedSamples) {

837

try {

838

await client.voices.pvc.samples.create(voice.voiceId, {

839

files: [sample],

840

removeBackgroundNoise: true // Only if samples have noise

841

});

842

addedSamples++;

843

} catch (error) {

844

if (error instanceof ElevenLabsError && error.statusCode === 413) {

845

console.warn(`Sample too large, skipping...`);

846

continue;

847

}

848

throw error;

849

}

850

}

851

852

if (addedSamples < 10) {

853

throw new Error(`Only ${addedSamples} samples added, need at least 10`);

854

}

855

856

// Step 4: Start training

857

await client.voices.pvc.train(voice.voiceId, {

858

modelId: "eleven_multilingual_v2" // Optional: specify model

859

});

860

861

console.log("Training started, monitoring progress...");

862

863

// Step 5: Monitor training

864

return await monitorPvcTraining(voice.voiceId);

865

866

} catch (error) {

867

if (error instanceof ElevenLabsError) {

868

console.error(`PVC creation failed: ${error.statusCode} - ${error.message}`);

869

}

870

throw error;

871

}

872

}

873

874

async function monitorPvcTraining(voiceId: string) {

875

const client = new ElevenLabsClient({

876

apiKey: process.env.ELEVENLABS_API_KEY

877

});

878

879

const maxWaitTime = 2 * 60 * 60 * 1000; // 2 hours

880

const startTime = Date.now();

881

const pollInterval = 60000; // 1 minute

882

883

while (Date.now() - startTime < maxWaitTime) {

884

const voice = await client.voices.get(voiceId);

885

const state = voice.fineTuning?.fineTuningState;

886

887

console.log(`Training state: ${state}`);

888

889

if (state === "fine_tuned") {

890

console.log("✅ Training completed successfully!");

891

return voice;

892

} else if (state === "failed") {

893

throw new Error("PVC training failed - check sample quality and quantity");

894

} else if (state === "delayed") {

895

console.log("Training delayed, continuing to wait...");

896

}

897

898

await new Promise(resolve => setTimeout(resolve, pollInterval));

899

}

900

901

throw new Error("Training timeout - check status manually");

902

}

903

```

904

905

### Voice Design with Iterative Refinement

906

907

```typescript

908

async function designVoiceWithRefinement(

909

initialDescription: string,

910

refinementPrompts: string[]

911

) {

912

const client = new ElevenLabsClient({

913

apiKey: process.env.ELEVENLABS_API_KEY

914

});

915

916

let currentDescription = initialDescription;

917

let sessionId: string | undefined;

918

919

// Initial design

920

let design = await client.textToVoice.design({

921

voiceDescription: currentDescription,

922

autoGenerateText: true,

923

modelId: "eleven_ttv_v3",

924

guidanceScale: 3.0,

925

quality: 0.8

926

});

927

928

sessionId = `session_${Date.now()}`;

929

930

// Iterative refinement

931

for (const refinement of refinementPrompts) {

932

const remixDescription = `${currentDescription}. ${refinement}`;

933

934

const remixed = await client.textToVoice.remix({
  voiceId: "existing_voice_id",
  voiceModification: refinement,
  autoGenerateText: true,
  remixingSessionId: sessionId,
  modelId: "eleven_ttv_v3",
  guidanceScale: 3.0
});

941

942

// Review previews and select best

943

const bestPreview = remixed.previews[0]; // Or implement selection logic

944

945

// Create voice from preview

946

const voice = await client.textToVoice.create({

947

voiceName: `Refined Voice ${Date.now()}`,

948

voiceDescription: remixDescription,

949

generatedVoiceId: bestPreview.generatedVoiceId

950

});

951

952

currentDescription = remixDescription;

953

console.log(`Created refined voice: ${voice.voiceId}`);

954

}

955

956

return design;

957

}

958

```

959

960

### Voice Search with Pagination and Filtering

961

962

```typescript

963

async function searchVoicesComprehensively(

964

searchTerm?: string,

965

filters?: {

966

category?: string;

967

voiceType?: string;

968

fineTuningState?: string;

969

}

970

) {

971

const client = new ElevenLabsClient({

972

apiKey: process.env.ELEVENLABS_API_KEY

973

});

974

975

const allVoices: Voice[] = [];

976

let nextPageToken: string | undefined;

977

let hasMore = true;

978

979

while (hasMore) {

980

try {

981

const results = await client.voices.search({

982

search: searchTerm,

983

pageSize: 100, // Max page size

984

nextPageToken,

985

category: filters?.category,

986

voiceType: filters?.voiceType,

987

fineTuningState: filters?.fineTuningState,

988

includeTotalCount: allVoices.length === 0 // Only on first page

989

});

990

991

allVoices.push(...results.voices);

992

hasMore = results.hasMore;

993

nextPageToken = results.nextPageToken;

994

995

console.log(`Fetched ${allVoices.length} voices...`);

996

997

} catch (error) {

998

if (error instanceof ElevenLabsError && error.statusCode === 429) {

999

// Rate limited - wait and retry

1000

await new Promise(resolve => setTimeout(resolve, 2000));

1001

continue;

1002

}

1003

throw error;

1004

}

1005

}

1006

1007

return allVoices;

1008

}

1009

```

1010