or run

npx @tessl/cli init
Log in

Version

Tile

Overview

Evals

Files

Files

docs

audio-processing.md, conversational-ai.md, dubbing.md, index.md, music.md, realtime.md, studio.md, text-to-speech.md, transcription.md, voices.md, workspace.md

docs/music.md

# Music Generation

## Music Client Methods

### music.compose()

```typescript { .api }
compose(
  request?: {
    prompt?: string; // Cannot use with compositionPlan
    compositionPlan?: MusicPrompt; // Cannot use with prompt
    musicLengthMs?: number; // 3000-300000ms, prompt only
    outputFormat?: OutputFormat;
    modelId?: "music_v1";
    forceInstrumental?: boolean; // Prompt only
    respectSectionsDurations?: boolean; // compositionPlan only
    storeForInpainting?: boolean; // Enterprise only
  },
  options?: RequestOptions
): Promise<ReadableStream<Uint8Array>>
```

### music.composeDetailed()

```typescript { .api }
composeDetailed(
  request?: {
    prompt?: string;
    compositionPlan?: MusicPrompt;
    musicLengthMs?: number;
    outputFormat?: OutputFormat;
    modelId?: "music_v1";
    forceInstrumental?: boolean;
    storeForInpainting?: boolean;
  },
  options?: RequestOptions
): Promise<MultipartResponse>

interface MultipartResponse {
  json: {
    compositionPlan: CompositionPlan;
    songMetadata: SongMetadata;
  };
  audio: Buffer;
  filename: string;
}

interface SongMetadata {
  title: string;
  description: string;
  genres: string[];
  languages: string[];
  is_explicit: boolean;
}
```

### music.stream()

```typescript { .api }
stream(
  request?: {
    prompt?: string;
    compositionPlan?: MusicPrompt;
    musicLengthMs?: number;
    outputFormat?: OutputFormat;
    modelId?: "music_v1";
    forceInstrumental?: boolean;
    storeForInpainting?: boolean;
  },
  options?: RequestOptions
): Promise<ReadableStream<Uint8Array>>
```

### music.separateStems()

```typescript { .api }
separateStems(
  request: {
    file: File | Blob | Buffer;
    stemVariationId?: "two_stems_v1" | "six_stems_v1";
    outputFormat?: OutputFormat;
  },
  options?: RequestOptions
): Promise<ReadableStream<Uint8Array>>
```

## Composition Plan

### music.compositionPlan.create()

```typescript { .api }
compositionPlan.create(
  request: {
    prompt: string;
    musicLengthMs?: number; // 3000-300000ms
    sourceCompositionPlan?: MusicPrompt;
    modelId?: "music_v1";
  },
  options?: RequestOptions
): Promise<MusicPrompt>
```

No credit cost, subject to rate limiting.

## Types

### MusicPrompt

```typescript { .api }
interface MusicPrompt {
  positiveGlobalStyles?: string[];
  negativeGlobalStyles?: string[];
  sections: Section[];
}

interface Section {
  sectionName: string;
  durationMs: number;
  positiveStyles?: string[];
  negativeStyles?: string[];
  lines: string[]; // Lyrics or instrumental descriptions
}
```

### OutputFormat

Same as TTS output formats: `mp3_*`, `pcm_*`, `opus_*`, etc.

## Usage Examples

### Simple Prompt

```typescript
import { ElevenLabsClient } from "@elevenlabs/elevenlabs-js";

const client = new ElevenLabsClient({ apiKey: process.env.ELEVENLABS_API_KEY });

const audio = await client.music.compose({
  prompt: "Epic orchestral music with dramatic strings",
  musicLengthMs: 60000,
  outputFormat: "mp3_44100_128"
});

// Save to file
import fs from "fs";
const fileStream = fs.createWriteStream("output.mp3");
for await (const chunk of audio) {
  fileStream.write(chunk);
}
fileStream.end();
```

### Detailed Response with Metadata

```typescript
const response = await client.music.composeDetailed({
  prompt: "Progressive rock about space exploration",
  musicLengthMs: 120000
});

console.log("Title:", response.json.songMetadata.title);
console.log("Genres:", response.json.songMetadata.genres);

for (const section of response.json.compositionPlan.sections) {
  console.log(`${section.sectionName} (${section.durationMs}ms)`);
  console.log(`Lyrics: ${section.lines.join(" / ")}`);
}

fs.writeFileSync(response.filename, response.audio);
```

### Streaming

```typescript
const stream = await client.music.stream({
  prompt: "Upbeat electronic dance music",
  musicLengthMs: 90000,
  outputFormat: "mp3_44100_128"
});

for await (const chunk of stream) {
  audioPlayer.feed(chunk);
}
```

### Composition Plan

```typescript
// Create plan
const plan = await client.music.compositionPlan.create({
  prompt: "Melancholic jazz ballad about lost love",
  musicLengthMs: 180000
});

console.log("Styles:", plan.positiveGlobalStyles);
console.log("Sections:", plan.sections.length);

// Use plan to generate
const response = await client.music.composeDetailed({
  compositionPlan: plan
});
```

### Stem Separation

```typescript
import fs from "fs";

const audioFile = fs.readFileSync("song.mp3");

// Separate into 6 stems (vocals, drums, bass, guitar, keys, other)
const stems = await client.music.separateStems({
  file: audioFile,
  stemVariationId: "six_stems_v1",
  outputFormat: "mp3_44100_128"
});

// Save stems (returns ZIP)
const output = fs.createWriteStream("stems.zip");
for await (const chunk of stems) {
  output.write(chunk);
}
output.end();

// Two stems variation (vocals and accompaniment)
const twoStems = await client.music.separateStems({
  file: audioFile,
  stemVariationId: "two_stems_v1"
});
```

### Custom Composition Plan

```typescript
const customPlan: MusicPrompt = {
  positiveGlobalStyles: ["rock", "energetic"],
  negativeGlobalStyles: ["slow", "ballad"],
  sections: [
    {
      sectionName: "intro",
      durationMs: 15000,
      positiveStyles: ["atmospheric"],
      lines: ["instrumental intro"]
    },
    {
      sectionName: "verse",
      durationMs: 30000,
      lines: [
        "Walking through the city lights",
        "Everything feels right tonight"
      ]
    },
    {
      sectionName: "chorus",
      durationMs: 25000,
      positiveStyles: ["powerful", "anthemic"],
      lines: [
        "We're alive, we're alive",
        "Feel the rhythm deep inside"
      ]
    }
  ]
};

const audio = await client.music.compose({
  compositionPlan: customPlan,
  respectSectionsDurations: true
});
```

## Important Notes

- Duration: 3-300 seconds (3000-300000ms)
- `prompt` and `compositionPlan`: mutually exclusive
- `forceInstrumental`: only works with `prompt`
- `respectSectionsDurations`: only with `compositionPlan`
  - `true`: Strict section duration enforcement
  - `false`: Model may adjust durations for quality/latency
- `storeForInpainting`: Enterprise feature only
- Stem separation: May have high latency for long files
- `compositionPlan.create()`: No credit cost, rate limited