# Load Balancing

Intelligent task distribution across available workers, with a built-in least-busy balancer and support for custom balancing strategies.

## Capabilities

### PiscinaLoadBalancer Type

Function type for implementing custom load balancing strategies.

```typescript { .api }
/**
 * Load balancer function type
 * @param task - Task to be distributed
 * @param workers - Available workers to choose from
 * @returns Selected worker or null if none suitable
 */
type PiscinaLoadBalancer = (
  task: PiscinaTask,
  workers: PiscinaWorker[]
) => PiscinaWorker | null;
```
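
A custom balancer is supplied to the pool through the `loadBalancer` constructor option, as shown in the examples later in this document. As a minimal sketch (the `worker.js` filename is a placeholder), a balancer that simply picks the first live worker looks like this:

```typescript
import { Piscina, PiscinaLoadBalancer } from "piscina";

// Minimal sketch: pick the first worker that is neither terminating nor destroyed.
// Returning null signals that no suitable worker is currently available.
const firstAvailableBalancer: PiscinaLoadBalancer = (task, workers) =>
  workers.find(w => !w.terminating && !w.destroyed) ?? null;

const pool = new Piscina({
  filename: "worker.js", // placeholder worker entry file
  loadBalancer: firstAvailableBalancer
});
```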

### LeastBusyBalancer

Built-in load balancer that distributes tasks to the least busy available workers.

```typescript { .api }
/**
 * Creates a least-busy load balancer
 * @param opts - Balancer configuration options
 * @returns Load balancer function
 */
function LeastBusyBalancer(
  opts: LeastBusyBalancerOptions
): PiscinaLoadBalancer;

/**
 * Configuration options for LeastBusyBalancer
 */
interface LeastBusyBalancerOptions {
  /** Maximum concurrent tasks per worker before considering overloaded */
  maximumUsage: number;
}
```

**Usage Examples:**

```typescript
import { Piscina, LeastBusyBalancer } from "piscina";

// Default least-busy balancer (used automatically)
const pool = new Piscina({
  filename: "worker.js",
  maxThreads: 4,
  concurrentTasksPerWorker: 2
  // LeastBusyBalancer is used by default
});

// Explicit least-busy balancer with custom settings
const customBalancedPool = new Piscina({
  filename: "worker.js",
  maxThreads: 8,
  concurrentTasksPerWorker: 3,
  loadBalancer: LeastBusyBalancer({ maximumUsage: 3 })
});

// High-concurrency configuration
const highConcurrencyPool = new Piscina({
  filename: "worker.js",
  maxThreads: 16,
  concurrentTasksPerWorker: 4,
  loadBalancer: LeastBusyBalancer({ maximumUsage: 4 })
});
```

### PiscinaWorker Interface

Interface representing workers available for load balancing decisions.

```typescript { .api }
/**
 * Worker interface for load balancing
 */
interface PiscinaWorker {
  /** Unique worker thread ID */
  readonly id: number;

  /** Current number of tasks being processed */
  readonly currentUsage: number;

  /** Whether worker is running an abortable task */
  readonly isRunningAbortableTask: boolean;

  /** Performance histogram for this worker (if enabled) */
  readonly histogram: PiscinaHistogramSummary | null;

  /** Whether worker is in termination process */
  readonly terminating: boolean;

  /** Whether worker has been destroyed */
  readonly destroyed: boolean;
}
```
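
These fields are what a balancer reads when making a decision. As a brief sketch reusing the `PiscinaLoadBalancer` type above, a balancer might skip workers flagged as `terminating` or `destroyed` and then compare `currentUsage`:

```typescript
import { PiscinaLoadBalancer, PiscinaWorker } from "piscina";

// Sketch: return the live worker with the lowest currentUsage,
// or null if every worker is terminating or destroyed.
const pickLeastLoaded: PiscinaLoadBalancer = (task, workers) => {
  let best: PiscinaWorker | null = null;
  for (const w of workers) {
    if (w.terminating || w.destroyed) continue;
    if (best === null || w.currentUsage < best.currentUsage) {
      best = w;
    }
  }
  return best;
};
```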

### Custom Load Balancer Implementation

You can implement custom load balancing strategies for specialized requirements.

**Usage Examples:**

```typescript
import { Piscina, PiscinaLoadBalancer, PiscinaTask, PiscinaWorker } from "piscina";

// Round-robin load balancer
let roundRobinIndex = 0;
const roundRobinBalancer: PiscinaLoadBalancer = (task, workers) => {
  const availableWorkers = workers.filter(w =>
    !w.terminating &&
    !w.destroyed &&
    w.currentUsage < 2
  );

  if (availableWorkers.length === 0) return null;

  const worker = availableWorkers[roundRobinIndex % availableWorkers.length];
  roundRobinIndex++;
  return worker;
};

// Priority-based load balancer
const priorityBalancer: PiscinaLoadBalancer = (task, workers) => {
  // High priority tasks get dedicated workers
  const taskPriority = (task as any).priority || 0;

  if (taskPriority > 5) {
    // Find completely idle worker for high priority
    const idleWorker = workers.find(w =>
      w.currentUsage === 0 && !w.terminating && !w.destroyed
    );
    if (idleWorker) return idleWorker;
  }

  // Fall back to least busy
  let bestWorker: PiscinaWorker | null = null;
  let minUsage = Infinity;

  for (const worker of workers) {
    if (worker.terminating || worker.destroyed) continue;
    if (worker.isRunningAbortableTask && !task.isAbortable) continue;

    if (worker.currentUsage < minUsage) {
      minUsage = worker.currentUsage;
      bestWorker = worker;
    }
  }

  return bestWorker;
};

// Performance-based load balancer
const performanceBalancer: PiscinaLoadBalancer = (task, workers) => {
  const availableWorkers = workers.filter(w =>
    !w.terminating &&
    !w.destroyed &&
    w.currentUsage < 3
  );

  if (availableWorkers.length === 0) return null;

  // Prefer workers with better performance history
  return availableWorkers.sort((a, b) => {
    const aAvg = a.histogram?.average || 0;
    const bAvg = b.histogram?.average || 0;
    return aAvg - bAvg; // Lower average runtime is better
  })[0];
};

// Use custom balancers
const roundRobinPool = new Piscina({
  filename: "worker.js",
  loadBalancer: roundRobinBalancer
});

const priorityPool = new Piscina({
  filename: "worker.js",
  loadBalancer: priorityBalancer,
  workerHistogram: true // Enable for performance tracking
});

const performancePool = new Piscina({
  filename: "worker.js",
  loadBalancer: performanceBalancer,
  workerHistogram: true
});
```

### Load Balancing Strategies

#### Least Busy Strategy (Default)

The default `LeastBusyBalancer` implements the following algorithm:

1. **Idle Worker Priority**: Always prefer workers with `currentUsage === 0`
2. **Abortable Task Handling**: Avoid workers running abortable tasks for non-abortable tasks
3. **Usage Threshold**: Respect the `maximumUsage` setting to prevent overloading
4. **Least Loaded**: Among available workers, choose the one with the lowest `currentUsage`

```typescript
// Equivalent implementation of LeastBusyBalancer logic
// (maximumUsage is the value supplied via LeastBusyBalancerOptions; 2 here is just an example)
const maximumUsage = 2;

const leastBusyLogic: PiscinaLoadBalancer = (task, workers) => {
  let candidate: PiscinaWorker | null = null;
  let checkpoint = maximumUsage;

  for (const worker of workers) {
    // Skip terminating or destroyed workers
    if (worker.terminating || worker.destroyed) continue;

    // Prioritize completely idle workers
    if (worker.currentUsage === 0) {
      candidate = worker;
      break;
    }

    // Don't assign non-abortable tasks to workers running abortable tasks
    if (worker.isRunningAbortableTask) continue;

    // Find least busy worker under threshold
    if (!task.isAbortable && worker.currentUsage < checkpoint) {
      candidate = worker;
      checkpoint = worker.currentUsage;
    }
  }

  return candidate;
};
```

#### Custom Strategy Guidelines

When implementing custom load balancers:

1. **Always check worker status**: Filter out `terminating` and `destroyed` workers
2. **Handle abortable tasks**: Consider `isRunningAbortableTask` and `task.isAbortable`
3. **Respect capacity**: Don't overload workers beyond reasonable limits
4. **Return null**: If no suitable worker is found, return `null` to trigger worker creation
5. **Performance considerations**: Keep balancer logic fast, since it runs for every task

**Advanced Custom Balancer:**

```typescript
// Locality-aware load balancer (conceptual)
const localityAwareBalancer: PiscinaLoadBalancer = (task, workers) => {
  const availableWorkers = workers.filter(w =>
    !w.terminating && !w.destroyed
  );

  if (availableWorkers.length === 0) return null;

  // Group workers by some locality criterion
  const taskLocality = (task as any).locality || 'default';
  const localWorkers = availableWorkers.filter(w =>
    (w as any).locality === taskLocality
  );

  const candidatePool = localWorkers.length > 0 ? localWorkers : availableWorkers;

  // Apply least-busy within locality group
  return candidatePool.reduce((best, current) => {
    if (!best) return current;
    return current.currentUsage < best.currentUsage ? current : best;
  }, null as PiscinaWorker | null);
};

// Weighted load balancer
const weightedBalancer: PiscinaLoadBalancer = (task, workers) => {
  const availableWorkers = workers.filter(w =>
    !w.terminating && !w.destroyed && w.currentUsage < 5
  );

  if (availableWorkers.length === 0) return null;

  // Calculate weighted scores (lower is better)
  const scores = availableWorkers.map(worker => ({
    worker,
    score: worker.currentUsage * 1.0 +
           (worker.histogram?.average || 0) * 0.1 +
           (worker.isRunningAbortableTask ? 0.5 : 0)
  }));

  // Return worker with lowest score
  return scores.reduce((best, current) =>
    current.score < best.score ? current : best
  ).worker;
};
```

### Monitoring Load Balancing

Track load balancing effectiveness using worker metrics:

```typescript
import { Piscina } from "piscina";

const pool = new Piscina({
  filename: "worker.js",
  maxThreads: 4,
  workerHistogram: true
});

// Monitor worker distribution
setInterval(() => {
  console.log('\nWorker Load Distribution:');
  pool.threads.forEach((workerInfo, index) => {
    console.log(`Worker ${index}: ${workerInfo.currentUsage} tasks`);
  });

  console.log(`Queue size: ${pool.queueSize}`);
  console.log(`Pool utilization: ${(pool.utilization * 100).toFixed(2)}%`);
}, 5000);

// Track worker creation/destruction
pool.on('workerCreate', (worker) => {
  console.log(`Created worker ${worker.id}, total: ${pool.threads.length}`);
});

pool.on('workerDestroy', (worker) => {
  console.log(`Destroyed worker ${worker.id}, total: ${pool.threads.length}`);
});
```