or run

npx @tessl/cli init
Log in

Version

Tile

Overview

Evals

Files

Files

docs

agents.mdchains.mddocument-loaders.mdembeddings.mdexperimental.mdindex.mdmemory.mdoutput-parsers.mdretrievers.mdtools.mdutilities.md
tile.json

docs/experimental.md

0

# Experimental Features

1

2

Cutting-edge features including AutoGPT, BabyAGI, generative agents, and advanced planning systems. These experimental components showcase the latest developments in autonomous AI systems and advanced agent architectures.

3

4

**Note**: Experimental features are subject to breaking changes and may not be suitable for production use. APIs may change without notice between versions.

5

6

## Capabilities

7

8

### AutoGPT Implementation

9

10

Implementation of AutoGPT architecture for autonomous task execution and planning.

11

12

```typescript { .api }

13

/**

14

* AutoGPT agent implementation

15

*/

16

class AutoGPT extends BaseAgent {

17

constructor(fields: AutoGPTInput);

18

19

/** AI name identifier */

20

aiName: string;

21

22

/** AI role description */

23

aiRole: string;

24

25

/** Available tools */

26

tools: BaseTool[];

27

28

/** Language model */

29

llm: BaseLanguageModelInterface;

30

31

/** Memory for maintaining context */

32

memory: VectorStoreRetrieverMemory;

33

34

/** Human input mode */

35

humanInTheLoop?: boolean;

36

37

/** Output parser for commands */

38

outputParser: AutoGPTOutputParser;

39

40

/** Maximum iterations */

41

maxIterations?: number;

42

43

/** Plan and execute actions autonomously */

44

plan(

45

steps: AgentStep[],

46

inputs: ChainValues,

47

runManager?: CallbackManagerForChainRun

48

): Promise<AgentAction | AgentFinish>;

49

50

/** Execute the full AutoGPT loop */

51

run(goals: string[]): Promise<string>;

52

53

static fromLLMAndTools(

54

llm: BaseLanguageModelInterface,

55

tools: BaseTool[],

56

opts?: Partial<AutoGPTInput>

57

): AutoGPT;

58

}

59

60

/**

61

* AutoGPT prompt template

62

*/

63

class AutoGPTPrompt extends BasePromptTemplate {

64

constructor(fields: AutoGPTPromptInput);

65

66

/** AI name */

67

aiName: string;

68

69

/** AI role description */

70

aiRole: string;

71

72

/** Available tools */

73

tools: BaseTool[];

74

75

/** Token counter */

76

tokenCounter: (text: string) => Promise<number>;

77

78

/** Send token limit */

79

sendTokenLimit?: number;

80

81

format(values: InputValues): Promise<string>;

82

83

static fromLLMAndTools(

84

llm: BaseLanguageModelInterface,

85

tools: BaseTool[],

86

opts?: Partial<AutoGPTPromptInput>

87

): AutoGPTPrompt;

88

}

89

90

/**

91

* AutoGPT output parser

92

*/

93

class AutoGPTOutputParser extends BaseOutputParser<AutoGPTAction> {

94

parse(text: string): Promise<AutoGPTAction>;

95

getFormatInstructions(): string;

96

}

97

98

/**

99

* Preprocess JSON input for AutoGPT

100

*/

101

function preprocessJsonInput(text: string): string;

102

```

103

104

**Usage Example:**

105

106

```typescript

107

import {

108

AutoGPT,

109

AutoGPTPrompt,

110

preprocessJsonInput

111

} from "langchain/experimental/autogpt";

112

import { OpenAI } from "@langchain/openai";

113

import { DynamicTool } from "langchain/tools";

114

import { MemoryVectorStore } from "langchain/vectorstores/memory";

115

import { OpenAIEmbeddings } from "@langchain/openai";

116

117

// Create tools for AutoGPT

118

const tools = [

119

new DynamicTool({

120

name: "search_web",

121

description: "Search the web for information",

122

func: async (query: string) => {

123

// Implement web search

124

return `Search results for: ${query}`;

125

}

126

}),

127

new DynamicTool({

128

name: "write_file",

129

description: "Write content to a file",

130

func: async (input: string) => {

131

// Implement file writing

132

const [filename, content] = input.split("|");

133

return `Written content to ${filename}`;

134

}

135

})

136

];

137

138

// Create memory for context

139

const embeddings = new OpenAIEmbeddings();

140

const vectorStore = new MemoryVectorStore(embeddings);

141

const memory = new VectorStoreRetrieverMemory({

142

vectorStoreRetriever: vectorStore.asRetriever(),

143

memoryKey: "chat_history"

144

});

145

146

// Create AutoGPT instance

147

const llm = new OpenAI({ temperature: 0.7 });

148

const autoGPT = AutoGPT.fromLLMAndTools(llm, tools, {

149

aiName: "ResearchAssistant",

150

aiRole: "Research and analysis assistant",

151

memory,

152

maxIterations: 10,

153

humanInTheLoop: false

154

});

155

156

// Run autonomous task

157

const result = await autoGPT.run([

158

"Research the latest developments in AI",

159

"Create a summary of key findings",

160

"Save the summary to a file"

161

]);

162

```

163

164

### BabyAGI Implementation

165

166

Implementation of BabyAGI task management and execution system.

167

168

```typescript { .api }

169

/**

170

* BabyAGI task execution system

171

*/

172

class BabyAGI {

173

constructor(fields: BabyAGIInput);

174

175

/** Task creation chain */

176

creationChain: LLMChain;

177

178

/** Task prioritization chain */

179

prioritizationChain: LLMChain;

180

181

/** Task execution chain */

182

executionChain: BaseChain;

183

184

/** Vector store for task results */

185

vectorstore: VectorStoreInterface;

186

187

/** Task list */

188

taskList: Task[];

189

190

/** Task ID counter */

191

taskIdCounter: number;

192

193

/** Maximum iterations */

194

maxIterations?: number;

195

196

/** Add initial task */

197

addTask(task: Task): void;

198

199

/** Print task list */

200

printTaskList(): void;

201

202

/** Print next task */

203

printNextTask(task: Task): void;

204

205

/** Print task result */

206

printTaskResult(result: string): void;

207

208

/** Run the BabyAGI loop */

209

run(): Promise<void>;

210

211

static fromLLM(

212

llm: BaseLanguageModelInterface,

213

vectorstore: VectorStoreInterface,

214

opts?: Partial<BabyAGIInput>

215

): BabyAGI;

216

}

217

218

interface Task {

219

taskID: number;

220

taskName: string;

221

}

222

```

223

224

**Usage Example:**

225

226

```typescript

227

import { BabyAGI } from "langchain/experimental/babyagi";

228

import { OpenAI } from "@langchain/openai";

229

import { MemoryVectorStore } from "langchain/vectorstores/memory";

230

import { OpenAIEmbeddings } from "@langchain/openai";

231

232

const llm = new OpenAI({ temperature: 0 });

233

const embeddings = new OpenAIEmbeddings();

234

const vectorstore = new MemoryVectorStore(embeddings);

235

236

const babyAGI = BabyAGI.fromLLM(llm, vectorstore, {

237

maxIterations: 5,

238

verbose: true

239

});

240

241

// Add initial objective

242

babyAGI.addTask({

243

taskID: 1,

244

taskName: "Research and write a blog post about sustainable energy"

245

});

246

247

// Run the task management loop

248

await babyAGI.run();

249

```

250

251

### Generative Agents

252

253

Implementation of generative agents with memory and personality.

254

255

```typescript { .api }

256

/**

257

* Generative agent with memory and personality

258

*/

259

class GenerativeAgent {

260

constructor(fields: GenerativeAgentInput);

261

262

/** Agent name */

263

name: string;

264

265

/** Agent age */

266

age?: number;

267

268

/** Agent traits */

269

traits: string;

270

271

/** Agent status */

272

status: string;

273

274

/** Memory retriever */

275

memoryRetriever: TimeWeightedVectorStoreRetriever;

276

277

/** Language model */

278

llm: BaseLanguageModelInterface;

279

280

/** Memory importance */

281

memoryImportance: number;

282

283

/** Agent summary */

284

summary?: string;

285

286

/** Summary refresh seconds */

287

summaryRefreshSeconds?: number;

288

289

/** Get agent summary */

290

getSummary(config?: { forceRefresh?: boolean }): Promise<string>;

291

292

/** Get full agent name */

293

getFullName(): string;

294

295

/** Get agent characteristics */

296

getCharacteristics(): string;

297

298

/** Generate reaction to observation */

299

generateReaction(

300

observation: string,

301

suffix?: string

302

): Promise<[boolean, string]>;

303

304

/** Generate dialogue response */

305

generateDialogueResponse(

306

observation: string,

307

config?: { now?: Date }

308

): Promise<[boolean, string]>;

309

310

/** Add memory */

311

addMemory(memoryContent: string, config?: { now?: Date }): Promise<void>;

312

313

/** Pause to reflect */

314

pauseToReflect(config?: { now?: Date }): Promise<string[]>;

315

316

static create(

317

name: string,

318

memoryRetriever: TimeWeightedVectorStoreRetriever,

319

llm: BaseLanguageModelInterface,

320

opts?: Partial<GenerativeAgentInput>

321

): GenerativeAgent;

322

}

323

324

/**

325

* Memory for generative agents

326

*/

327

class GenerativeAgentMemory extends BaseMemory {

328

constructor(fields: GenerativeAgentMemoryInput);

329

330

/** Language model */

331

llm: BaseLanguageModelInterface;

332

333

/** Memory retriever */

334

memoryRetriever: TimeWeightedVectorStoreRetriever;

335

336

/** Verbose logging */

337

verbose: boolean;

338

339

/** Reflection threshold */

340

reflectionThreshold?: number;

341

342

/** Current plan */

343

currentPlan: string[];

344

345

/** Importance weight */

346

importanceWeight: number;

347

348

/** Aggregate importance */

349

aggregateImportance: number;

350

351

/** Token count of the most recent memories */

352

mostRecentMemoriesTokens: number;

353

354

/** Add memories */

355

addMemories(

356

memoryContent: string[],

357

config?: { now?: Date }

358

): Promise<void>;

359

360

get memoryKeys(): string[];

361

362

loadMemoryVariables(values: InputValues): Promise<MemoryVariables>;

363

364

saveContext(inputValues: InputValues, outputValues: OutputValues): Promise<void>;

365

}

366

```

367

368

### Plan and Execute Agents

369

370

Agents that separate planning and execution phases for complex task handling.

371

372

```typescript { .api }

373

/**

374

* Plan and execute agent

375

*/

376

class PlanAndExecuteAgent extends BaseAgent {

377

constructor(fields: PlanAndExecuteAgentInput);

378

379

/** Planner for creating task plans */

380

planner: BasePlanner;

381

382

/** Executor for running individual steps */

383

executor: BaseExecutor;

384

385

/** Step container */

386

stepContainer: BaseStepContainer;

387

388

plan(

389

steps: AgentStep[],

390

inputs: ChainValues,

391

runManager?: CallbackManagerForChainRun

392

): Promise<AgentAction | AgentFinish>;

393

394

static fromLLMAndTools(

395

llm: BaseLanguageModelInterface,

396

tools: BaseTool[],

397

opts?: Partial<PlanAndExecuteAgentInput>

398

): PlanAndExecuteAgent;

399

}

400

401

/**

402

* Base planner interface

403

*/

404

abstract class BasePlanner {

405

abstract plan(

406

inputs: PlannerInput,

407

runManager?: CallbackManagerForChainRun

408

): Promise<Plan>;

409

}

410

411

/**

412

* Base executor interface

413

*/

414

abstract class BaseExecutor {

415

abstract execute(

416

plan: Plan,

417

inputs: ChainValues,

418

runManager?: CallbackManagerForChainRun

419

): Promise<BaseExecutorOutput>;

420

}

421

422

/**

423

* Load planner from LLM and tools

424

*/

425

function loadPlanner(

426

llm: BaseLanguageModelInterface,

427

systemPrompt?: string

428

): BasePlanner;

429

430

/**

431

* Load executor from LLM and tools

432

*/

433

function loadExecutor(

434

llm: BaseLanguageModelInterface,

435

tools: BaseTool[],

436

systemPrompt?: string

437

): BaseExecutor;

438

```

439

440

### OpenAI Assistant Integration

441

442

Integration with OpenAI's Assistant API for advanced capabilities.

443

444

```typescript { .api }

445

/**

446

* OpenAI Assistant wrapper

447

*/

448

class OpenAIAssistantRunnable extends Runnable {

449

constructor(fields: OpenAIAssistantRunnableInput);

450

451

/** Assistant ID */

452

assistantId: string;

453

454

/** OpenAI client */

455

client: OpenAI;

456

457

/** Polling interval for run status */

458

pollIntervalMs: number;

459

460

/** Thread ID */

461

threadId?: string;

462

463

/** Invoke assistant */

464

invoke(

465

input: OpenAIAssistantRunnableInput,

466

options?: RunnableConfig

467

): Promise<OpenAIAssistantRunnableOutput>;

468

469

/** Stream assistant responses */

470

stream(

471

input: OpenAIAssistantRunnableInput,

472

options?: RunnableConfig

473

): AsyncGenerator<OpenAIAssistantRunnableOutput>;

474

475

static createAssistant(

476

options: CreateAssistantOptions,

477

client?: OpenAI

478

): Promise<OpenAIAssistantRunnable>;

479

}

480

```

481

482

### OpenAI Files Integration

483

484

Integration with OpenAI's Files API for document management.

485

486

```typescript { .api }

487

/**

488

* OpenAI Files API wrapper

489

*/

490

class OpenAIFiles {

491

constructor(client?: OpenAI);

492

493

/** OpenAI client */

494

client: OpenAI;

495

496

/** Upload file */

497

upload(

498

file: File | Buffer,

499

purpose: "fine-tune" | "assistants"

500

): Promise<FileObject>;

501

502

/** List files */

503

list(purpose?: string): Promise<FileObject[]>;

504

505

/** Retrieve file */

506

retrieve(fileId: string): Promise<FileObject>;

507

508

/** Delete file */

509

delete(fileId: string): Promise<void>;

510

511

/** Download file content */

512

download(fileId: string): Promise<Buffer>;

513

}

514

```

515

516

### Violation of Expectations Chain

517

518

Experimental chain for detecting and handling unexpected outcomes.

519

520

```typescript { .api }

521

/**

522

* Chain for handling violated expectations

523

*/

524

class ViolationOfExpectationsChain extends BaseChain {

525

constructor(fields: ViolationOfExpectationsChainInput);

526

527

/** Chain for prediction */

528

predictionChain: LLMChain;

529

530

/** Chain for creating revised prediction */

531

revisedPredictionChain: LLMChain;

532

533

/** Chain for extracting insights */

534

insightChain: LLMChain;

535

536

_call(values: ChainValues): Promise<ChainValues>;

537

538

static fromLLM(

539

llm: BaseLanguageModelInterface,

540

opts?: Partial<ViolationOfExpectationsChainInput>

541

): ViolationOfExpectationsChain;

542

}

543

```

544

545

### Data Masking Utilities

546

547

Experimental utilities for masking sensitive information.

548

549

```typescript { .api }

550

/**

551

* Data masking transformer

552

*/

553

class DataMaskingTransformer {

554

constructor(fields?: DataMaskingTransformerInput);

555

556

/** Masking patterns */

557

patterns: MaskingPattern[];

558

559

/** Transform text by masking sensitive data */

560

transform(text: string): string;

561

562

/** Add masking pattern */

563

addPattern(pattern: MaskingPattern): void;

564

565

/** Remove masking pattern */

566

removePattern(name: string): void;

567

}

568

569

interface MaskingPattern {

570

name: string;

571

regex: RegExp;

572

replacement: string;

573

}

574

```

575

576

### Custom Format Prompts

577

578

Experimental prompt templates with custom formatting.

579

580

```typescript { .api }

581

/**

582

* Custom format prompt template

583

*/

584

class CustomFormatPromptTemplate extends BasePromptTemplate {

585

constructor(fields: CustomFormatPromptTemplateInput);

586

587

/** Template string */

588

template: string;

589

590

/** Custom formatter function */

591

formatter: (template: string, values: InputValues) => string;

592

593

format(values: InputValues): Promise<string>;

594

595

static fromTemplate(

596

template: string,

597

formatter: (template: string, values: InputValues) => string

598

): CustomFormatPromptTemplate;

599

}

600

601

/**

602

* Handlebars prompt template

603

*/

604

class HandlebarsPromptTemplate extends BasePromptTemplate {

605

constructor(fields: HandlebarsPromptTemplateInput);

606

607

/** Handlebars template */

608

template: string;

609

610

/** Handlebars helpers */

611

helpers?: Record<string, Function>;

612

613

format(values: InputValues): Promise<string>;

614

615

static fromTemplate(

616

template: string,

617

helpers?: Record<string, Function>

618

): HandlebarsPromptTemplate;

619

}

620

```

621

622

## Types

623

624

### AutoGPT Types

625

626

```typescript { .api }

627

interface AutoGPTInput {

628

aiName: string;

629

aiRole: string;

630

tools: BaseTool[];

631

llm: BaseLanguageModelInterface;

632

memory: VectorStoreRetrieverMemory;

633

humanInTheLoop?: boolean;

634

outputParser?: AutoGPTOutputParser;

635

maxIterations?: number;

636

}

637

638

interface AutoGPTPromptInput {

639

aiName: string;

640

aiRole: string;

641

tools: BaseTool[];

642

tokenCounter: (text: string) => Promise<number>;

643

sendTokenLimit?: number;

644

}

645

646

interface AutoGPTAction {

647

name: string;

648

args: Record<string, any>;

649

}

650

```

651

652

### BabyAGI Types

653

654

```typescript { .api }

655

interface BabyAGIInput {

656

creationChain: LLMChain;

657

prioritizationChain: LLMChain;

658

executionChain: BaseChain;

659

vectorstore: VectorStoreInterface;

660

taskList?: Task[];

661

maxIterations?: number;

662

verbose?: boolean;

663

}

664

```

665

666

### Generative Agent Types

667

668

```typescript { .api }

669

interface GenerativeAgentInput {

670

name: string;

671

age?: number;

672

traits: string;

673

status: string;

674

memoryRetriever: TimeWeightedVectorStoreRetriever;

675

llm: BaseLanguageModelInterface;

676

memoryImportance?: number;

677

summary?: string;

678

summaryRefreshSeconds?: number;

679

verbose?: boolean;

680

}

681

682

interface GenerativeAgentMemoryInput {

683

llm: BaseLanguageModelInterface;

684

memoryRetriever: TimeWeightedVectorStoreRetriever;

685

verbose?: boolean;

686

reflectionThreshold?: number;

687

currentPlan?: string[];

688

importanceWeight?: number;

689

}

690

```

691

692

### Plan and Execute Types

693

694

```typescript { .api }

695

interface PlanAndExecuteAgentInput {

696

planner: BasePlanner;

697

executor: BaseExecutor;

698

stepContainer?: BaseStepContainer;

699

verbose?: boolean;

700

}

701

702

interface Plan {

703

steps: Step[];

704

}

705

706

interface Step {

707

id: string;

708

description: string;

709

dependencies?: string[];

710

}

711

712

interface PlannerInput {

713

objective: string;

714

context?: string;

715

}

716

717

interface BaseExecutorOutput {

718

result: string;

719

observations?: string[];

720

}

721

722

abstract class BaseStepContainer {

723

abstract addStep(step: Step, result: string): void;

724

abstract getSteps(): Array<{ step: Step; result?: string }>;

725

abstract getFinalResponse(): string;

726

}

727

```

728

729

### OpenAI Assistant Types

730

731

```typescript { .api }

732

interface OpenAIAssistantRunnableInput {

733

content: string;

734

threadId?: string;

735

runId?: string;

736

fileIds?: string[];

737

}

738

739

interface OpenAIAssistantRunnableOutput {

740

content: string;

741

threadId: string;

742

runId: string;

743

}

744

745

interface CreateAssistantOptions {

746

model: string;

747

name?: string;

748

description?: string;

749

instructions?: string;

750

tools?: Array<{ type: string; [key: string]: any }>;

751

fileIds?: string[];

752

metadata?: Record<string, string>;

753

}

754

```

755

756

### Data Masking Types

757

758

```typescript { .api }

759

interface DataMaskingTransformerInput {

760

patterns?: MaskingPattern[];

761

}

762

763

interface MaskingPattern {

764

name: string;

765

regex: RegExp;

766

replacement: string;

767

}

768

```

769

770

### Custom Format Types

771

772

```typescript { .api }

773

interface CustomFormatPromptTemplateInput {

774

template: string;

775

formatter: (template: string, values: InputValues) => string;

776

inputVariables: string[];

777

}

778

779

interface HandlebarsPromptTemplateInput {

780

template: string;

781

helpers?: Record<string, Function>;

782

inputVariables: string[];

783

}

784

```

785

786

## Experimental Usage Patterns

787

788

### AutoGPT for Research Tasks

789

790

```typescript

791

import { AutoGPT } from "langchain/experimental/autogpt";

792

793

const researchBot = AutoGPT.fromLLMAndTools(llm, [

794

webSearchTool,

795

fileWriteTool,

796

emailTool

797

], {

798

aiName: "ResearchBot",

799

aiRole: "Autonomous research assistant that can search, analyze, and report findings",

800

maxIterations: 20

801

});

802

803

await researchBot.run([

804

"Research the latest trends in renewable energy technology",

805

"Compile findings into a comprehensive report",

806

"Email the report to stakeholders"

807

]);

808

```

809

810

### Generative Agent Simulation

811

812

```typescript

813

import { GenerativeAgent } from "langchain/experimental/generative_agents";

814

815

const agent = GenerativeAgent.create(

816

"Alice",

817

memoryRetriever,

818

llm,

819

{

820

age: 25,

821

traits: "curious, analytical, friendly",

822

status: "working on a research project"

823

}

824

);

825

826

// Add some memories

827

await agent.addMemory("Had coffee with Bob to discuss the project");

828

await agent.addMemory("Found an interesting paper on neural networks");

829

830

// Generate reaction to new observation

831

const [shouldRespond, response] = await agent.generateReaction(

832

"Someone asks about the research project progress"

833

);

834

835

if (shouldRespond) {

836

console.log(response);

837

}

838

```

839

840

**Warning**: These experimental features are not guaranteed to be stable and should be used with caution in production environments.