or run

npx @tessl/cli init
Log in

Version

Tile

Overview

Evals

Files

Files

docs

agents.md caches.md callbacks.md documents.md embeddings.md index.md language-models.md memory-storage.md messages.md output-parsers.md prompts.md retrievers.md runnables.md tools.md vectorstores.md
tile.json

docs/caches.md

# Caching

Caching framework for optimizing repeated operations by storing and retrieving previously computed results. Caching improves performance and reduces API costs in LangChain applications.

## Capabilities

### Base Cache

Abstract base class for all cache implementations.

```typescript { .api }
/**
 * Abstract base class for caching implementations
 * @template T - Type of cached values
 */
abstract class BaseCache<T = any> {
  constructor();

  /** Look up cached value by prompt and model key */
  abstract lookup(prompt: string, llmKey: string): Promise<T | null>;

  /** Store value in cache */
  abstract update(prompt: string, llmKey: string, value: T): Promise<void>;

  /** Set default key encoder function */
  makeDefaultKeyEncoder(keyEncoderFn: HashKeyEncoder): void;
}
```

### In-Memory Cache

Simple in-memory cache implementation.

```typescript { .api }
/**
 * In-memory cache implementation
 * @template T - Type of cached values
 */
class InMemoryCache<T = any> extends BaseCache<T> {
  /** Internal cache storage */
  private cache: Map<string, T>;

  constructor();

  /** Get cached value */
  async lookup(prompt: string, llmKey: string): Promise<T | null>;

  /** Store value in cache */
  async update(prompt: string, llmKey: string, value: T): Promise<void>;

  /** Get global cache instance */
  static global(): InMemoryCache;

  /** Clear all cached values */
  clear(): void;
}
```

**Usage Examples:**

```typescript
import { InMemoryCache } from "@langchain/core/caches";

// Create cache instance
const cache = new InMemoryCache<string>();

// Store value
await cache.update("What is 2+2?", "gpt-3.5-turbo", "2+2 equals 4");

// Retrieve value
const cached = await cache.lookup("What is 2+2?", "gpt-3.5-turbo");
console.log(cached); // "2+2 equals 4"

// Use global cache instance
const globalCache = InMemoryCache.global();
await globalCache.update("Hello", "model-key", "Hi there!");

// Clear cache
cache.clear();
```

### Cache Utilities

Utility functions for cache key generation and serialization.

```typescript { .api }
/**
 * Generate cache key from strings (deprecated)
 * @deprecated Use proper key encoding instead
 */
function getCacheKey(...strings: string[]): string;

/**
 * Serialize generation for caching
 */
function serializeGeneration(generation: Generation): StoredGeneration;

/**
 * Deserialize stored generation from cache
 */
function deserializeStoredGeneration(storedGeneration: StoredGeneration): Generation;

/**
 * Hash key encoder function type
 */
type HashKeyEncoder = (key: string) => string;
```

**Usage Examples:**

```typescript
import { serializeGeneration, deserializeStoredGeneration } from "@langchain/core/caches";

// Serialize generation for storage
const generation = {
  text: "Hello world",
  generationInfo: { model: "gpt-3.5-turbo", tokens: 10 }
};

const stored = serializeGeneration(generation);
console.log(stored); // Serialized format suitable for caching

// Deserialize from cache
const restored = deserializeStoredGeneration(stored);
console.log(restored.text); // "Hello world"
```

## Cache Integration

### Using Cache with Language Models

```typescript
// Example of setting up caching with a language model
import { InMemoryCache } from "@langchain/core/caches";

// Create global cache
const cache = InMemoryCache.global();

// Cache would be used automatically by language models
// when they perform lookups before making API calls
const result1 = await model.invoke("Explain quantum physics"); // API call
const result2 = await model.invoke("Explain quantum physics"); // Retrieved from cache
```

## Types

```typescript { .api }
interface Generation {
  /** Generated text */
  text: string;
  /** Additional generation metadata */
  generationInfo?: Record<string, unknown>;
}

interface StoredGeneration {
  /** Serialized text */
  text: string;
  /** Serialized generation info */
  generationInfo?: Record<string, unknown>;
}

interface CacheInterface<T = any> {
  lookup(prompt: string, llmKey: string): Promise<T | null>;
  update(prompt: string, llmKey: string, value: T): Promise<void>;
}
```