# Memory and Context Management

State management components for maintaining conversation history, context, and intermediate results across interactions. Memory enables AI applications to have coherent conversations and maintain context over extended interactions.

## Capabilities

### Buffer Memory Types

Core memory implementations that store conversation history in different formats and with various retention strategies.

```python { .api }
class ConversationBufferMemory:
    """Buffer memory that stores entire conversation history."""

    def __init__(
        self,
        human_prefix: str = "Human",
        ai_prefix: str = "AI",
        memory_key: str = "history",
        return_messages: bool = False,
        input_key: Optional[str] = None,
        output_key: Optional[str] = None,
        **kwargs: Any
    ):
        """
        Initialize buffer memory.

        Parameters:
        - human_prefix: Prefix for human messages
        - ai_prefix: Prefix for AI messages
        - memory_key: Key to store memory in chain input
        - return_messages: Return as Message objects vs string
        - input_key: Key for input in conversation
        - output_key: Key for output in conversation
        """

    def save_context(self, inputs: dict, outputs: dict) -> None:
        """Save context from conversation turn."""

    def load_memory_variables(self, inputs: dict) -> dict:
        """Load memory variables for chain input."""

    def clear(self) -> None:
        """Clear memory contents."""
class ConversationBufferWindowMemory:
    """Windowed buffer memory keeping only recent messages."""

    def __init__(
        self,
        k: int = 5,
        return_messages: bool = False,
        **kwargs: Any
    ):
        """
        Initialize windowed buffer memory.

        Parameters:
        - k: Number of recent conversation turns to keep
        - return_messages: Return as Message objects vs string
        """

    def save_context(self, inputs: dict, outputs: dict) -> None: ...

    def load_memory_variables(self, inputs: dict) -> dict: ...
class ConversationSummaryMemory:
    """Memory that summarizes conversation history using LLM."""

    def __init__(
        self,
        llm: BaseLanguageModel,
        memory_key: str = "history",
        return_messages: bool = False,
        buffer: str = "",
        **kwargs: Any
    ):
        """
        Initialize summary memory.

        Parameters:
        - llm: Language model for summarization
        - memory_key: Key to store memory in chain input
        - return_messages: Return as Message objects vs string
        - buffer: Initial summary buffer
        """

    def save_context(self, inputs: dict, outputs: dict) -> None: ...

    def load_memory_variables(self, inputs: dict) -> dict: ...

    def predict_new_summary(
        self,
        messages: List[BaseMessage],
        existing_summary: str
    ) -> str:
        """Generate new summary from messages and existing summary."""
class ConversationSummaryBufferMemory:
    """Hybrid memory combining summary and buffer approaches."""

    def __init__(
        self,
        llm: BaseLanguageModel,
        max_token_limit: int = 2000,
        return_messages: bool = False,
        **kwargs: Any
    ):
        """
        Initialize summary buffer memory.

        Parameters:
        - llm: Language model for summarization
        - max_token_limit: Token limit before summarization
        - return_messages: Return as Message objects vs string
        """

    def save_context(self, inputs: dict, outputs: dict) -> None: ...

    def load_memory_variables(self, inputs: dict) -> dict: ...
```

### Specialized Memory Types

Memory implementations for specific use cases and data structures.

```python { .api }
class ConversationTokenBufferMemory:
    """Buffer memory with token-based truncation."""

    def __init__(
        self,
        llm: BaseLanguageModel,
        max_token_limit: int = 2000,
        return_messages: bool = False,
        **kwargs: Any
    ):
        """
        Initialize token buffer memory.

        Parameters:
        - llm: Language model for token counting
        - max_token_limit: Maximum tokens to keep in buffer
        - return_messages: Return as Message objects vs string
        """
class VectorStoreRetrieverMemory:
    """Memory backed by vector store for semantic retrieval."""

    def __init__(
        self,
        retriever: VectorStoreRetriever,
        memory_key: str = "history",
        input_key: Optional[str] = None,
        **kwargs: Any
    ): ...

    def save_context(self, inputs: dict, outputs: dict) -> None: ...

    def load_memory_variables(self, inputs: dict) -> dict: ...
class ConversationEntityMemory:
    """Memory that tracks entities mentioned in conversation."""

    def __init__(
        self,
        llm: BaseLanguageModel,
        entity_extraction_prompt: Optional[BasePromptTemplate] = None,
        entity_summarization_prompt: Optional[BasePromptTemplate] = None,
        **kwargs: Any
    ): ...

    def save_context(self, inputs: dict, outputs: dict) -> None: ...

    def load_memory_variables(self, inputs: dict) -> dict: ...
class ConversationKGMemory:
    """Memory that maintains knowledge graph of conversation."""

    def __init__(
        self,
        llm: BaseLanguageModel,
        kg: NetworkXEntityGraph,
        memory_key: str = "history",
        **kwargs: Any
    ): ...
```

### Memory Base Classes

Core abstractions and interfaces for memory implementations.

```python { .api }
class BaseMemory:
    """Base class for memory implementations."""

    @property
    def memory_variables(self) -> List[str]:
        """Variables this memory class provides."""

    def load_memory_variables(self, inputs: dict) -> dict:
        """Load memory variables for use in chain."""

    def save_context(self, inputs: dict, outputs: dict) -> None:
        """Save context from this conversation turn."""

    def clear(self) -> None:
        """Clear memory contents."""
class BaseChatMemory(BaseMemory):
    """Base class for chat memory implementations."""

    def __init__(
        self,
        chat_memory: Optional[BaseChatMessageHistory] = None,
        output_key: Optional[str] = None,
        input_key: Optional[str] = None,
        return_messages: bool = False,
        **kwargs: Any
    ): ...

    @property
    def chat_memory(self) -> BaseChatMessageHistory: ...
class ReadOnlySharedMemory(BaseMemory):
    """Read-only memory that can be shared across chains."""

    def __init__(self, memory: BaseMemory): ...

    def load_memory_variables(self, inputs: dict) -> dict: ...

    def save_context(self, inputs: dict, outputs: dict) -> None:
        """No-op for read-only memory."""
```

### Chat Message History

Storage backends for conversation history with different persistence options.

```python { .api }
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import BaseMessage
class BaseChatMessageHistory:
    """Base class for chat message history storage."""

    def add_message(self, message: BaseMessage) -> None:
        """Add message to history."""

    def add_user_message(self, message: str) -> None:
        """Add user message to history."""

    def add_ai_message(self, message: str) -> None:
        """Add AI message to history."""

    def clear(self) -> None:
        """Clear message history."""

    @property
    def messages(self) -> List[BaseMessage]:
        """Get all messages in history."""
class ChatMessageHistory(BaseChatMessageHistory):
    """In-memory chat message history."""

    def __init__(self, messages: Optional[List[BaseMessage]] = None): ...

# Persistent storage options available in langchain_community:
# - FileChatMessageHistory: File-based persistence
# - RedisChatMessageHistory: Redis-based persistence
# - MongoDBChatMessageHistory: MongoDB-based persistence
# - PostgresChatMessageHistory: PostgreSQL-based persistence
```

### Memory Integration Patterns

Utilities for integrating memory with chains and applications.

```python { .api }
class SimpleMemory(BaseMemory):
    """Simple key-value memory store."""

    def __init__(self, memories: Optional[dict] = None): ...

    @property
    def memory_variables(self) -> List[str]: ...

    def load_memory_variables(self, inputs: dict) -> dict: ...

    def save_context(self, inputs: dict, outputs: dict) -> None: ...
class CombinedMemory(BaseMemory):
    """Combine multiple memory instances."""

    def __init__(self, memories: List[BaseMemory]): ...

    def load_memory_variables(self, inputs: dict) -> dict: ...

    def save_context(self, inputs: dict, outputs: dict) -> None: ...
```

## Usage Examples

### Basic Conversation Memory

```python
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationChain
from langchain_openai import OpenAI

# Create memory
memory = ConversationBufferMemory()

# Create conversation chain with memory
llm = OpenAI(temperature=0)
conversation = ConversationChain(
    llm=llm,
    memory=memory,
    verbose=True
)

# Have conversation - memory persists context
response1 = conversation.predict(input="Hi, my name is John")
response2 = conversation.predict(input="What's my name?")
# The AI will remember "John" from the previous interaction
```

### Windowed Memory

```python
from langchain.memory import ConversationBufferWindowMemory

# Keep only last 3 conversation turns
memory = ConversationBufferWindowMemory(k=3)

conversation = ConversationChain(
    llm=OpenAI(temperature=0),
    memory=memory
)

# Only the most recent 3 exchanges will be remembered
for i in range(10):
    response = conversation.predict(input=f"Message {i}")
```

### Summary Memory

```python
from langchain.memory import ConversationSummaryMemory

# Memory that summarizes conversation history
memory = ConversationSummaryMemory(
    llm=OpenAI(temperature=0),
    return_messages=True
)

conversation = ConversationChain(
    llm=OpenAI(temperature=0),
    memory=memory,
    verbose=True
)

# Long conversations are summarized to maintain context
# while keeping memory usage bounded
```

### Memory with Custom Keys

```python
from langchain.memory import ConversationBufferMemory
from langchain.chains import LLMChain
from langchain_core.prompts import PromptTemplate

# Create memory with custom keys
memory = ConversationBufferMemory(
    memory_key="chat_history",
    input_key="question",
    output_key="answer"
)

# Create prompt template that uses the memory
prompt = PromptTemplate(
    input_variables=["chat_history", "question"],
    template="""
Previous conversation:
{chat_history}

Current question: {question}
Answer:"""
)

# Create chain with memory
chain = LLMChain(
    llm=OpenAI(temperature=0),
    prompt=prompt,
    memory=memory
)

# Use the chain
result = chain.run(question="What is machine learning?")
```

### Vector Store Memory

```python
from langchain.memory import VectorStoreRetrieverMemory
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings

# Create vector store for semantic memory
embeddings = OpenAIEmbeddings()
vectorstore = FAISS.from_texts([""], embeddings)
retriever = vectorstore.as_retriever(search_kwargs={"k": 1})

# Create semantic memory
memory = VectorStoreRetrieverMemory(
    retriever=retriever,
    memory_key="relevant_context",
    input_key="input"
)

# Memory will retrieve semantically relevant past conversations
conversation = ConversationChain(
    llm=OpenAI(temperature=0),
    memory=memory
)
```

### Multiple Memory Types

```python
from langchain.memory import (
    CombinedMemory,
    ConversationSummaryMemory,
    ConversationBufferWindowMemory
)

# Combine different memory types
summary_memory = ConversationSummaryMemory(llm=OpenAI(temperature=0))
window_memory = ConversationBufferWindowMemory(k=3)

# Combined memory uses both approaches
combined_memory = CombinedMemory(
    memories=[summary_memory, window_memory]
)

conversation = ConversationChain(
    llm=OpenAI(temperature=0),
    memory=combined_memory
)
```

### Persistent Memory

```python
from langchain_community.chat_message_histories import FileChatMessageHistory
from langchain.memory import ConversationBufferMemory

# Create persistent file-based message history
history = FileChatMessageHistory("conversation_history.json")

# Create memory with persistent storage
memory = ConversationBufferMemory(
    chat_memory=history,
    return_messages=True
)

# Conversation history will persist across sessions
conversation = ConversationChain(
    llm=OpenAI(temperature=0),
    memory=memory
)
```