# Chains and Workflows

Composable sequences of operations that combine models, retrievers, and processing steps to create complex workflows. Chains enable sophisticated AI applications by linking together multiple components in a reusable and modular way.

## Capabilities

### Basic Chain Types

Core chain classes for common LLM workflows and operations.
```python { .api }
class LLMChain:
    """Chain for single LLM calls with prompt templates.

    **Deprecated since 0.1.17**: Use RunnableSequence, e.g., `prompt | llm` instead.
    Removal planned for version 1.0.
    """

    def __init__(
        self,
        llm: BaseLanguageModel,
        prompt: BasePromptTemplate,
        output_parser: Optional[BaseOutputParser] = None,
        memory: Optional[BaseMemory] = None,
        **kwargs: Any
    ): ...

    def run(self, **kwargs: Any) -> str:
        """Run chain with keyword arguments, return string output."""

    def invoke(self, input: dict) -> dict:
        """Invoke chain with dict input, return dict output."""

    def stream(self, input: dict):
        """Stream chain execution with incremental outputs."""

    def batch(self, inputs: List[dict]) -> List[dict]:
        """Process multiple inputs in batch."""

class ConversationChain:
    """Chain for conversational interactions with memory.

    **Deprecated since 0.2.7**: Use RunnableWithMessageHistory instead.
    Removal planned for version 1.0.
    """

    def __init__(
        self,
        llm: BaseLanguageModel,
        memory: Optional[BaseMemory] = None,
        prompt: Optional[BasePromptTemplate] = None,
        **kwargs: Any
    ): ...

    def predict(self, input: str) -> str:
        """Generate response to input maintaining conversation context."""

class TransformChain:
    """Chain that transforms input using a function."""

    def __init__(
        self,
        input_variables: List[str],
        output_variables: List[str],
        transform: Callable[[dict], dict],
        **kwargs: Any
    ): ...
```
### Sequential Chains

Chains that execute multiple steps in sequence, passing outputs as inputs to subsequent steps.

```python { .api }
class SequentialChain:
    """Chain that runs multiple chains in sequence."""

    def __init__(
        self,
        chains: List[Chain],
        input_variables: List[str],
        output_variables: List[str] = None,
        return_all: bool = False,
        **kwargs: Any
    ): ...

    def run(self, **kwargs: Any) -> Union[str, dict]: ...

class SimpleSequentialChain:
    """Sequential chain where each step has single input/output."""

    def __init__(self, chains: List[Chain], **kwargs: Any): ...

    def run(self, input: str) -> str: ...
```
### Question Answering Chains

Specialized chains for question-answering workflows with document retrieval and processing.

```python { .api }
class RetrievalQA:
    """Chain for question-answering with document retrieval."""

    @classmethod
    def from_chain_type(
        cls,
        llm: BaseLanguageModel,
        chain_type: str = "stuff",
        retriever: BaseRetriever = None,
        return_source_documents: bool = False,
        **kwargs: Any
    ) -> "RetrievalQA":
        """
        Create RetrievalQA chain from chain type.

        Parameters:
        - llm: Language model for QA
        - chain_type: "stuff", "map_reduce", "refine", or "map_rerank"
        - retriever: Document retriever
        - return_source_documents: Include source docs in response
        """

class VectorDBQA:
    """Question answering over vector database (deprecated, use RetrievalQA)."""

    @classmethod
    def from_chain_type(
        cls,
        llm: BaseLanguageModel,
        chain_type: str = "stuff",
        vectorstore: VectorStore = None,
        **kwargs: Any
    ) -> "VectorDBQA": ...

class QAWithSourcesChain:
    """QA chain that includes source document citations."""

    @classmethod
    def from_chain_type(
        cls,
        llm: BaseLanguageModel,
        chain_type: str = "stuff",
        **kwargs: Any
    ) -> "QAWithSourcesChain": ...

class ConversationalRetrievalChain:
    """QA chain for conversational retrieval with chat history."""

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        retriever: BaseRetriever,
        memory: Optional[BaseMemory] = None,
        **kwargs: Any
    ) -> "ConversationalRetrievalChain": ...
```
### Document Processing Chains

Chains for processing and combining multiple documents in various patterns.

```python { .api }
class StuffDocumentsChain:
    """Chain that stuffs all documents into a single prompt."""

    def __init__(
        self,
        llm_chain: LLMChain,
        document_variable_name: str = "context",
        **kwargs: Any
    ): ...

class MapReduceDocumentsChain:
    """Chain that maps over documents then reduces results."""

    def __init__(
        self,
        llm_chain: LLMChain,
        reduce_documents_chain: ReduceDocumentsChain,
        document_variable_name: str = "context",
        **kwargs: Any
    ): ...

class RefineDocumentsChain:
    """Chain that iteratively refines answer using documents."""

    def __init__(
        self,
        initial_llm_chain: LLMChain,
        refine_llm_chain: LLMChain,
        document_variable_name: str = "context",
        **kwargs: Any
    ): ...

class MapRerankDocumentsChain:
    """Chain that maps over documents and reranks results."""

    def __init__(
        self,
        llm_chain: LLMChain,
        rank_key: str = "score",
        answer_key: str = "answer",
        **kwargs: Any
    ): ...

class AnalyzeDocumentChain:
    """Chain for analyzing a single large document."""

    def __init__(
        self,
        combine_docs_chain: BaseCombineDocumentsChain,
        text_splitter: TextSplitter = None,
        **kwargs: Any
    ): ...
```
### Specialized Chains

Domain-specific chains for particular use cases and applications.

```python { .api }
class LLMCheckerChain:
    """Chain that checks LLM outputs for accuracy."""

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        create_draft_answer_prompt: Optional[BasePromptTemplate] = None,
        list_assertions_prompt: Optional[BasePromptTemplate] = None,
        **kwargs: Any
    ) -> "LLMCheckerChain": ...

class LLMMathChain:
    """Chain for mathematical reasoning and calculations."""

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        prompt: Optional[BasePromptTemplate] = None,
        **kwargs: Any
    ) -> "LLMMathChain": ...

class APIChain:
    """Chain for making API calls based on natural language."""

    @classmethod
    def from_llm_and_api_docs(
        cls,
        llm: BaseLanguageModel,
        api_docs: str,
        **kwargs: Any
    ) -> "APIChain": ...

class RouterChain:
    """Chain that routes inputs to different destination chains."""

    def __init__(
        self,
        router_chain: LLMRouterChain,
        destination_chains: dict,
        default_chain: Optional[Chain] = None,
        **kwargs: Any
    ): ...

class MultiPromptChain:
    """Chain that routes to different prompts based on input."""

    @classmethod
    def from_prompts(
        cls,
        llm: BaseLanguageModel,
        prompt_infos: List[dict],
        default_chain: Optional[Chain] = None,
        **kwargs: Any
    ) -> "MultiPromptChain": ...
```
### Modern Chain Creation Functions

Functional approach for creating retrieval and processing chains using the LCEL (LangChain Expression Language) format.

```python { .api }
def create_retrieval_chain(
    retriever: BaseRetriever,
    combine_docs_chain: Runnable
) -> Runnable:
    """
    Create a retrieval chain combining retriever and processing.

    Parameters:
    - retriever: Document retriever
    - combine_docs_chain: Chain to process retrieved documents

    Returns:
    Runnable chain for retrieval-based processing
    """

def create_history_aware_retriever(
    llm: BaseLanguageModel,
    retriever: BaseRetriever,
    prompt: BasePromptTemplate
) -> Runnable:
    """
    Create retriever that considers chat history for context.

    Parameters:
    - llm: Language model for history processing
    - retriever: Base document retriever
    - prompt: Template for incorporating history
    """

def create_sql_query_chain(
    llm: BaseLanguageModel,
    db: SQLDatabase,
    prompt: Optional[BasePromptTemplate] = None,
    **kwargs: Any
) -> Runnable:
    """Create chain for generating SQL queries from natural language."""

def create_structured_output_runnable(
    output_schema: Union[dict, Type[BaseModel]],
    llm: BaseLanguageModel,
    prompt: BasePromptTemplate,
    **kwargs: Any
) -> Runnable:
    """Create chain with structured output parsing."""
```
### Chain Loading and Management

Functions for loading chains from configuration and managing chain workflows.

```python { .api }
def load_chain(config: dict, **kwargs: Any) -> Chain:
    """
    Load chain from configuration dict or file.

    Parameters:
    - config: Chain configuration
    - kwargs: Additional chain parameters

    Returns:
    Configured chain instance
    """

def load_summarize_chain(
    llm: BaseLanguageModel,
    chain_type: str = "stuff",
    **kwargs: Any
) -> BaseCombineDocumentsChain:
    """
    Load summarization chain for document processing.

    Parameters:
    - llm: Language model for summarization
    - chain_type: "stuff", "map_reduce", or "refine"
    """
```
### Chain Base Classes

Core abstractions and interfaces for chain implementations.

```python { .api }
class Chain:
    """Base chain class."""

    @property
    def input_keys(self) -> List[str]:
        """Input keys expected by chain."""

    @property
    def output_keys(self) -> List[str]:
        """Output keys produced by chain."""

    def run(self, **kwargs: Any) -> str: ...

    def invoke(self, input: dict) -> dict: ...

    def stream(self, input: dict): ...

    def batch(self, inputs: List[dict]) -> List[dict]: ...

class BaseCombineDocumentsChain(Chain):
    """Base chain for combining multiple documents."""

    def combine_docs(self, docs: List[Document], **kwargs: Any) -> str: ...
```
## Usage Examples

### Basic LLM Chain

```python
from langchain.chains import LLMChain
from langchain_core.prompts import PromptTemplate
from langchain_openai import OpenAI

# Create prompt template
prompt = PromptTemplate(
    input_variables=["product"],
    template="What are the key benefits of {product}?"
)

# Create LLM chain
llm = OpenAI(temperature=0.7)
chain = LLMChain(llm=llm, prompt=prompt)

# Run chain
result = chain.run(product="electric vehicles")
print(result)
```
### Sequential Chain Example

```python
from langchain.chains import LLMChain, SequentialChain
from langchain_core.prompts import PromptTemplate

# First chain: generate topic
topic_prompt = PromptTemplate(
    input_variables=["subject"],
    template="Generate an interesting topic about {subject}"
)
topic_chain = LLMChain(llm=llm, prompt=topic_prompt, output_key="topic")

# Second chain: write content
content_prompt = PromptTemplate(
    input_variables=["topic"],
    template="Write a short article about: {topic}"
)
content_chain = LLMChain(llm=llm, prompt=content_prompt, output_key="article")

# Combine chains
overall_chain = SequentialChain(
    chains=[topic_chain, content_chain],
    input_variables=["subject"],
    output_variables=["topic", "article"]
)

result = overall_chain.run(subject="artificial intelligence")
```
### Question Answering with Retrieval

```python
from langchain.chains import RetrievalQA
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAI, OpenAIEmbeddings

# Assuming you have a vector store with documents
vectorstore = FAISS.load_local("path/to/vectorstore", OpenAIEmbeddings())
retriever = vectorstore.as_retriever()

# Create QA chain
qa_chain = RetrievalQA.from_chain_type(
    llm=OpenAI(temperature=0),
    chain_type="stuff",
    retriever=retriever,
    return_source_documents=True
)

# Ask question
result = qa_chain.run("What are the main conclusions?")
```
### Conversational Retrieval

```python
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory

# Create memory for conversation history
memory = ConversationBufferMemory(
    memory_key="chat_history",
    return_messages=True
)

# Create conversational retrieval chain
qa_chain = ConversationalRetrievalChain.from_llm(
    llm=OpenAI(temperature=0),
    retriever=retriever,
    memory=memory
)

# Have conversation
result1 = qa_chain.run("What is the document about?")
result2 = qa_chain.run("Can you elaborate on the first point?")
```