# Full Archive Extraction

Transform stream for extracting entire ZIP archives to the filesystem with configurable concurrency and destination paths. This high-level interface provides a simple way to extract complete archives while maintaining memory efficiency through streaming.

## Capabilities

### Extract Constructor

Creates a transform stream that extracts all contents of a ZIP archive to a specified directory.

```javascript { .api }
/**
 * Creates a transform stream for extracting ZIP archives to filesystem
 * @param options - Configuration object with extraction settings
 * @returns Extract transform stream instance
 */
class Extract extends Transform {
  constructor(options: ExtractOptions);
}

interface ExtractOptions {
  /** Destination directory path for extraction */
  path: string;
  /** Number of concurrent file extractions (optional, defaults to 1) */
  concurrency?: number;
}
```
**Usage Examples:**

```javascript
const unzipper = require("unzipper");
const fs = require("fs");

// Basic extraction to directory
fs.createReadStream("archive.zip")
  .pipe(unzipper.Extract({ path: "extracted-files" }))
  .on("close", () => {
    console.log("Extraction completed");
  });

// Extraction with custom concurrency
fs.createReadStream("large-archive.zip")
  .pipe(unzipper.Extract({
    path: "output",
    concurrency: 5
  }))
  .on("close", () => {
    console.log("Large archive extracted");
  });

// Promise-based extraction
const extractPromise = new Promise((resolve, reject) => {
  fs.createReadStream("data.zip")
    .pipe(unzipper.Extract({ path: "data" }))
    .on("close", resolve)
    .on("error", reject);
});

await extractPromise;
console.log("Data extraction completed");
```
### Directory Creation

The Extract stream automatically creates the destination directory and any necessary parent directories.

```javascript
// Automatically creates nested directory structure
fs.createReadStream("project.zip")
  .pipe(unzipper.Extract({ path: "projects/new-project/files" }));
```

### Concurrency Control

Configure the number of files extracted simultaneously to balance speed and resource usage.

```javascript
// Low concurrency for resource-constrained environments (this is the default)
unzipper.Extract({ path: "output", concurrency: 1 })

// High concurrency for fast extraction
unzipper.Extract({ path: "output", concurrency: 10 })

// Balanced concurrency
unzipper.Extract({ path: "output", concurrency: 3 })
```
## Events

### Stream Events

Standard transform stream events for monitoring extraction progress.

```javascript { .api }
const extractor = fs.createReadStream("archive.zip")
  .pipe(unzipper.Extract({ path: "output" }));

// Extraction completed successfully
extractor.on('close', () => {
  console.log('All files extracted');
});

// Transform stream finished processing
extractor.on('finish', () => {
  console.log('Stream processing finished');
});

// Error occurred during extraction
extractor.on('error', (error: Error) => {
  console.error('Extraction failed:', error);
});
```
## Advanced Usage

### Error Handling and Validation

```javascript
const path = require("path");
const fs = require("fs");

const extractWithValidation = (zipPath, outputPath) => {
  return new Promise((resolve, reject) => {
    // Ensure output directory exists
    if (!fs.existsSync(outputPath)) {
      fs.mkdirSync(outputPath, { recursive: true });
    }

    const startTime = Date.now();
    let fileCount = 0;

    fs.createReadStream(zipPath)
      .pipe(unzipper.Extract({ path: outputPath }))
      .on("close", () => {
        const duration = Date.now() - startTime;
        console.log(`Extracted ${fileCount} files in ${duration}ms`);
        resolve({ fileCount, duration });
      })
      .on("error", (error) => {
        console.error(`Extraction failed: ${error.message}`);
        reject(error);
      });

    // Count files during extraction (approximate: this Parse pass runs in
    // parallel with the extraction above, so fileCount may still be catching
    // up when "close" fires)
    fs.createReadStream(zipPath)
      .pipe(new unzipper.Parse())
      .on("entry", (entry) => {
        if (entry.type === "File") {
          fileCount++;
        }
        entry.autodrain();
      });
  });
};

// Usage
try {
  const result = await extractWithValidation("data.zip", "extracted");
  console.log(`Successfully extracted ${result.fileCount} files`);
} catch (error) {
  console.error("Extraction failed:", error);
}
```
### Extraction with Progress Monitoring

```javascript
const extractWithProgress = (zipPath, outputPath) => {
  return new Promise((resolve, reject) => {
    let extractedFiles = 0;
    let totalFiles = 0;

    // First pass: count total files
    fs.createReadStream(zipPath)
      .pipe(new unzipper.Parse())
      .on("entry", (entry) => {
        if (entry.type === "File") {
          totalFiles++;
        }
        entry.autodrain();
      })
      .on("finish", () => {
        console.log(`Starting extraction of ${totalFiles} files...`);

        // Second pass: extract with progress
        fs.createReadStream(zipPath)
          .pipe(unzipper.Extract({ path: outputPath }))
          .on("close", () => {
            console.log(`\nExtraction completed: ${extractedFiles}/${totalFiles} files`);
            resolve({ extractedFiles, totalFiles });
          })
          .on("error", reject);

        // Simulate progress with a timer — Extract does not emit per-file
        // events, so this display is an estimate, not actual progress
        const progressInterval = setInterval(() => {
          extractedFiles++;
          const percent = Math.round((extractedFiles / totalFiles) * 100);
          process.stdout.write(`\rProgress: ${percent}% (${extractedFiles}/${totalFiles})`);

          if (extractedFiles >= totalFiles) {
            clearInterval(progressInterval);
          }
        }, 100);
      })
      .on("error", reject);
  });
};
```
### Conditional Extraction with Filtering

```javascript
// Extract only specific file types
const extractFiltered = (zipPath, outputPath, allowedExtensions) => {
  return new Promise((resolve, reject) => {
    const tempPath = `${outputPath}-temp`;

    // First extract everything to temp directory
    fs.createReadStream(zipPath)
      .pipe(unzipper.Extract({ path: tempPath }))
      .on("close", () => {
        // Then filter and move desired files
        const moveFilteredFiles = async () => {
          const glob = require("glob"); // third-party dependency

          if (!fs.existsSync(outputPath)) {
            fs.mkdirSync(outputPath, { recursive: true });
          }

          for (const ext of allowedExtensions) {
            const pattern = `${tempPath}/**/*.${ext}`;
            const files = glob.sync(pattern);

            for (const file of files) {
              const relativePath = path.relative(tempPath, file);
              const destPath = path.join(outputPath, relativePath);
              const destDir = path.dirname(destPath);

              if (!fs.existsSync(destDir)) {
                fs.mkdirSync(destDir, { recursive: true });
              }

              fs.renameSync(file, destPath);
            }
          }

          // Clean up temp directory (fs.rmSync requires Node.js 14.14+)
          fs.rmSync(tempPath, { recursive: true });
          resolve();
        };

        moveFilteredFiles().catch(reject);
      })
      .on("error", reject);
  });
};

// Usage: extract only JavaScript, JSON, and TypeScript files
await extractFiltered("project.zip", "src", ["js", "json", "ts"]);
```
### Stream Composition

```javascript
const zlib = require("zlib");

// Extract a gzip-compressed ZIP file
fs.createReadStream("archive.zip.gz")
  .pipe(zlib.createGunzip())
  .pipe(unzipper.Extract({ path: "output" }))
  .on("close", () => {
    console.log("Compressed archive extracted");
  });

// Extract from HTTP response
const https = require("https");

const downloadAndExtract = (url, outputPath) => {
  return new Promise((resolve, reject) => {
    https.get(url, (response) => {
      response
        .pipe(unzipper.Extract({ path: outputPath }))
        .on("close", resolve)
        .on("error", reject);
    }).on("error", reject);
  });
};

await downloadAndExtract("https://example.com/archive.zip", "downloads");
```