or run

npx @tessl/cli init
Log in

Version

Tile

Overview

Evals

Files

Files

docs

array-creation.mdarray-statistics.mddata-types.mdfft.mdindex.mdinput-output.mdlinear-algebra.mdmasked-arrays.mdmathematical-functions.mdpolynomial.mdrandom-generation.mdsearching-sorting.md

input-output.mddocs/

0

# Input/Output Operations

1

2

File I/O operations for saving and loading array data in various formats. Supports both binary and text formats with options for compression and structured data handling.

3

4

## Capabilities

5

6

### Binary File Operations

7

8

Efficient binary storage and loading of NumPy arrays.

9

10

```python { .api }

11

def save(file, arr, allow_pickle=True, fix_imports=True):

12

"""

13

Save array to binary file in NumPy .npy format.

14

15

Parameters:

16

- file: file, str, or pathlib.Path, file to write

17

- arr: array_like, array to save

18

- allow_pickle: bool, allow pickling of object arrays

19

- fix_imports: bool, fix Python 2/3 compatibility

20

21

Returns:

22

None: Saves array to file

23

"""

24

25

def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True, encoding='ASCII'):

26

"""

27

Load arrays from .npy, .npz or pickled files.

28

29

Parameters:

30

- file: file, str, or pathlib.Path, file to read

31

- mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, memory-map mode

32

- allow_pickle: bool, allow loading pickled object arrays

33

- fix_imports: bool, fix Python 2/3 compatibility

34

- encoding: str, encoding used when reading Python 2 pickled strings (only 'ASCII', 'latin1', and 'bytes' are allowed; has no effect on plain .npy/.npz data)

35

36

Returns:

37

ndarray or NpzFile: Loaded array(s)

38

"""

39

40

def savez(file, *args, **kwds):

41

"""

42

Save several arrays into single uncompressed .npz file.

43

44

Parameters:

45

- file: str or file, output file

46

- *args: array_like, arrays to save

47

- **kwds: array_like, arrays to save with keyword names

48

49

Returns:

50

None: Saves arrays to npz file

51

"""

52

53

def savez_compressed(file, *args, **kwds):

54

"""

55

Save several arrays into single compressed .npz file.

56

57

Parameters:

58

- file: str or file, output file

59

- *args: array_like, arrays to save

60

- **kwds: array_like, arrays to save with keyword names

61

62

Returns:

63

None: Saves arrays to compressed npz file

64

"""

65

```

66

67

### Text File Operations

68

69

Human-readable text format I/O operations.

70

71

```python { .api }

72

def loadtxt(fname, dtype=float, comments='#', delimiter=None, converters=None,
            skiprows=0, usecols=None, unpack=False, ndmin=0, encoding='bytes',
            max_rows=None, like=None):

75

"""

76

Load data from text file.

77

78

Parameters:

79

- fname: file, str, or pathlib.Path, file to read

80

- dtype: data-type, data type of array

81

- comments: str or sequence, comment character(s)

82

- delimiter: str, field delimiter

83

- converters: dict, column conversion functions

84

- skiprows: int, number of rows to skip

85

- usecols: int or sequence, columns to read

86

- unpack: bool, return separate arrays for columns

87

- ndmin: int, minimum number of dimensions

88

- encoding: str, encoding to decode input

89

- max_rows: int, maximum rows to read

90

- like: array_like, reference object

91

92

Returns:

93

ndarray: Data read from text file

94

"""

95

96

def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
            footer='', comments='# ', encoding=None):

98

"""

99

Save array to text file.

100

101

Parameters:

102

- fname: filename or file handle, output file

103

- X: 1-D or 2-D array_like, data to save

104

- fmt: str or sequence of strs, format for each column

105

- delimiter: str, string or character separating columns

106

- newline: str, string separating lines

107

- header: str, string written at beginning of file

108

- footer: str, string written at end of file

109

- comments: str, string prepended to header and footer

110

- encoding: {None, str}, encoding for output

111

112

Returns:

113

None: Saves array to text file

114

"""

115

116

def genfromtxt(fname, dtype=float, comments='#', delimiter=None, skip_header=0,
               skip_footer=0, converters=None, missing_values=None,
               filling_values=None, usecols=None, names=None, excludelist=None,
               deletechars=None, defaultfmt="f%i", autostrip=False,
               replace_space='_', case_sensitive=True, unpack=None,
               invalid_raise=True, max_rows=None, encoding='bytes', like=None):

121

"""

122

Load data from text file, handling missing values.

123

124

Parameters:

125

- fname: file, str, list, or generator, input data

126

- dtype: numpy data type, data type of array

127

- comments: str, comment character

128

- delimiter: str, field separator

129

- skip_header: int, number of lines to skip from beginning

130

- skip_footer: int, number of lines to skip from end

131

- converters: dict, column converters

132

- missing_values: set of str, strings indicating missing data

133

- filling_values: scalar or dict, values for missing data

134

- usecols: sequence, columns to read

135

- names: {None, True, str, sequence}, field names for structured array

136

- excludelist: sequence, names of fields to exclude

137

- deletechars: str, characters to remove from field names

138

- defaultfmt: str, default format for field names

139

- autostrip: bool, automatically strip whitespace

140

- replace_space: str, character to replace spaces in field names

141

- case_sensitive: bool, case sensitivity for field names

142

- unpack: bool, return separate arrays for columns

143

- invalid_raise: bool, raise exception on inconsistent data

144

- max_rows: int, maximum rows to read

145

- encoding: str, encoding to decode input

146

- like: array_like, reference object

147

148

Returns:

149

ndarray: Data from text file with missing value handling

150

"""

151

152

def fromregex(file, regexp, dtype, encoding=None):

153

"""

154

Construct array from text file using regular expression parsing.

155

156

Parameters:

157

- file: file or str, input file

158

- regexp: str or regexp, regular expression

159

- dtype: data-type, data type for array

160

- encoding: str, encoding to decode input

161

162

Returns:

163

ndarray: Array constructed from regex parsing

164

"""

165

```

166

167

### Binary Data Construction

168

169

Create arrays from various binary data sources.

170

171

```python { .api }

172

def frombuffer(buffer, dtype=float, count=-1, offset=0, like=None):

173

"""

174

Interpret buffer as 1-D array.

175

176

Parameters:

177

- buffer: buffer_like, object exposing buffer interface

178

- dtype: data-type, data type of array

179

- count: int, number of items to read (-1 for all)

180

- offset: int, start reading from this offset

181

- like: array_like, reference object

182

183

Returns:

184

ndarray: 1-D array from buffer

185

"""

186

187

def fromfile(file, dtype=float, count=-1, sep='', offset=0, like=None):

188

"""

189

Construct array from data in text or binary file.

190

191

Parameters:

192

- file: file or str, input file

193

- dtype: data-type, data type of returned array

194

- count: int, number of items to read (-1 for all)

195

- sep: str, separator between items ('' for binary)

196

- offset: int, offset in bytes from current position

197

- like: array_like, reference object

198

199

Returns:

200

ndarray: Array from file data

201

"""

202

203

def fromstring(string, dtype=float, count=-1, sep='', like=None):

204

"""

205

Create array from string data.

206

207

Parameters:

208

- string: str, string containing array data

209

- dtype: data-type, data type of array

210

- count: int, number of items to read (-1 for all)

211

- sep: str, separator between items

212

- like: array_like, reference object

213

214

Returns:

215

ndarray: Array from string data

216

"""

217

218

def fromiter(iterable, dtype, count=-1, like=None):

219

"""

220

Create array from iterable object.

221

222

Parameters:

223

- iterable: iterable, object to convert to array

224

- dtype: data-type, data type of returned array

225

- count: int, number of items to read (-1 for all)

226

- like: array_like, reference object

227

228

Returns:

229

ndarray: Array from iterable

230

"""

231

```

232

233

### Bit Packing Operations

234

235

Pack and unpack binary data.

236

237

```python { .api }

238

def packbits(a, axis=None, bitorder='big'):

239

"""

240

Pack elements of uint8 array into bits in uint8 array.

241

242

Parameters:

243

- a: array_like, input array of integers or booleans

244

- axis: int, dimension over which to pack bits

245

- bitorder: {'big', 'little'}, bit order within bytes

246

247

Returns:

248

ndarray: Packed array

249

"""

250

251

def unpackbits(a, axis=None, count=None, bitorder='big'):

252

"""

253

Unpack elements of uint8 array into binary-valued output array.

254

255

Parameters:

256

- a: ndarray, input array (uint8 type)

257

- axis: int, dimension over which to unpack bits

258

- count: int or None, number of bits to unpack along the axis (None unpacks all; a negative value counts from the end)

259

- bitorder: {'big', 'little'}, bit order within bytes

260

261

Returns:

262

ndarray: Unpacked array with binary values

263

"""

264

```

265

266

### Memory Mapping

267

268

Memory-mapped file access for large arrays.

269

270

```python { .api }

271

class memmap(ndarray):

272

"""

273

Create memory-map to array stored in binary file on disk.

274

275

Parameters:

276

- filename: str, pathlib.Path, or file object, file name

277

- dtype: data-type, data type of array

278

- mode: {'r+', 'r', 'w+', 'c'}, file access mode

279

- offset: int, offset in bytes from beginning of file

280

- shape: tuple, shape of array

281

- order: {'C', 'F'}, row or column major order

282

283

Returns:

284

memmap: Memory-mapped array

285

"""

286

def __new__(subtype, filename, dtype=float, mode='r+', offset=0, shape=None, order='C'): ...

287

```

288

289

## Usage Examples

290

291

### Basic File Operations

292

293

```python

294

import numpy as np

295

296

# Create sample data

297

data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])

298

299

# Save to binary format (.npy)

300

np.save('data.npy', data)

301

302

# Load from binary format

303

loaded_data = np.load('data.npy')

304

305

# Save multiple arrays to compressed file

306

extra_data = np.array([10, 20, 30])

307

np.savez_compressed('multiple_arrays.npz', main=data, extra=extra_data)

308

309

# Load from npz file

310

with np.load('multiple_arrays.npz') as npz_file:
    main_array = npz_file['main']
    extra_array = npz_file['extra']

313

```

314

315

### Text File Operations

316

317

```python

318

import numpy as np

319

320

# Sample data

321

data = np.array([[1.1, 2.2, 3.3],
                 [4.4, 5.5, 6.6],
                 [7.7, 8.8, 9.9]])

324

325

# Save to text file

326

np.savetxt('data.txt', data, fmt='%.2f', delimiter=',',
           header='col1,col2,col3', comments='')

328

329

# Load from text file

330

loaded_text = np.loadtxt('data.txt', delimiter=',', skiprows=1)

331

332

# Load CSV with headers and missing values

333

csv_data = """# Weather data
# Date,Temperature,Humidity,Pressure
2023-01-01,22.5,65,1013.2
2023-01-02,,70,1015.8
2023-01-03,25.1,NaN,1012.4"""

338

339

with open('weather.csv', 'w') as f:
    f.write(csv_data)

341

342

# Load with missing value handling

343

weather = np.genfromtxt('weather.csv', delimiter=',', skip_header=2,
                        missing_values=['', 'NaN'], filling_values=np.nan,
                        usecols=(1, 2, 3))

346

```

347

348

### Working with Large Files

349

350

```python

351

import numpy as np

352

353

# Create large array for demonstration

354

large_data = np.random.random((1000, 1000))

355

np.save('large_data.npy', large_data)

356

357

# Use memory mapping for large files (doesn't load into memory)

358

mmapped = np.load('large_data.npy', mmap_mode='r')

359

360

# Access parts of the array without loading everything

361

subset = mmapped[100:200, 200:300] # Only loads needed section

362

row_sum = np.sum(mmapped[0, :]) # Efficient row operations

363

364

# Create memory-mapped array directly

365

mmap_array = np.memmap('temp_mmap.dat', dtype='float32', mode='w+', shape=(1000, 1000))

366

mmap_array[:] = np.random.random((1000, 1000)).astype('float32')

367

mmap_array.flush() # Ensure data is written to disk

368

```

369

370

### Binary Data Handling

371

372

```python

373

import numpy as np

374

375

# Pack bits for efficient storage

376

binary_data = np.array([0, 1, 1, 0, 1, 0, 0, 1], dtype=np.uint8)

377

packed = np.packbits(binary_data) # Pack 8 bits into 1 byte

378

379

# Unpack bits

380

unpacked = np.unpackbits(packed)

381

382

# Create array from buffer (e.g., from bytes)

383

byte_data = b'\x01\x02\x03\x04'

384

from_buffer = np.frombuffer(byte_data, dtype=np.uint8)

385

386

# Create array from iterator

387

squares = np.fromiter((x**2 for x in range(10)), dtype=int)

388

```