CuPy: a NumPy- and SciPy-compatible GPU-accelerated array library for CUDA computing.

File I/O operations supporting NumPy's binary formats (.npy/.npz) and text formats, with GPU-optimized loading and saving capabilities. CuPy provides comprehensive file operations for data persistence and exchange between CPU and GPU memory spaces.
High-performance binary file operations for efficient data storage and loading.
def save(file, arr, allow_pickle=True, fix_imports=True):
    """Save an array to a binary file in NumPy ``.npy`` format.

    Args:
        file: File path or file object to write to.
        arr: Array to save.
        allow_pickle: Allow pickling of Python objects contained in the array.
        fix_imports: Fix Python 2/3 compatibility when pickling.

    Note:
        Saves in uncompressed .npy format.
    """
def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True, encoding='ASCII'):
    """Load an array from a binary file (``.npy``/``.npz``).

    Args:
        file: File path or file object to read from.
        mmap_mode: Memory mapping mode (None, 'r', 'r+', 'w+', 'c').
        allow_pickle: Allow loading pickled object arrays.
        fix_imports: Fix Python 2/3 compatibility when unpickling.
        encoding: String encoding for Python 2/3 compatibility.

    Returns:
        cupy.ndarray: Loaded array on GPU.

    Note:
        NOTE(review): example code elsewhere in this file uses the result of
        loading a ``.npz`` file as an ``NpzFile`` context manager, so the
        return type likely depends on the input file — confirm.
    """
def savez(file, *args, **kwds):
    """Save multiple arrays into a single uncompressed ``.npz`` archive.

    Args:
        file: Output file path.
        *args: Arrays to save (stored as arr_0, arr_1, ...).
        **kwds: Arrays to save under the given keyword names.

    Note:
        Creates uncompressed .npz archive
    """
def savez_compressed(file, *args, **kwds):
    """Save multiple arrays into a single compressed ``.npz`` archive.

    Args:
        file: Output file path.
        *args: Arrays to save (stored as arr_0, arr_1, ...).
        **kwds: Arrays to save under the given keyword names.

    Note:
        Creates compressed .npz archive with better compression
    """
def loadz(file):
    """Load arrays from a ``.npz`` archive.

    Args:
        file: Path to the ``.npz`` file.

    Returns:
        NpzFile: Dictionary-like object for array access.

    Note:
        NOTE(review): NumPy/CuPy expose ``.npz`` loading through ``load``
        rather than a ``loadz`` function — confirm this name against the
        real API before relying on it.
    """

# Text-based file operations for human-readable data exchange and compatibility.
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n',
            header='', footer='', comments='# ', encoding=None):
    """Save an array to a text file.

    Args:
        fname: Output filename.
        X: Array to save (1D or 2D).
        fmt: Format string or sequence of format strings.
        delimiter: String/character separating columns.
        newline: String/character separating lines.
        header: Text written at the beginning of the file.
        footer: Text written at the end of the file.
        comments: String prepended to header/footer lines.
        encoding: Text encoding.

    Note:
        Data is transferred to CPU before saving
    """
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
            converters=None, skiprows=0, usecols=None, unpack=False,
            ndmin=0, encoding='bytes', max_rows=None):
    """Load data from a text file.

    Args:
        fname: Input filename.
        dtype: Data type of the resulting array.
        comments: Character(s) marking the start of a comment.
        delimiter: String used to separate values.
        converters: Dictionary of per-column converter functions.
        skiprows: Number of rows to skip at the beginning.
        usecols: Columns to read (None for all).
        unpack: Unpack columns into separate arrays.
        ndmin: Minimum number of dimensions of the returned array.
        encoding: Text encoding.
        max_rows: Maximum number of rows to read.

    Returns:
        cupy.ndarray: Loaded data on GPU.
    """
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
               skip_header=0, skip_footer=0, converters=None,
               missing_values=None, filling_values=None, usecols=None,
               names=None, excludelist=None, deletechars=None,
               defaultfmt='f%i', autostrip=False, replace_space='_',
               case_sensitive=True, unpack=None, invalid_raise=True,
               max_rows=None, encoding='bytes'):
    """Enhanced text loading with missing-value support.

    Args:
        fname: Input filename.
        dtype: Data type of the result.
        comments: Comment character(s).
        delimiter: Value separator.
        skip_header: Lines to skip at the beginning.
        skip_footer: Lines to skip at the end.
        converters: Per-column converter functions.
        missing_values: Strings indicating missing data.
        filling_values: Values substituted for missing data.
        usecols: Columns to use.
        names: Field names for structured arrays.
        excludelist: Names of fields to exclude.
        deletechars: Characters to remove from field names.
        defaultfmt: Default format for field names.
        autostrip: Strip whitespace from values.
        replace_space: Character replacing spaces in field names.
        case_sensitive: Case sensitivity for field names.
        unpack: Unpack columns into separate arrays.
        invalid_raise: Raise an exception on invalid values.
        max_rows: Maximum rows to read.
        encoding: Text encoding.

    Returns:
        cupy.ndarray: Loaded data with missing-value handling.
    """

# Functions for converting arrays to string representations and formatting.
def array_str(a, max_line_width=None, precision=None, suppress_small=None):
    """Return a string representation of the data in an array.

    Args:
        a: Input array.
        max_line_width: Maximum characters per line.
        precision: Floating point precision.
        suppress_small: Suppress small values instead of scientific notation.

    Returns:
        str: String representation of the array data.
    """
def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
    """Return the ``repr``-style string representation of an array.

    Args:
        arr: Input array.
        max_line_width: Maximum characters per line.
        precision: Floating point precision.
        suppress_small: Suppress small values.

    Returns:
        str: String representation including the array constructor.
    """
def array2string(a, max_line_width=None, precision=None, suppress_small=None,
                 separator=' ', prefix="", style=str, formatter=None,
                 threshold=None, edgeitems=None, sign=None,
                 floatmode=None, suffix="", **kwds):
    """Return a string representation with comprehensive formatting control.

    Args:
        a: Input array.
        max_line_width: Maximum line width.
        precision: Number precision.
        suppress_small: Suppress small values.
        separator: Element separator.
        prefix: String prefix.
        style: Formatting style function.
        formatter: Custom formatting functions keyed by type.
        threshold: Total item count that triggers summarization.
        edgeitems: Items shown at the beginning/end when summarized.
        sign: Sign handling (' ', '+', '-').
        floatmode: Float formatting mode.
        suffix: String suffix.
        **kwds: Additional formatting parameters.

    Returns:
        str: Formatted string representation.
    """
def base_repr(number, base=2, padding=0):
    """Return a string representation of a number in the given base.

    Args:
        number: Input number.
        base: Number base (2-36).
        padding: Minimum width with zero padding.

    Returns:
        str: Representation of ``number`` in the specified base.
    """

# Advanced file operations for memory-mapped arrays and direct disk access.
def memmap(filename, dtype='uint8', mode='r+', offset=0, shape=None, order='C'):
    """Create a memory-mapped array backed by a file on disk.

    Args:
        filename: Path to the file.
        dtype: Data type.
        mode: File access mode ('r', 'r+', 'w+', 'c').
        offset: Byte offset in the file.
        shape: Array shape (required for modes other than 'r').
        order: Memory layout ('C' or 'F').

    Returns:
        numpy.memmap: Memory-mapped array (CPU-based).

    Note:
        Returns NumPy memmap object. Use cp.asarray() to copy to GPU.
    """
def fromfile(file, dtype=float, count=-1, sep='', offset=0):
    """Create an array from binary or text file data.

    Args:
        file: Open file object or filename.
        dtype: Data type of array elements.
        count: Number of items to read (-1 for all).
        sep: Separator for text files (empty string for binary).
        offset: Byte offset for binary files.

    Returns:
        cupy.ndarray: Array created from the file data.
    """
def tofile(arr, fid, sep="", format="%s"):
    """Write an array to a file as text or binary data.

    Args:
        arr: Input array.
        fid: Open file object or filename.
        sep: Separator for text output (empty string for binary).
        format: Format string for text output.

    Note:
        Data is transferred to CPU before writing
    """
def fromstring(string, dtype=float, count=-1, sep=''):
    """Create an array from data in a string.

    Args:
        string: Input string.
        dtype: Data type.
        count: Number of items to read (-1 for all).
        sep: Separator character.

    Returns:
        cupy.ndarray: Array built from the string data.

    Note:
        NOTE(review): NumPy deprecates ``fromstring`` for binary input in
        favor of ``frombuffer`` — confirm CuPy's status for this function.
    """
def frombuffer(buffer, dtype=float, count=-1, offset=0):
    """Create an array from a buffer object.

    Args:
        buffer: Buffer object (bytes-like).
        dtype: Data type.
        count: Number of items to read (-1 for all).
        offset: Byte offset into the buffer.

    Returns:
        cupy.ndarray: Array built from the buffer data.
    """

# Functions for controlling array display and I/O behavior.
def set_printoptions(precision=None, threshold=None, edgeitems=None,
                     linewidth=None, suppress=None, nanstr=None,
                     infstr=None, formatter=None, sign=None,
                     floatmode=None, **kwds):
    """Set global printing options for arrays.

    Args:
        precision: Number of digits of precision.
        threshold: Total element count that triggers summarization.
        edgeitems: Number of edge items shown in summary mode.
        linewidth: Characters per line before wrapping.
        suppress: Suppress small floating point values.
        nanstr: String representation of NaN values.
        infstr: String representation of infinity values.
        formatter: Custom formatting functions keyed by type.
        sign: Control sign display.
        floatmode: Floating point formatting mode.
        **kwds: Additional options.
    """
def get_printoptions():
    """Return the current print options.

    Returns:
        dict: Current printing configuration.
    """
def printoptions(*args, **kwargs):
    """Context manager that temporarily overrides print options.

    Args:
        *args, **kwargs: Same arguments as ``set_printoptions``.

    Returns:
        Context manager restoring the previous options on exit.
    """
def set_string_function(f, repr=True):
    """Set the function used to build array string representations.

    Args:
        f: Function used to convert an array to a string.
        repr: Whether ``f`` is used for ``repr`` (True) or ``str`` (False).
    """

# Functions for converting between different data formats and libraries.
def asarray(a, dtype=None, order=None):
    """Convert the input to a CuPy array on the GPU.

    Args:
        a: Input data (any array-like).
        dtype: Data type.
        order: Memory layout.

    Returns:
        cupy.ndarray: Array on GPU.
    """
def asnumpy(a, stream=None, order='C', out=None):
    """Convert a CuPy array to a NumPy array on the host.

    Args:
        a: Input CuPy array.
        stream: CUDA stream for asynchronous transfer.
        order: Memory layout ('C', 'F', 'A', 'K').
        out: Pre-allocated output NumPy array.

    Returns:
        numpy.ndarray: Array on CPU.
    """
def from_dlpack(x):
    """Create a CuPy array from an object supporting the DLPack protocol.

    Args:
        x: Object with a ``__dlpack__`` method.

    Returns:
        cupy.ndarray: Array sharing memory with the DLPack tensor.
    """
def fromDlpack(dltensor):
    """Create an array from a DLPack tensor capsule.

    Args:
        dltensor: DLPack tensor capsule.

    Returns:
        cupy.ndarray: Array built from the DLPack tensor.

    Note:
        NOTE(review): this camelCase form appears to be the legacy spelling
        of ``from_dlpack`` above — confirm deprecation status against the
        installed CuPy version.
    """

# Advanced operations for compressed data and multi-file archives.
class NpzFile:
    """Dictionary-like reader for ``.npz`` archive files.

    Provides dictionary-like access to arrays stored in .npz files.

    Attributes:
        files: List of array names stored in the archive.
    """

    def __init__(self, fid, own_fid=False, allow_pickle=False,
                 pickle_kwargs=None):
        # fid: open file object for the archive.
        # own_fid: if True, this object is responsible for closing fid.
        # allow_pickle / pickle_kwargs: control unpickling of object arrays.
        pass

    def __enter__(self):
        # Enables "with load('archive.npz') as archive:" usage.
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Always close the underlying archive on context exit.
        self.close()

    def close(self):
        """Close the archive file."""
        pass

    def __getitem__(self, key):
        """Load the array stored under ``key`` from the archive."""
        pass
def save_compressed(file, **arrays):
    """Save arrays to a compressed archive with optimal compression.

    Args:
        file: Output file path.
        **arrays: Arrays to save under the given keyword names.

    Note:
        Uses advanced compression for minimal file size.
        NOTE(review): not part of the public NumPy/CuPy API — confirm the
        origin of this helper before relying on it.
    """
def load_archive(file, lazy=False):
    """Load an archive with optional lazy loading.

    Args:
        file: Archive file path.
        lazy: If True, load arrays only when accessed.

    Returns:
        dict or NpzFile: Archive contents.

    Note:
        NOTE(review): not part of the public NumPy/CuPy API — confirm the
        origin of this helper before relying on it.
    """

import cupy as cp
import numpy as np

# --- Example: binary save/load of single arrays and .npz archives ---
# Create sample data
data = cp.random.randn(1000, 100).astype(cp.float32)
labels = cp.random.randint(0, 10, 1000)
metadata = {'version': '1.0', 'description': 'Sample dataset'}
print(f"Data shape: {data.shape}, dtype: {data.dtype}")
print(f"Labels shape: {labels.shape}, dtype: {labels.dtype}")
# Save single array to binary format
cp.save('data.npy', data)
print("Saved data to 'data.npy'")
# Load single array
loaded_data = cp.load('data.npy')
print(f"Loaded data shape: {loaded_data.shape}")
print(f"Arrays equal: {cp.array_equal(data, loaded_data)}")
# Save multiple arrays to archive
cp.savez('dataset.npz',
         features=data,
         targets=labels,
         train_indices=cp.arange(800),
         test_indices=cp.arange(800, 1000))
print("Saved multiple arrays to 'dataset.npz'")
# Load from archive (context manager closes the file handle on exit)
with cp.load('dataset.npz') as archive:
    print(f"Archive files: {archive.files}")
    features = archive['features']
    targets = archive['targets']
    train_idx = archive['train_indices']
    test_idx = archive['test_indices']
    print(f"Loaded features shape: {features.shape}")
    print(f"Loaded targets shape: {targets.shape}")
    print(f"Train samples: {len(train_idx)}")
    print(f"Test samples: {len(test_idx)}")
# Compressed saving for better storage efficiency
cp.savez_compressed('dataset_compressed.npz',
                    features=data,
                    targets=labels,
                    metadata=str(metadata))
print("Saved compressed archive to 'dataset_compressed.npz'")

import cupy as cp
import os

# --- Example: text-format I/O with savetxt / loadtxt / genfromtxt ---
# Create sample numeric data
measurements = cp.array([
    [1.23, 4.56, 7.89],
    [2.34, 5.67, 8.90],
    [3.45, 6.78, 9.01],
    [4.56, 7.89, 0.12]
])
# Save to text file with custom formatting
header_text = "Time Temperature Humidity"
cp.savetxt('measurements.txt', measurements,
           fmt='%.2f',
           delimiter='\t',
           header=header_text,
           comments='# ')
print("Saved measurements to 'measurements.txt'")
# Read the file content
with open('measurements.txt', 'r') as f:
    content = f.read()
print("File contents:")
print(content)
# Load from text file
loaded_measurements = cp.loadtxt('measurements.txt')
print(f"Loaded measurements shape: {loaded_measurements.shape}")
print(f"Arrays equal: {cp.allclose(measurements, loaded_measurements)}")
# Load specific columns
time_temp = cp.loadtxt('measurements.txt', usecols=(0, 1))
print(f"Time and temperature shape: {time_temp.shape}")
# Advanced text loading with missing values
# Create data with missing values
mixed_data_text = """# Weather data
1.0 2.5 normal
2.0 nan windy
3.0 1.8 normal
nan 3.2 storm
5.0 2.1 normal"""
with open('weather.txt', 'w') as f:
    f.write(mixed_data_text)
# Load with missing value handling
try:
    weather_data = cp.genfromtxt('weather.txt',
                                 missing_values={'nan': cp.nan},
                                 filling_values={'nan': -999},
                                 usecols=(0, 1),  # Only numeric columns
                                 dtype=float)
    print(f"Weather data with missing values:")
    print(weather_data)
    print(f"NaN count: {cp.sum(cp.isnan(weather_data))}")
except Exception as e:
print(f"Error loading weather data: {e}")import cupy as cp
import time
import os

# --- Example: benchmarking save/load formats and chunked saving ---
# Create large dataset for performance testing
n_samples = 100_000
n_features = 200
large_data = cp.random.randn(n_samples, n_features).astype(cp.float32)
print(f"Large dataset: {large_data.shape}, "
      f"size: {large_data.nbytes / 1024**2:.1f} MB")
# Time different saving methods
save_methods = [
    ('Uncompressed NPY', lambda: cp.save('large_data.npy', large_data)),
    ('Uncompressed NPZ', lambda: cp.savez('large_data.npz', data=large_data)),
    ('Compressed NPZ', lambda: cp.savez_compressed('large_compressed.npz', data=large_data)),
]
file_sizes = {}
for name, save_func in save_methods:
    start_time = time.perf_counter()
    save_func()
    cp.cuda.Stream.null.synchronize()  # Wait for GPU operations
    end_time = time.perf_counter()
    # Get file size; map the method label back to the filename it wrote
    if 'NPY' in name:
        filename = 'large_data.npy'
    elif 'Compressed' in name:
        filename = 'large_compressed.npz'
    else:
        filename = 'large_data.npz'
    file_size = os.path.getsize(filename) / 1024**2  # MB
    file_sizes[name] = file_size
    print(f"{name}: {(end_time - start_time)*1000:.1f} ms, "
          f"file size: {file_size:.1f} MB")
# Time loading methods
load_methods = [
    ('Load NPY', lambda: cp.load('large_data.npy')),
    ('Load NPZ', lambda: cp.load('large_data.npz')['data']),
    ('Load Compressed', lambda: cp.load('large_compressed.npz')['data']),
]
for name, load_func in load_methods:
    start_time = time.perf_counter()
    loaded = load_func()
    cp.cuda.Stream.null.synchronize()
    end_time = time.perf_counter()
    # Verify data integrity
    is_equal = cp.array_equal(large_data, loaded)
    print(f"{name}: {(end_time - start_time)*1000:.1f} ms, "
          f"data integrity: {is_equal}")
# Demonstrate chunked processing for very large files
chunk_size = 10_000
print(f"\nProcessing data in chunks of {chunk_size:,} samples:")
# Save data in chunks
for i in range(0, n_samples, chunk_size):
    chunk_end = min(i + chunk_size, n_samples)
    chunk_data = large_data[i:chunk_end]
    # Save each chunk
    chunk_filename = f'chunk_{i//chunk_size:03d}.npy'
    cp.save(chunk_filename, chunk_data)
    if i < 3 * chunk_size:  # Show first few chunks
        print(f"Saved chunk {i//chunk_size + 1}: "
              f"samples {i:,} to {chunk_end:,}")
print("Chunked saving completed")

import cupy as cp
import numpy as np
import os

# --- Example: memory-mapped files and chunked GPU processing ---
# Create large file for memory mapping demonstration
n_elements = 1_000_000
dtype = np.float32
# Create memory-mapped file
filename = 'large_memmap.dat'
mmap_array = np.memmap(filename, dtype=dtype, mode='w+', shape=(n_elements,))
# Fill with data (on CPU)
mmap_array[:] = np.random.randn(n_elements).astype(dtype)
mmap_array.flush()  # Ensure data is written to disk
# Fixed: the original f-string printed the literal "(unknown)" instead of
# interpolating the filename.
print(f"Created memory-mapped file: {filename}")
print(f"File size: {os.path.getsize(filename) / 1024**2:.1f} MB")
# Read portions of the file to GPU
chunk_size = 100_000
# Method 1: Load specific ranges
start_idx = 200_000
end_idx = start_idx + chunk_size
gpu_chunk = cp.asarray(mmap_array[start_idx:end_idx])
print(f"Loaded chunk [{start_idx:,}:{end_idx:,}] to GPU")
print(f"Chunk statistics: mean={cp.mean(gpu_chunk):.3f}, "
      f"std={cp.std(gpu_chunk):.3f}")
# Method 2: Process file in streaming fashion
def process_large_file(filename, chunk_size=50_000):
    """Process large memory-mapped file in chunks."""
    # Open as memory-mapped array
    mmap_data = np.memmap(filename, dtype=dtype, mode='r')
    n_total = len(mmap_data)
    results = []
    for i in range(0, n_total, chunk_size):
        chunk_end = min(i + chunk_size, n_total)
        # Load chunk to GPU
        gpu_chunk = cp.asarray(mmap_data[i:chunk_end])
        # Process chunk (example: compute statistics)
        chunk_stats = {
            'start': i,
            'end': chunk_end,
            'mean': float(cp.mean(gpu_chunk)),
            'std': float(cp.std(gpu_chunk)),
            'min': float(cp.min(gpu_chunk)),
            'max': float(cp.max(gpu_chunk))
        }
        results.append(chunk_stats)
        if i < 5 * chunk_size:  # Show first few chunks
            print(f"Chunk {i//chunk_size + 1}: "
                  f"mean={chunk_stats['mean']:.3f}, "
                  f"std={chunk_stats['std']:.3f}")
    return results
# Process the large file
chunk_results = process_large_file(filename)
print(f"Processed {len(chunk_results)} chunks")
# Aggregate results
overall_mean = np.mean([r['mean'] for r in chunk_results])
print(f"Overall mean (approximate): {overall_mean:.3f}")
# Direct file operations
# Save array directly from GPU to file
gpu_data = cp.random.randn(50_000).astype(cp.float32)
with open('direct_output.bin', 'wb') as f:
    # Convert to CPU and save
    cpu_data = cp.asnumpy(gpu_data)
    f.write(cpu_data.tobytes())
print(f"Saved GPU data directly to binary file")
# Read binary data back
with open('direct_output.bin', 'rb') as f:
    raw_bytes = f.read()
recovered_data = cp.frombuffer(raw_bytes, dtype=cp.float32)
print(f"Recovered data shape: {recovered_data.shape}")
print(f"Data integrity: {cp.array_equal(gpu_data, recovered_data)}")

import cupy as cp
# --- Example: array string formatting and print options ---
# Create sample arrays for formatting examples
small_array = cp.array([1.23456789, -2.3456789, 0.0001234])
large_array = cp.random.randn(100, 50)
integer_array = cp.arange(1000).reshape(10, 100)
# Basic string representations
print("Array string representations:")
print(f"Small array str: {cp.array_str(small_array)}")
print(f"Small array repr: {cp.array_repr(small_array)}")
# Custom formatting options
formatted = cp.array2string(small_array,
                            precision=3,
                            suppress_small=True,
                            separator=', ')
print(f"Custom formatted: {formatted}")
# Set global print options
original_options = cp.get_printoptions()
print(f"Original print options: {original_options}")
# Temporarily change print options
with cp.printoptions(precision=2, suppress=True, threshold=20):
    print("With custom print options:")
    print(f"Small array: {small_array}")
    print(f"Large array (summarized):\n{large_array}")
# Demonstrate different formatting styles
print("\nDifferent formatting styles:")
# Scientific notation
scientific = cp.array2string(small_array,
                             formatter={'float_kind': lambda x: f'{x:.2e}'})
print(f"Scientific: {scientific}")
# Fixed point
fixed_point = cp.array2string(small_array,
                              formatter={'float_kind': lambda x: f'{x:.4f}'})
print(f"Fixed point: {fixed_point}")
# Custom separators and brackets
custom_format = cp.array2string(small_array,
                                separator=' | ',
                                prefix='[',
                                suffix=']',
                                formatter={'float_kind': lambda x: f'{x:+.2f}'})
print(f"Custom format: {custom_format}")
# Integer formatting in different bases
binary_repr = cp.base_repr(42, base=2, padding=8)
hex_repr = cp.base_repr(42, base=16)
octal_repr = cp.base_repr(42, base=8)
print(f"\nNumber 42 in different bases:")
print(f"Binary (base 2): {binary_repr}")
print(f"Hexadecimal (base 16): {hex_repr}")
print(f"Octal (base 8): {octal_repr}")
# Array info for debugging
def array_info(arr, name="Array"):
    """Print comprehensive array information."""
    print(f"\n{name} Information:")
    print(f" Shape: {arr.shape}")
    print(f" Dtype: {arr.dtype}")
    print(f" Size: {arr.size:,} elements")
    print(f" Memory: {arr.nbytes / 1024**2:.2f} MB")
    print(f" Min/Max: {cp.min(arr):.3f} / {cp.max(arr):.3f}")
    print(f" Mean/Std: {cp.mean(arr):.3f} / {cp.std(arr):.3f}")
    # Show sample of data
    if arr.size <= 20:
        print(f" Data: {arr}")
    else:
        flat = arr.flatten()
        sample = cp.concatenate([flat[:5], flat[-5:]])
        print(f" Sample: [{sample[0]:.3f}, {sample[1]:.3f}, ..., "
              f"{sample[-2]:.3f}, {sample[-1]:.3f}]")
# Demonstrate array info function
array_info(small_array, "Small Array")
array_info(large_array, "Large Array")
array_info(integer_array, "Integer Array")

import cupy as cp
import numpy as np

# --- Example: CPU/GPU transfer, CUDA streams, memory sharing, DLPack ---
# Demonstrate conversion between CuPy and NumPy
print("GPU ↔ CPU Data Transfer:")
# Create data on GPU
gpu_data = cp.random.randn(1000, 500).astype(cp.float32)
print(f"GPU data: shape={gpu_data.shape}, device={gpu_data.device}")
# Transfer to CPU
cpu_data = cp.asnumpy(gpu_data)
print(f"CPU data: shape={cpu_data.shape}, type={type(cpu_data)}")
# Transfer back to GPU
gpu_data2 = cp.asarray(cpu_data)
print(f"Back to GPU: shape={gpu_data2.shape}, device={gpu_data2.device}")
print(f"Data integrity: {cp.array_equal(gpu_data, gpu_data2)}")
# Demonstrate asynchronous transfer with streams
stream1 = cp.cuda.Stream()
stream2 = cp.cuda.Stream()
print("\nAsynchronous transfers:")
large_gpu_data = cp.random.randn(5000, 1000)
with stream1:
    # Asynchronous transfer to CPU
    cpu_result1 = cp.asnumpy(large_gpu_data[:2500], stream=stream1)
with stream2:
    # Asynchronous transfer to CPU
    cpu_result2 = cp.asnumpy(large_gpu_data[2500:], stream=stream2)
# Wait for transfers to complete
stream1.synchronize()
stream2.synchronize()
print(f"Async transfer 1 completed: {cpu_result1.shape}")
print(f"Async transfer 2 completed: {cpu_result2.shape}")
# Combine results
combined_cpu = np.concatenate([cpu_result1, cpu_result2])
print(f"Combined result: {combined_cpu.shape}")
print(f"Transfer integrity: {np.array_equal(cp.asnumpy(large_gpu_data), combined_cpu)}")
# Demonstrate zero-copy operations where possible
print("\nMemory sharing and zero-copy operations:")
# Create GPU array
original_gpu = cp.arange(1000)
print(f"Original GPU array: {original_gpu.shape}")
# Create view (shares memory)
gpu_view = original_gpu[::2]  # Every other element
print(f"GPU view: {gpu_view.shape}, shares memory: {cp.shares_memory(original_gpu, gpu_view)}")
# Modify view
gpu_view *= 10
# Check if original was modified
print(f"Original modified through view: {cp.sum(original_gpu != cp.arange(1000)) > 0}")
print(f"First few elements of original: {original_gpu[:10]}")
# DLPack interoperability (if available)
try:
    # Convert to DLPack tensor
    dlpack_tensor = gpu_data.__dlpack__()
    # Create new array from DLPack tensor
    from_dlpack = cp.from_dlpack(dlpack_tensor)
    print(f"\nDLPack interoperability:")
    print(f"Original shape: {gpu_data.shape}")
    print(f"From DLPack shape: {from_dlpack.shape}")
    print(f"Memory shared: {cp.shares_memory(gpu_data, from_dlpack)}")
except Exception as e:
    print(f"DLPack not available: {e}")
# Clean up files created in examples
import os
files_to_clean = [
    'data.npy', 'dataset.npz', 'dataset_compressed.npz',
    'measurements.txt', 'weather.txt', 'large_data.npy',
    'large_data.npz', 'large_compressed.npz', 'large_memmap.dat',
    'direct_output.bin'
]
for filename in files_to_clean:
    if os.path.exists(filename):
        os.remove(filename)
# Clean up chunk files
for i in range(100):  # Arbitrary upper limit
    chunk_file = f'chunk_{i:03d}.npy'
    if os.path.exists(chunk_file):
        os.remove(chunk_file)
print("\nCleaned up temporary files")

# Install with the Tessl CLI:
#   npx tessl i tessl/pypi-cupy-cuda114