A Python wrapper of libjpeg-turbo for decoding and encoding JPEG images.
—
Quality
Pending
Does it follow best practices?
Impact
Pending
No eval scenarios have been run
Encoding functionality for converting numpy arrays to JPEG data with comprehensive quality control, subsampling options, various pixel format support, progressive encoding, and in-place encoding for memory efficiency.
Encode numpy arrays to JPEG format with full control over quality, subsampling, pixel formats, and encoding flags.
def encode(
    img_array: np.ndarray,
    quality: int = 85,
    pixel_format: int = TJPF_BGR,
    jpeg_subsample: int = TJSAMP_422,
    flags: int = 0,
    dst: bytearray | None = None
) -> bytes | tuple[bytearray, int]:
    """
    Encode numpy array to JPEG memory buffer.

    Args:
        img_array: Input image array (height, width, channels)
        quality: JPEG quality level (1-100, higher = better quality)
        pixel_format: Input pixel format (TJPF_* constants)
        jpeg_subsample: Chroma subsampling (TJSAMP_* constants)
        flags: Encoding flags (TJFLAG_* constants)
        dst: Optional pre-allocated buffer for in-place encoding

    Returns:
        bytes: JPEG data (if dst=None)
        tuple[bytearray, int]: (buffer, actual_size) if dst provided
    """

Encode from YUV format data directly to JPEG, useful when working with video data or avoiding color space conversions.
def encode_from_yuv(
    img_array: np.ndarray,
    height: int,
    width: int,
    quality: int = 85,
    jpeg_subsample: int = TJSAMP_420,
    flags: int = 0
) -> bytes:
    """
    Encode YUV array to JPEG memory buffer.

    Args:
        img_array: YUV image data as contiguous array
        height: Image height in pixels
        width: Image width in pixels
        quality: JPEG quality level (1-100)
        jpeg_subsample: Chroma subsampling (TJSAMP_* constants)
        flags: Encoding flags (TJFLAG_* constants)

    Returns:
        bytes: JPEG data
    """

Quality parameter ranges from 1-100:
TJSAMP_444: int   # 4:4:4 - No subsampling (best quality, largest size)
TJSAMP_422: int   # 4:2:2 - Horizontal subsampling (default)
TJSAMP_420: int   # 4:2:0 - Both horizontal and vertical subsampling
TJSAMP_GRAY: int  # Grayscale encoding
TJSAMP_440: int   # 4:4:0 - Vertical subsampling
TJSAMP_411: int   # 4:1:1 - High horizontal subsampling
TJSAMP_441: int   # 4:4:1 - Minimal subsampling

TJFLAG_PROGRESSIVE: int  # Progressive JPEG encoding
TJFLAG_FASTDCT: int      # Fast DCT (lower quality, faster)
TJFLAG_ACCURATEDCT: int  # Accurate DCT (higher quality, slower)
TJFLAG_BOTTOMUP: int     # Bottom-up pixel order

TJPF_RGB: int   # RGB pixel format
TJPF_BGR: int   # BGR pixel format (OpenCV default)
TJPF_RGBX: int  # RGBX pixel format
TJPF_BGRX: int  # BGRX pixel format
TJPF_XBGR: int  # XBGR pixel format
TJPF_XRGB: int  # XRGB pixel format
TJPF_GRAY: int  # Grayscale pixel format
TJPF_RGBA: int  # RGBA pixel format
TJPF_BGRA: int  # BGRA pixel format
TJPF_ABGR: int  # ABGR pixel format
TJPF_ARGB: int  # ARGB pixel format
TJPF_CMYK: int  # CMYK pixel format

import numpy as np
from turbojpeg import TurboJPEG
jpeg = TurboJPEG()
# Create sample image data
image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
# Encode with default settings (quality=85, BGR, 4:2:2 subsampling)
jpeg_data = jpeg.encode(image)
# Save to file
with open('output.jpg', 'wb') as f:
    f.write(jpeg_data)

# High quality encoding
high_quality = jpeg.encode(image, quality=95)
# Low quality encoding (smaller file size)
low_quality = jpeg.encode(image, quality=30)
# Maximum quality
max_quality = jpeg.encode(image, quality=100)
print(f"High quality size: {len(high_quality)} bytes")
print(f"Low quality size: {len(low_quality)} bytes")
print(f"Max quality size: {len(max_quality)} bytes")

from turbojpeg import TJSAMP_444, TJSAMP_420, TJSAMP_GRAY
# No subsampling (best quality, largest size)
no_subsample = jpeg.encode(image, jpeg_subsample=TJSAMP_444)
# 4:2:0 subsampling (smaller file, web standard)
subsample_420 = jpeg.encode(image, jpeg_subsample=TJSAMP_420)
# Grayscale encoding
grayscale = jpeg.encode(image, jpeg_subsample=TJSAMP_GRAY)
print(f"4:4:4 size: {len(no_subsample)} bytes")
print(f"4:2:0 size: {len(subsample_420)} bytes")
print(f"Grayscale size: {len(grayscale)} bytes")

from turbojpeg import TJFLAG_PROGRESSIVE
# Progressive JPEG (loads incrementally in browsers)
progressive_jpeg = jpeg.encode(
    image,
    quality=90,
    flags=TJFLAG_PROGRESSIVE
)
with open('progressive.jpg', 'wb') as f:
    f.write(progressive_jpeg)

from turbojpeg import TJPF_RGB, TJPF_GRAY
# RGB image (from PIL/Pillow)
rgb_image = np.array([[255, 0, 0], [0, 255, 0], [0, 0, 255]], dtype=np.uint8)
rgb_image = rgb_image.reshape(1, 3, 3) # 1x3 image
rgb_jpeg = jpeg.encode(rgb_image, pixel_format=TJPF_RGB)
# Grayscale image
gray_image = np.random.randint(0, 256, (100, 100, 1), dtype=np.uint8)
gray_jpeg = jpeg.encode(gray_image, pixel_format=TJPF_GRAY)

# Calculate required buffer size
buffer_size = jpeg.buffer_size(image)
print(f"Required buffer size: {buffer_size} bytes")
# Pre-allocate buffer
buffer = bytearray(buffer_size)
# Encode in-place
result, actual_size = jpeg.encode(image, dst=buffer)
# result is the same buffer object
assert result is buffer
# Write only the used portion
with open('inplace.jpg', 'wb') as f:
    f.write(buffer[:actual_size])
print(f"Actual encoded size: {actual_size} bytes")

# Encode from YUV data (e.g., from video processing)
yuv_data = np.random.randint(0, 256, (480*640*3//2,), dtype=np.uint8)
yuv_jpeg = jpeg.encode_from_yuv(
    yuv_data,
    height=480,
    width=640,
    quality=85,
    jpeg_subsample=TJSAMP_420
)
with open('from_yuv.jpg', 'wb') as f:
    f.write(yuv_jpeg)

from turbojpeg import TJFLAG_FASTDCT
# Fast encoding (lower quality, higher speed)
fast_jpeg = jpeg.encode(
    image,
    quality=75,
    flags=TJFLAG_FASTDCT
)
# Accurate encoding (higher quality, slower)
from turbojpeg import TJFLAG_ACCURATEDCT
accurate_jpeg = jpeg.encode(
    image,
    quality=85,
    flags=TJFLAG_ACCURATEDCT
)

Install with Tessl CLI
npx tessl i tessl/pypi-pyturbo-jpeg