Fast Python bindings for the UnQLite embedded NoSQL database.

Random-data generation and library-information utilities provided by UnQLite, useful for testing, development, and system introspection.

Generate random strings and integers for testing, unique identifiers, and sample-data creation.
def random_string(self, int nbytes):
    """Generate a random string of given length.

    NOTE(review): the randomness comes from UnQLite's internal generator —
    presumably a PRNG, not a CSPRNG; confirm before using for secrets.

    Args:
        nbytes: Number of bytes for the random string

    Returns:
        bytes: Random byte string of specified length
    """
    ...
def random_int(self):
    """Generate a random integer.

    Returns:
        int: Random integer value.  Range and signedness are not
            documented here — presumably a platform-width unsigned
            value; confirm against the UnQLite C API docs.
    """
    ...

Usage Example:
# Open an in-memory database for this example.
db = unqlite.UnQLite(':mem:')

# Random byte strings of several sizes.
short_random = db.random_string(8)
print(f"Short random (8 bytes): {short_random}")

medium_random = db.random_string(32)
print(f"Medium random (32 bytes): {medium_random}")

long_random = db.random_string(128)
print(f"Long random (128 bytes): {len(long_random)} bytes")

# A handful of random integers.
for i in range(5):
    rand_int = db.random_int()
    print(f"Random integer {i+1}: {rand_int}")

Use random functions to create unique identifiers for records:
import hashlib
import time

db = unqlite.UnQLite(':mem:')

def generate_unique_id():
    """Generate unique identifier using random data and timestamp."""
    entropy = db.random_string(16)
    stamp = str(time.time()).encode()
    # Hash random bytes together with the timestamp, keep 16 hex chars.
    return hashlib.sha256(entropy + stamp).hexdigest()[:16]

# Generate unique IDs for records
for i in range(5):
    record_id = generate_unique_id()
    db[f"record_{record_id}"] = f"Data for record {i+1}"
    print(f"Generated ID: {record_id}")

# Verify records were stored
print(f"Total records: {len(db)}")

Generate random data for security applications:
import base64

db = unqlite.UnQLite(':mem:')

# NOTE(review): db.random_string draws from UnQLite's generator, not
# Python's `secrets` module — confirm it is cryptographically secure
# before using these tokens in production.

def generate_session_token():
    """Generate secure session token."""
    raw = db.random_string(32)  # 256 bits of randomness
    return base64.urlsafe_b64encode(raw).decode().rstrip('=')

def generate_api_key():
    """Generate API key with prefix."""
    raw = db.random_string(24)  # 192 bits
    encoded = base64.urlsafe_b64encode(raw).decode().rstrip('=')
    return f"ak_{encoded}"

# Generate tokens
session_token = generate_session_token()
api_key = generate_api_key()
print(f"Session token: {session_token}")
print(f"API key: {api_key}")

# Store tokens with expiration info
import time
current_time = int(time.time())
db['sessions'] = f'{{"token": "{session_token}", "created": {current_time}, "expires": {current_time + 3600}}}'
db['api_keys'] = f'{{"key": "{api_key}", "created": {current_time}, "active": true}}'

Create realistic test data using random utilities:
import json
import random
db = unqlite.UnQLite('testdata.db')
# Sample data templates
FIRST_NAMES = ['Alice', 'Bob', 'Charlie', 'Diana', 'Eve', 'Frank', 'Grace', 'Henry']
LAST_NAMES = ['Smith', 'Johnson', 'Williams', 'Brown', 'Jones', 'Garcia', 'Miller', 'Davis']
DEPARTMENTS = ['Engineering', 'Marketing', 'Sales', 'HR', 'Finance', 'Operations']
def generate_test_user():
"""Generate test user with random data."""
# Use database random functions
user_id = db.random_int() % 10000 + 1000 # ID between 1000-10999
# Random selections from predefined lists
first_name = random.choice(FIRST_NAMES)
last_name = random.choice(LAST_NAMES)
department = random.choice(DEPARTMENTS)
# Random salary in realistic range
salary = (db.random_int() % 80000) + 40000 # $40k-$120k
# Random binary data for profile picture placeholder
avatar_data = db.random_string(64) # Simulate image hash
user = {
'id': user_id,
'name': f"{first_name} {last_name}",
'email': f"{first_name.lower()}.{last_name.lower()}@example.com",
'department': department,
'salary': salary,
'active': db.random_int() % 10 > 1, # 90% active
'avatar_hash': avatar_data.hex()
}
return user
# Generate test dataset
print("Generating test users...")
for i in range(50):
user = generate_test_user()
db[f"user:{user['id']}"] = json.dumps(user)
print(f"Generated {len(db)} test records")
# Verify some records
with db.cursor() as cursor:
cursor.first()
for _ in range(3): # Show first 3 records
if cursor.is_valid():
key = cursor.key()
value = json.loads(cursor.value())
print(f"{key}: {value['name']} - {value['department']}")
try:
cursor.next_entry()
except StopIteration:
                break

Access UnQLite library version and configuration information.
def lib_version(self):
    """Get UnQLite library version.

    Returns:
        str: Version string of the underlying UnQLite C library
            (dotted form, e.g. "1.1.9").
    """
    ...

Usage Example:
import json
import platform
import sys

db = unqlite.UnQLite(':mem:')

# Get library version
version = db.lib_version()
print(f"UnQLite library version: {version}")

# Display system information
print(f"Database type: {'In-memory' if db.is_memory else 'File-based'}")
print(f"Database open: {db.is_open}")
print(f"Database file: {db.filename}")

# Create system info record.  Note: the original example called
# json.dumps without importing json; the import above fixes that.
system_info = {
    'unqlite_version': version,
    'python_version': sys.version,
    'platform': platform.platform(),
    'database_type': 'memory' if db.is_memory else 'file'
}
db['system_info'] = json.dumps(system_info, indent=2)
print("System information stored")

Use random data for performance testing:
import random  # required by benchmark_reads; missing from the original example
import statistics
import time

db = unqlite.UnQLite(':mem:')

def benchmark_writes(num_records):
    """Benchmark write performance with random data."""
    print(f"Benchmarking {num_records} writes with random data...")
    start_time = time.time()
    write_times = []
    for i in range(num_records):
        record_start = time.time()
        # Generate random key and value
        key_data = db.random_string(16)
        value_data = db.random_string(64)
        key = f"benchmark_{key_data.hex()[:8]}"
        value = value_data.hex()
        db[key] = value
        write_times.append(time.time() - record_start)
    total_time = time.time() - start_time
    print(f"Total time: {total_time:.4f} seconds")
    print(f"Records per second: {num_records / total_time:.2f}")
    print(f"Average write time: {statistics.mean(write_times) * 1000:.4f} ms")
    print(f"Min write time: {min(write_times) * 1000:.4f} ms")
    print(f"Max write time: {max(write_times) * 1000:.4f} ms")

def benchmark_reads(num_reads):
    """Benchmark read performance."""
    print(f"Benchmarking {num_reads} random reads...")
    # Get all keys for random selection
    all_keys = list(db.keys())
    if len(all_keys) == 0:
        print("No data to read")
        return
    start_time = time.time()
    read_times = []
    for i in range(num_reads):
        # Select a random key (random.choice needs the random module).
        key = random.choice(all_keys)
        record_start = time.time()
        value = db[key]
        read_times.append(time.time() - record_start)
    total_time = time.time() - start_time
    print(f"Total time: {total_time:.4f} seconds")
    print(f"Reads per second: {num_reads / total_time:.2f}")
    print(f"Average read time: {statistics.mean(read_times) * 1000:.4f} ms")

# Run benchmarks
benchmark_writes(1000)
benchmark_reads(500)
print(f"Final database size: {len(db)} records")

Generate test data for validation scenarios:
db = unqlite.UnQLite(':mem:')

def generate_edge_case_data():
    """Generate edge cases for testing."""
    test_cases = [
        # Empty values in both text and binary form.
        ('empty_string', ''),
        ('empty_bytes', b''),
        # Random binary payload.
        ('random_binary', db.random_string(256)),
        # 10 KB of random data, hex-encoded.
        ('large_string', db.random_string(1024 * 10).hex()),
    ]
    # A handful of random numbers as strings.
    for i in range(5):
        test_cases.append((f'random_int_{i}', str(db.random_int())))
    return test_cases

# Generate and store test cases
test_data = generate_edge_case_data()
print("Storing edge case test data...")
for key, value in test_data:
    db[f'test_{key}'] = value
    print(f"Stored: {key} ({type(value).__name__}, {len(str(value))} chars)")

# Verify all test data can be retrieved
print("\nVerifying test data retrieval...")
for key, expected_value in test_data:
    retrieved_value = db[f'test_{key}']
    # Bytes compare directly; everything else via str().
    if isinstance(expected_value, bytes):
        matches = retrieved_value == expected_value
    else:
        matches = str(retrieved_value) == str(expected_value)
    status = "✓" if matches else "✗"
    print(f"{status} {key}: {matches}")

# Good: Appropriate random data size for use case
session_id = db.random_string(32) # 256 bits for session IDs
csrf_token = db.random_string(16) # 128 bits for CSRF tokens
# Handle binary data properly
random_bytes = db.random_string(20)
hex_string = random_bytes.hex() # Convert to hex string
b64_string = base64.b64encode(random_bytes).decode() # Convert to base64
# Version-aware code
version = db.lib_version()
if version >= "1.1.9":
# Use newer features
pass
else:
# Fallback for older versions
    pass

Install with Tessl CLI:
npx tessl i tessl/pypi-unqlite