FlexSearch's database abstraction layer supports multiple storage backends, providing persistent indexes and cross-session data retention for scalable search applications. Adapters are available for a range of database systems to enable persistent search capabilities.
StorageInterface is the abstract base class for all persistent storage implementations and provides a common interface for database operations.
/**
* Base class for persistent storage implementations
* @param name - Database name or identifier
* @param config - Persistent storage configuration
*/
class StorageInterface {
constructor(name: string, config: PersistentOptions);
constructor(config: string | PersistentOptions);
/** Database connection reference */
db: any;
}
Core database operations for connection management and data persistence.
/**
* Mount storage interface to a search index
* @param index - Index or Document instance to mount
* @returns Promise resolving when mount is complete
*/
mount(index: Index | Document): Promise<void>;
/**
* Open database connection
* @returns Promise resolving when connection is established
*/
open(): Promise<void>;
/**
* Close database connection
* @returns Promise resolving when connection is closed
*/
close(): Promise<void>;
/**
* Destroy database and remove all data
* @returns Promise resolving when destruction is complete
*/
destroy(): Promise<void>;
/**
* Clear all data from database
* @returns Promise resolving when data is cleared
*/
clear(): Promise<void>;
Usage Examples:
import { Index } from "flexsearch";
import IndexedDB from "flexsearch/db/indexeddb";
// Create storage interface
const storage = new IndexedDB("my-search-database", {
version: 1,
stores: ["index", "documents"]
});
// Create index
const index = new Index();
// Mount storage to index
await storage.mount(index);
// Add data - automatically persisted
index.add(1, "This content will be persisted");
index.add(2, "Another persistent document");
// Search works normally with persistent data
const results = index.search("persistent");
// Close connection when done
await storage.close();
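The remaining lifecycle methods follow the same promise-based pattern. A minimal sketch, reusing the database name from the example above (the maintenance variable is illustrative):
import IndexedDB from "flexsearch/db/indexeddb";
const maintenance = new IndexedDB("my-search-database");
// Open the connection before running maintenance operations
await maintenance.open();
// Remove all persisted index data but keep the database itself
await maintenance.clear();
// Destroy the database and remove all data
await maintenance.destroy();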
Browser-based persistent storage using IndexedDB for client-side data retention.
/**
* IndexedDB implementation for browser persistent storage
* @param name - Database name
* @param config - IndexedDB configuration options
*/
class IndexedDB extends StorageInterface {
constructor(name: string, config?: IndexedDBOptions);
}
interface IndexedDBOptions extends PersistentOptions {
/** Database version number */
version?: number;
/** Object store names to create */
stores?: string[];
/** Upgrade callback for schema changes */
upgrade?: (db: IDBDatabase, oldVersion: number, newVersion: number) => void;
}
Usage Examples:
import IndexedDB from "flexsearch/db/indexeddb";
import { Document } from "flexsearch";
// Create IndexedDB storage with custom configuration
const storage = new IndexedDB("search-app", {
version: 2,
stores: ["articles", "metadata"],
upgrade: (db, oldVersion, newVersion) => {
if (oldVersion < 2) {
// Handle schema migration
const store = db.createObjectStore("articles", { keyPath: "id" });
store.createIndex("date", "publishDate");
}
}
});
// Create document index with persistent storage
const docIndex = new Document({
document: {
id: "id",
index: ["title", "content"],
store: ["title", "content", "author", "publishDate"]
}
});
// Mount storage
await storage.mount(docIndex);
// Add documents - persisted automatically
docIndex.add({
id: 1,
title: "Persistent Search Guide",
content: "How to implement persistent search...",
author: "John Doe",
publishDate: "2023-01-15"
});
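Because the data lives in IndexedDB, a later session can mount the same database and search the documents persisted earlier. A minimal sketch, assuming the configuration from the example above:
// In a later session: reopen the same database
const restored = new IndexedDB("search-app", {
  version: 2,
  stores: ["articles", "metadata"]
});
const restoredIndex = new Document({
  document: {
    id: "id",
    index: ["title", "content"],
    store: ["title", "content", "author", "publishDate"]
  }
});
await restored.mount(restoredIndex);
// Documents added in the previous session are searchable without re-indexing
const hits = await restoredIndex.search("persistent");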
Redis-based storage for high-performance server-side persistent search indexes.
import Redis from "flexsearch/db/redis";
/**
* Redis implementation for server-side persistent storage
* @param config - Redis connection configuration
*/
class Redis extends StorageInterface {
constructor(config: RedisOptions);
}
interface RedisOptions extends PersistentOptions {
/** Redis server host */
host?: string;
/** Redis server port */
port?: number;
/** Redis database number */
db?: number;
/** Authentication password */
password?: string;
/** Connection timeout */
connectTimeout?: number;
/** Command timeout */
commandTimeout?: number;
}
Usage Examples:
import Redis from "flexsearch/db/redis";
import { Index } from "flexsearch";
// Create Redis storage
const storage = new Redis({
host: "localhost",
port: 6379,
db: 0,
password: "your-redis-password"
});
// Create index with Redis persistence
const index = new Index({
tokenize: "forward",
cache: true
});
await storage.mount(index);
// Data is now persisted in Redis
index.add(1, "Redis-backed search content");PostgreSQL database adapter for enterprise-scale persistent search applications.
PostgreSQL database adapter for enterprise-scale persistent search applications.
import PostgreSQL from "flexsearch/db/postgres";
/**
* PostgreSQL implementation for enterprise persistent storage
* @param config - PostgreSQL connection configuration
*/
class PostgreSQL extends StorageInterface {
constructor(config: PostgreSQLOptions);
}
interface PostgreSQLOptions extends PersistentOptions {
/** Database host */
host?: string;
/** Database port */
port?: number;
/** Database name */
database: string;
/** Username for authentication */
user: string;
/** Password for authentication */
password: string;
/** SSL configuration */
ssl?: boolean | object;
/** Connection pool size */
poolSize?: number;
}
Usage Examples:
import PostgreSQL from "flexsearch/db/postgres";
import { Document } from "flexsearch";
// Create PostgreSQL storage
const storage = new PostgreSQL({
host: "localhost",
port: 5432,
database: "search_app",
user: "search_user",
password: "secure_password",
ssl: true,
poolSize: 10
});
// Document index with PostgreSQL persistence
const docIndex = new Document({
document: {
id: "id",
index: ["title", "description", "tags"],
store: true
}
});
await storage.mount(docIndex);
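Once mounted, documents are written through to PostgreSQL and queried as usual; the field values below are illustrative:
docIndex.add({
  id: 1,
  title: "Enterprise Search Setup",
  description: "Running FlexSearch on top of PostgreSQL",
  tags: ["postgres", "search"]
});
// Await the result in case the adapter resolves searches asynchronously
const matches = await docIndex.search("postgres");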
MongoDB adapter for document-oriented persistent search with flexible schema support.
import MongoDB from "flexsearch/db/mongodb";
/**
* MongoDB implementation for document-oriented persistent storage
* @param config - MongoDB connection configuration
*/
class MongoDB extends StorageInterface {
constructor(config: MongoDBOptions);
}
interface MongoDBOptions extends PersistentOptions {
/** MongoDB connection URI */
uri: string;
/** Database name */
database: string;
/** Collection name */
collection?: string;
/** Connection options */
options?: object;
}
Usage Examples:
import MongoDB from "flexsearch/db/mongodb";
import { Document } from "flexsearch";
// Create MongoDB storage
const storage = new MongoDB({
uri: "mongodb://localhost:27017",
database: "search_app",
collection: "search_indexes",
options: {
// Passed through to the MongoDB driver; the legacy useNewUrlParser/useUnifiedTopology flags are no-ops in driver v4+
maxPoolSize: 10
}
});
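As with the other adapters, the MongoDB storage is attached to an index via mount(); a minimal sketch assuming the connection settings above:
const docIndex = new Document({
  document: {
    id: "id",
    index: ["title", "content"]
  }
});
await storage.mount(docIndex);
// Documents added from here on are persisted in the configured collection
docIndex.add({
  id: 1,
  title: "MongoDB-backed search",
  content: "Stored in the search_indexes collection"
});
await storage.close();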
Lightweight file-based SQLite storage for desktop and mobile applications.
import SQLite from "flexsearch/db/sqlite";
/**
* SQLite implementation for lightweight file-based persistent storage
* @param filename - SQLite database file path
* @param config - SQLite configuration options
*/
class SQLite extends StorageInterface {
constructor(filename: string, config?: SQLiteOptions);
}
interface SQLiteOptions extends PersistentOptions {
/** Database file mode */
mode?: number;
/** Enable Write-Ahead Logging */
wal?: boolean;
/** Synchronous mode */
synchronous?: "OFF" | "NORMAL" | "FULL";
}
Usage Examples:
import SQLite from "flexsearch/db/sqlite";
import { Index } from "flexsearch";
// Create SQLite storage
const storage = new SQLite("./search-database.db", {
wal: true,
synchronous: "NORMAL"
});
const index = new Index();
await storage.mount(index);
// Data persisted to SQLite file
index.add(1, "SQLite persistent content");ClickHouse adapter for high-performance analytical search applications with columnar storage.
ClickHouse adapter for high-performance analytical search applications with columnar storage.
import ClickHouse from "flexsearch/db/clickhouse";
/**
* ClickHouse implementation for analytical persistent storage
* @param config - ClickHouse connection configuration
*/
class ClickHouse extends StorageInterface {
constructor(config: ClickHouseOptions);
}
interface ClickHouseOptions extends PersistentOptions {
/** ClickHouse server host */
host?: string;
/** ClickHouse server port */
port?: number;
/** Database name */
database?: string;
/** Username for authentication */
user?: string;
/** Password for authentication */
password?: string;
/** Request timeout */
timeout?: number;
}
Usage Examples:
import ClickHouse from "flexsearch/db/clickhouse";
import { Document } from "flexsearch";
// Create ClickHouse storage for analytics
const storage = new ClickHouse({
host: "localhost",
port: 8123,
database: "search_analytics",
user: "default",
timeout: 30000
});
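Mounting works the same way as with the other adapters; the document schema below is illustrative:
const eventIndex = new Document({
  document: {
    id: "id",
    index: ["query", "page"],
    store: true
  }
});
await storage.mount(eventIndex);
// Indexed events are persisted in ClickHouse for later analytical queries
eventIndex.add({ id: 1, query: "persistent search", page: "/docs/persistence" });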
Shared configuration options accepted by every storage adapter:
interface PersistentOptions {
/** Storage adapter name or identifier */
adapter?: string;
/** Compression configuration */
compress?: boolean | CompressionOptions;
/** Encryption configuration */
encrypt?: boolean | EncryptionOptions;
/** Automatic commit interval in milliseconds */
commitInterval?: number;
/** Maximum cache size for write operations */
cacheSize?: number;
/** Batch size for bulk operations */
batchSize?: number;
/** Connection retry configuration */
retry?: RetryOptions;
}
interface CompressionOptions {
/** Compression algorithm */
algorithm?: "gzip" | "lz4" | "brotli";
/** Compression level */
level?: number;
}
interface EncryptionOptions {
/** Encryption algorithm */
algorithm?: "aes-256-gcm" | "aes-192-gcm" | "aes-128-gcm";
/** Encryption key */
key: string | Buffer;
/** Initialization vector */
iv?: string | Buffer;
}
interface RetryOptions {
/** Maximum retry attempts */
maxAttempts?: number;
/** Retry delay in milliseconds */
delay?: number;
/** Exponential backoff multiplier */
backoff?: number;
}
import fs from "node:fs";
import PostgreSQL from "flexsearch/db/postgres";
// Custom storage configuration examples
const advancedStorage = new PostgreSQL({
host: "localhost",
port: 5432,
database: "enterprise_search",
user: "search_service",
password: process.env.DB_PASSWORD,
ssl: {
rejectUnauthorized: true,
ca: fs.readFileSync("ca-certificate.crt"),
key: fs.readFileSync("client-key.key"),
cert: fs.readFileSync("client-certificate.crt")
},
poolSize: 20,
compress: {
algorithm: "gzip",
level: 6
},
encrypt: {
algorithm: "aes-256-gcm",
key: process.env.ENCRYPTION_KEY
},
commitInterval: 5000,
cacheSize: 1000,
batchSize: 100,
retry: {
maxAttempts: 3,
delay: 1000,
backoff: 2
}
});
Some storage adapters support transactional operations for data consistency.
/**
* Begin database transaction
* @returns Promise resolving to transaction context
*/
beginTransaction(): Promise<Transaction>;
/**
* Commit current transaction
* @param transaction - Transaction context
* @returns Promise resolving when commit is complete
*/
commitTransaction(transaction: Transaction): Promise<void>;
/**
* Rollback current transaction
* @param transaction - Transaction context
* @returns Promise resolving when rollback is complete
*/
rollbackTransaction(transaction: Transaction): Promise<void>;
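For adapters that expose these methods, a common pattern is to group related writes into a transaction and roll back on failure; the sketch below is illustrative and assumes a mounted index named index:
const transaction = await storage.beginTransaction();
try {
  index.add(1, "First document in the transaction");
  index.add(2, "Second document in the transaction");
  await storage.commitTransaction(transaction);
} catch (err) {
  // Undo all writes performed since beginTransaction()
  await storage.rollbackTransaction(transaction);
  throw err;
}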
Export and import operations for data backup and migration.
/**
* Export database to backup format
* @param options - Export configuration
* @returns Promise resolving to backup data
*/
export(options?: ExportOptions): Promise<BackupData>;
/**
* Import data from backup
* @param backupData - Previously exported data
* @param options - Import configuration
* @returns Promise resolving when import is complete
*/
import(backupData: BackupData, options?: ImportOptions): Promise<void>;
Usage Examples:
import fs from "node:fs";
// Backup search index data
const backup = await storage.export({
format: "json",
compress: true
});
// Serialize the BackupData object and save it to a file
fs.writeFileSync("search-backup.json", JSON.stringify(backup));
// Restore from backup
const backupData = JSON.parse(fs.readFileSync("search-backup.json", "utf8"));
await storage.import(backupData, {
overwrite: true,
validate: true
});
type StorageAdapter = "indexeddb" | "redis" | "postgres" | "mongodb" | "sqlite" | "clickhouse";
interface BackupData {
version: string;
timestamp: number;
data: any;
metadata: object;
}
interface ExportOptions {
format?: "json" | "binary";
compress?: boolean;
includeMetadata?: boolean;
}
interface ImportOptions {
overwrite?: boolean;
validate?: boolean;
merge?: boolean;
}
interface Transaction {
id: string;
timestamp: number;
operations: Operation[];
}