Lightning fast normal and incremental MD5 hashing for JavaScript
npx @tessl/cli install tessl/npm-spark-md5@3.0.0

SparkMD5 is a lightning-fast MD5 hashing library for JavaScript that provides both normal (one-shot) and incremental MD5 hashing capabilities. It offers significant performance improvements over other MD5 implementations with UTF-8 string conversion, overflow protection for large data sets, memory-efficient incremental hashing for files and large datasets, support for array buffers and typed arrays, and comprehensive APIs for both string and binary data processing.
npm install spark-md5

const SparkMD5 = require("spark-md5");

For ES modules:
import SparkMD5 from "spark-md5";

For AMD:
define(["spark-md5"], function(SparkMD5) {
// Use SparkMD5
});

Browser global:
<script src="spark-md5.js"></script>
<script>
// SparkMD5 is available as a global
const hash = SparkMD5.hash("hello");
</script>

const SparkMD5 = require("spark-md5");
// Direct hashing of strings
const hexHash = SparkMD5.hash("Hi there"); // hex hash
const rawHash = SparkMD5.hash("Hi there", true); // raw binary string
// Incremental hashing for large data
const spark = new SparkMD5();
spark.append("Hi");
spark.append(" there");
const result = spark.end(); // hex hash
// ArrayBuffer hashing for binary data
const buffer = new ArrayBuffer(16);
const hashFromBuffer = SparkMD5.ArrayBuffer.hash(buffer);
// Incremental ArrayBuffer hashing
const sparkAB = new SparkMD5.ArrayBuffer();
sparkAB.append(buffer);
const resultAB = sparkAB.end();

SparkMD5 provides two main classes for different data types:
Direct MD5 hashing of strings with automatic UTF-8 conversion.
/**
 * Performs MD5 hash on a string with UTF-8 conversion
 * @param {string} str - The string to hash
 * @param {boolean} [raw=false] - Optional, true for raw binary string, false for hex string
 * @returns {string} The computed MD5 hash
 */
SparkMD5.hash(str, raw);

Direct MD5 hashing of binary strings without UTF-8 conversion.
/**
 * Performs MD5 hash on a binary string
 * @param {string} content - The binary string to hash
 * @param {boolean} [raw=false] - Optional, true for raw binary string, false for hex string
 * @returns {string} The computed MD5 hash
 */
SparkMD5.hashBinary(content, raw);

Create an incremental MD5 hasher for processing large strings in chunks.
/**
* Creates a new SparkMD5 instance for incremental hashing
* @constructor
*/
function SparkMD5();
/**
 * Appends a string with UTF-8 conversion if necessary
 * @param {string} str - The string to append
 * @returns {SparkMD5} The SparkMD5 instance for chaining
 */
SparkMD5.prototype.append(str);
/**
* Appends a binary string without UTF-8 conversion
* @param {string} contents - The binary string to append
* @returns {SparkMD5} The SparkMD5 instance for chaining
*/
SparkMD5.prototype.appendBinary(contents);
/**
 * Finishes the computation and returns the result
 * @param {boolean} [raw=false] - Optional, true for raw binary string, false for hex string
 * @returns {string} The computed MD5 hash
 */
SparkMD5.prototype.end(raw);
/**
 * Resets the internal state for reuse
 * @returns {SparkMD5} The SparkMD5 instance for chaining
 */
SparkMD5.prototype.reset();
/**
 * Gets the internal computation state for resuming later
 * @returns {Object} State object with buff, length, and hash properties
 */
SparkMD5.prototype.getState();
/**
 * Sets the internal computation state from a previous getState() call
 * @param {Object} state - State object from getState()
 * @returns {SparkMD5} The SparkMD5 instance for chaining
 */
SparkMD5.prototype.setState(state);
/**
 * Releases memory used by the incremental buffer
 * @returns {void}
 */
SparkMD5.prototype.destroy();

Usage Examples:
const SparkMD5 = require("spark-md5");
// Basic incremental hashing
const spark = new SparkMD5();
spark.append("Hello");
spark.append(" ");
spark.append("World");
// Fixed: MD5("Hello World") is b10a8db1...; the previously documented value
// 65a8e27d8879283831b664bd8b7f0ad4 is MD5("Hello, World!"), a different input.
const hash = spark.end(); // "b10a8db164e0754105b7a99be72e3fe5"
// State management for resumable hashing
const spark1 = new SparkMD5();
spark1.append("Hello");
const state = spark1.getState(); // Save state
const spark2 = new SparkMD5();
spark2.setState(state); // Resume from saved state
spark2.append(" World");
const hash2 = spark2.end(); // Same result as above
// Memory cleanup
spark1.destroy();
spark2.destroy();

Direct MD5 hashing of ArrayBuffer and typed array data.
/**
 * Performs MD5 hash on an ArrayBuffer
 * @param {ArrayBuffer} arr - The ArrayBuffer to hash
 * @param {boolean} [raw=false] - Optional, true for raw binary string, false for hex string
 * @returns {string} The computed MD5 hash
 */
SparkMD5.ArrayBuffer.hash(arr, raw);

Create an incremental MD5 hasher for processing large binary data in chunks.
/**
* Creates a new SparkMD5.ArrayBuffer instance for incremental binary hashing
* @constructor
*/
new SparkMD5.ArrayBuffer();
/**
 * Appends an ArrayBuffer to the hash computation
 * @param {ArrayBuffer} arr - The ArrayBuffer to append
 * @returns {SparkMD5.ArrayBuffer} The SparkMD5.ArrayBuffer instance for chaining
 */
SparkMD5.ArrayBuffer.prototype.append(arr);
/**
 * Finishes the computation and returns the result
 * @param {boolean} [raw=false] - Optional, true for raw binary string, false for hex string
 * @returns {string} The computed MD5 hash
 */
SparkMD5.ArrayBuffer.prototype.end(raw);
/**
 * Resets the internal state for reuse
 * @returns {SparkMD5.ArrayBuffer} The SparkMD5.ArrayBuffer instance for chaining
 */
SparkMD5.ArrayBuffer.prototype.reset();
/**
 * Gets the internal computation state for resuming later
 * @returns {Object} State object with buff, length, and hash properties
 */
SparkMD5.ArrayBuffer.prototype.getState();
/**
 * Sets the internal computation state from a previous getState() call
 * @param {Object} state - State object from getState()
 * @returns {SparkMD5.ArrayBuffer} The SparkMD5.ArrayBuffer instance for chaining
 */
SparkMD5.ArrayBuffer.prototype.setState(state);
/**
 * Releases memory used by the incremental buffer
 * @returns {void}
 */
SparkMD5.ArrayBuffer.prototype.destroy();

Usage Examples:
const SparkMD5 = require("spark-md5");

// Direct ArrayBuffer hashing: digest 16 bytes holding the values 1..16.
const buffer = new ArrayBuffer(16);
const bytes = new Uint8Array(buffer);
for (let i = 0; i < bytes.length; i += 1) {
  bytes[i] = i + 1;
}
const hash = SparkMD5.ArrayBuffer.hash(buffer);

// Incremental file processing example
function hashFileInChunks(file) {
return new Promise((resolve, reject) => {
const chunkSize = 2097152; // 2MB chunks
const chunks = Math.ceil(file.size / chunkSize);
let currentChunk = 0;
const spark = new SparkMD5.ArrayBuffer();
const fileReader = new FileReader();
fileReader.onload = function(e) {
spark.append(e.target.result);
currentChunk++;
if (currentChunk < chunks) {
loadNext();
} else {
const hash = spark.end();
spark.destroy();
resolve(hash);
}
};
fileReader.onerror = function() {
spark.destroy();
reject(new Error("File reading failed"));
};
function loadNext() {
const start = currentChunk * chunkSize;
const end = Math.min(start + chunkSize, file.size);
fileReader.readAsArrayBuffer(file.slice(start, end));
}
loadNext();
});
}/**
* State object returned by getState() methods
*/
interface State {
buff: string; // Buffered input not yet folded into the hash (serialized as a string)
length: number; // Total length of all input appended so far
hash: number[]; // Internal MD5 accumulator words — presumably the four 32-bit registers; verify against the implementation
}
/**
 * Type of the spark-md5 export: a constructor for incremental string hashing,
 * plus static one-shot helpers and the ArrayBuffer companion class.
 */
interface SparkMD5Static {
new(): SparkMD5; // incremental hasher: append()/appendBinary(), then end()
hash(str: string, raw?: boolean): string; // one-shot hash with UTF-8 conversion
hashBinary(content: string, raw?: boolean): string; // one-shot hash, no UTF-8 conversion
ArrayBuffer: SparkMD5ArrayBufferStatic; // binary-data variant
}
/**
 * SparkMD5 instance methods: an incremental MD5 hasher for strings.
 * Feed data with append()/appendBinary(), finish with end(); getState()/
 * setState() allow pausing and resuming a computation, and destroy()
 * releases the internal buffer.
 */
interface SparkMD5 {
append(str: string): SparkMD5; // applies UTF-8 conversion if necessary; chainable
appendBinary(contents: string): SparkMD5; // no UTF-8 conversion; chainable
end(raw?: boolean): string; // raw=true => raw binary string, otherwise hex
reset(): SparkMD5; // clear state so the instance can be reused; chainable
getState(): State; // snapshot of the computation for resuming later
setState(state: State): SparkMD5; // resume from a getState() snapshot; chainable
destroy(): void; // release memory used by the incremental buffer
}
/**
 * SparkMD5.ArrayBuffer constructor function and static methods
 * (the binary-data counterpart of the string-oriented SparkMD5 export).
 */
interface SparkMD5ArrayBufferStatic {
new(): SparkMD5ArrayBuffer; // incremental hasher for ArrayBuffer chunks
hash(arr: ArrayBuffer, raw?: boolean): string; // one-shot hash; raw=true => raw binary string, otherwise hex
}
/**
 * SparkMD5.ArrayBuffer instance methods: an incremental MD5 hasher for
 * binary data. Mirrors the SparkMD5 string API but accepts ArrayBuffers.
 */
interface SparkMD5ArrayBuffer {
append(arr: ArrayBuffer): SparkMD5ArrayBuffer; // feed one binary chunk; chainable
end(raw?: boolean): string; // raw=true => raw binary string, otherwise hex
reset(): SparkMD5ArrayBuffer; // clear state so the instance can be reused; chainable
getState(): State; // snapshot of the computation for resuming later
setState(state: State): SparkMD5ArrayBuffer; // resume from a getState() snapshot; chainable
destroy(): void; // release memory used by the incremental buffer
}