The gmeasure package provides performance measurement and benchmarking utilities for Go tests. It allows you to record measurements, compute statistics, and generate formatted reports of your benchmark results.
gmeasure is designed for performance testing and benchmarking within your test suite. It provides:
{ .api }
type Experiment struct {
// Contains filtered or unexported fields
}

The Experiment type represents a performance experiment for measuring code. It collects measurements and provides statistical analysis of the results.
{ .api }
func NewExperiment(name string) *Experiment

Creates a new performance experiment with the given name.
Parameters:
name - Descriptive name for the experiment

Returns: A new *Experiment instance
Example:
exp := gmeasure.NewExperiment("Database Query Performance")

{ .api }
type Measurement struct {
Value float64
Unit string
// Additional fields for metadata
}

Represents a single measurement result with its value and unit.
Fields:
Value - The numeric measurement value
Unit - The unit of measurement (e.g., "ms", "MB", "ops/sec")

{ .api }
type Stats struct {
Min float64
Max float64
Mean float64
Median float64
StdDev float64
// Additional statistical fields
}

Provides statistical summary of a collection of measurements.
Fields:
Min - Minimum measured value
Max - Maximum measured value
Mean - Average of all measurements
Median - Middle value when measurements are sorted
StdDev - Standard deviation showing variability

The Duration type is a specialized measurement type for time durations, extending Measurement with time-specific functionality.
{ .api }
func (e *Experiment) RecordDuration(name string, callback func()) Duration

Records the duration of executing a callback function.
Parameters:
name - Name for this measurement
callback - Function to measure

Returns: A Duration measurement object
Example:
exp := gmeasure.NewExperiment("API Calls")
duration := exp.RecordDuration("fetch user", func() {
_, err := client.GetUser(userID)
Expect(err).NotTo(HaveOccurred())
})

{ .api }
func (e *Experiment) RecordValue(name string, value float64, unit ...string) Measurement

Records an arbitrary numeric measurement with an optional unit.
Parameters:
name - Name for this measurement
value - Numeric value to record
unit - Optional unit of measurement (e.g., "MB", "requests", "items/sec")

Returns: A Measurement object
Example:
exp := gmeasure.NewExperiment("Memory Usage")
// Record memory usage in megabytes
exp.RecordValue("heap allocation", 45.6, "MB")
// Record without unit
exp.RecordValue("cache hits", 1250)

{ .api }
func (e *Experiment) MeasureDuration(name string, callback func(), opts ...any) Duration

Measures the duration of executing a callback function with additional options for repeated measurements and statistical sampling.
Parameters:
name - Name for this measurement
callback - Function to measure
opts - Optional configuration for sampling behavior

Returns: A Duration measurement object
Example:
exp := gmeasure.NewExperiment("Sort Performance")
// Measure multiple iterations
duration := exp.MeasureDuration("quicksort 10k items", func() {
sort.Ints(data)
})

{ .api }
func (e *Experiment) GetStats(name string) Stats

Retrieves statistical summary for a named measurement.
Parameters:
name - Name of the measurement to get stats for

Returns: A Stats object with statistical analysis
Example:
stats := exp.GetStats("fetch user")
fmt.Printf("Mean: %.2fms\n", stats.Mean)
fmt.Printf("StdDev: %.2fms\n", stats.StdDev)
fmt.Printf("Min/Max: %.2fms / %.2fms\n", stats.Min, stats.Max)

{ .api }
func (e *Experiment) Reset()

Clears all recorded measurements from the experiment, allowing it to be reused.
Example:
exp := gmeasure.NewExperiment("Cache Performance")
// First test run
for i := 0; i < 100; i++ {
exp.RecordDuration("cache lookup", func() {
cache.Get(key)
})
}
// Clear and run again with different data
exp.Reset()
for i := 0; i < 100; i++ {
exp.RecordDuration("cache lookup", func() {
cache.Get(differentKey)
})
}

The gmeasure/table sub-package provides table formatting utilities for benchmark results.
{ .api }
func RenderTable(experiments []*gmeasure.Experiment) string

Renders experiment results as a formatted table string suitable for console output or reports.
Parameters:
experiments - Slice of experiments to render

Returns: A formatted table string
Example:
import (
"github.com/onsi/gomega/gmeasure"
"github.com/onsi/gomega/gmeasure/table"
)
exp1 := gmeasure.NewExperiment("Query Performance")
exp2 := gmeasure.NewExperiment("Insert Performance")
// ... perform measurements ...
output := table.RenderTable([]*gmeasure.Experiment{exp1, exp2})
fmt.Println(output)

package mypackage_test
import (
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gmeasure"
)
var _ = Describe("Performance Tests", func() {
It("benchmarks database queries", func() {
exp := gmeasure.NewExperiment("Database Queries")
// Measure read operations
for i := 0; i < 10; i++ {
exp.RecordDuration("SELECT query", func() {
db.Query("SELECT * FROM users WHERE id = ?", i)
})
}
// Measure write operations
for i := 0; i < 10; i++ {
exp.RecordDuration("INSERT query", func() {
db.Exec("INSERT INTO logs (message) VALUES (?)", "test")
})
}
// Get statistics
selectStats := exp.GetStats("SELECT query")
insertStats := exp.GetStats("INSERT query")
// Verify performance expectations
Expect(selectStats.Mean).To(BeNumerically("<", 10)) // Less than 10ms average
Expect(insertStats.Mean).To(BeNumerically("<", 20)) // Less than 20ms average
})
})

var _ = Describe("Algorithm Comparison", func() {
It("compares sorting algorithms", func() {
exp := gmeasure.NewExperiment("Sorting Algorithms")
data := generateRandomData(10000)
// Benchmark quicksort
dataCopy := make([]int, len(data))
copy(dataCopy, data)
exp.RecordDuration("quicksort", func() {
quicksort(dataCopy)
})
// Benchmark mergesort
copy(dataCopy, data)
exp.RecordDuration("mergesort", func() {
mergesort(dataCopy)
})
// Compare performance
quickStats := exp.GetStats("quicksort")
mergeStats := exp.GetStats("mergesort")
fmt.Printf("QuickSort: %.2fms\n", quickStats.Mean)
fmt.Printf("MergeSort: %.2fms\n", mergeStats.Mean)
})
})

var _ = Describe("Resource Usage", func() {
It("measures memory and CPU metrics", func() {
exp := gmeasure.NewExperiment("Resource Metrics")
// Measure memory allocations
var memBefore, memAfter runtime.MemStats
runtime.ReadMemStats(&memBefore)
// Perform memory-intensive operation
data := make([]byte, 1024*1024*10) // 10MB
runtime.ReadMemStats(&memAfter)
allocatedMB := float64(memAfter.Alloc-memBefore.Alloc) / 1024 / 1024
exp.RecordValue("memory allocated", allocatedMB, "MB")
// Measure processing rate
itemsProcessed := 5000
duration := exp.RecordDuration("batch processing", func() {
processBatch(data)
})
itemsPerSec := float64(itemsProcessed) / duration.Seconds()
exp.RecordValue("throughput", itemsPerSec, "items/sec")
// Verify metrics
stats := exp.GetStats("memory allocated")
Expect(stats.Mean).To(BeNumerically("<", 15)) // Less than 15MB
})
})

var _ = Describe("Load Testing", func() {
It("performs statistical analysis on response times", func() {
exp := gmeasure.NewExperiment("HTTP Response Times")
// Simulate 100 requests
for i := 0; i < 100; i++ {
exp.RecordDuration("API request", func() {
resp, _ := http.Get("http://example.com/api")
resp.Body.Close()
})
}
stats := exp.GetStats("API request")
// Verify statistical properties
Expect(stats.Mean).To(BeNumerically("<", 100)) // Average < 100ms
Expect(stats.Median).To(BeNumerically("<", 80)) // Median < 80ms
Expect(stats.Max).To(BeNumerically("<", 500)) // No request > 500ms
Expect(stats.StdDev).To(BeNumerically("<", 50)) // Low variability
// Log detailed statistics
fmt.Printf(`
Performance Statistics:
Mean: %.2fms
Median: %.2fms
Min: %.2fms
Max: %.2fms
StdDev: %.2fms
`, stats.Mean, stats.Median, stats.Min, stats.Max, stats.StdDev)
})
})

var _ = Describe("Performance Suite", func() {
It("runs multiple benchmarks and generates report", func() {
experiments := []*gmeasure.Experiment{}
// CPU-bound operations
cpuExp := gmeasure.NewExperiment("CPU Operations")
for i := 0; i < 20; i++ {
cpuExp.RecordDuration("fibonacci(30)", func() {
fibonacci(30)
})
cpuExp.RecordDuration("prime check", func() {
isPrime(982451653)
})
}
experiments = append(experiments, cpuExp)
// I/O operations
ioExp := gmeasure.NewExperiment("I/O Operations")
for i := 0; i < 20; i++ {
ioExp.RecordDuration("file read", func() {
ioutil.ReadFile("testdata/large.txt")
})
ioExp.RecordDuration("file write", func() {
ioutil.WriteFile("testdata/output.txt", data, 0644)
})
}
experiments = append(experiments, ioExp)
// Network operations
netExp := gmeasure.NewExperiment("Network Operations")
for i := 0; i < 20; i++ {
netExp.RecordDuration("HTTP GET", func() {
http.Get("http://example.com")
})
netExp.RecordDuration("DNS lookup", func() {
net.LookupHost("example.com")
})
}
experiments = append(experiments, netExp)
// Generate and print table report
report := table.RenderTable(experiments)
fmt.Println(report)
})
})

var _ = Describe("Cache Performance", func() {
var exp *gmeasure.Experiment
BeforeEach(func() {
exp = gmeasure.NewExperiment("Cache Operations")
})
It("measures cache hits", func() {
// Warm up cache
for i := 0; i < 100; i++ {
cache.Set(fmt.Sprintf("key%d", i), i)
}
// Measure cache hits
for i := 0; i < 100; i++ {
exp.RecordDuration("cache hit", func() {
cache.Get(fmt.Sprintf("key%d", i))
})
}
stats := exp.GetStats("cache hit")
Expect(stats.Mean).To(BeNumerically("<", 1)) // Sub-millisecond
})
It("measures cache misses", func() {
// Don't reset - reuse the same experiment
// Measure cache misses
for i := 0; i < 100; i++ {
exp.RecordDuration("cache miss", func() {
cache.Get(fmt.Sprintf("missing_key%d", i))
})
}
missStats := exp.GetStats("cache miss")
hitStats := exp.GetStats("cache hit")
// Cache hits should be faster than misses
Expect(hitStats.Mean).To(BeNumerically("<", missStats.Mean))
})
It("measures after reset", func() {
// Clear all previous measurements
exp.Reset()
// Run fresh benchmark
for i := 0; i < 50; i++ {
exp.RecordDuration("fresh measurement", func() {
cache.Get("key0")
})
}
// Previous measurements are gone
stats := exp.GetStats("cache hit")
Expect(stats.Mean).To(BeZero()) // No data for this measurement
})
})

Use clear, descriptive names for experiments and measurements:
// Good
exp := gmeasure.NewExperiment("Database Connection Pool Performance")
exp.RecordDuration("acquire connection from pool", func() { ... })
// Less clear
exp := gmeasure.NewExperiment("Test")
exp.RecordDuration("test1", func() { ... })

Run measurements multiple times to get reliable statistics:
exp := gmeasure.NewExperiment("Function Performance")
// Run 50 iterations for statistical significance
for i := 0; i < 50; i++ {
exp.RecordDuration("function call", func() {
myFunction()
})
}
stats := exp.GetStats("function call")
// Now stats.Mean, StdDev are meaningful

Use meaningful units for custom measurements:
exp.RecordValue("throughput", rate, "requests/sec")
exp.RecordValue("memory usage", bytes/1024/1024, "MB")
exp.RecordValue("cache hit ratio", ratio*100, "%")

Combine measurements with assertions to enforce performance requirements:
exp := gmeasure.NewExperiment("SLA Compliance")
for i := 0; i < 100; i++ {
exp.RecordDuration("API call", func() {
handleRequest()
})
}
stats := exp.GetStats("API call")
// Ensure 95th percentile is under 200ms
// Ensure no call exceeds 1 second
Expect(stats.Max).To(BeNumerically("<", 1000), "No call should exceed 1 second")

Avoid mixing different types of operations in a single measurement:
// Good - separate measurements
exp.RecordDuration("database query", func() {
db.Query(...)
})
exp.RecordDuration("JSON parsing", func() {
json.Unmarshal(...)
})
// Avoid - mixed operations
exp.RecordDuration("query and parse", func() {
result := db.Query(...)
json.Unmarshal(result)
})

Use gmeasure in your test suite to detect performance regressions:
It("maintains query performance", func() {
exp := gmeasure.NewExperiment("Query Performance")
for i := 0; i < 50; i++ {
exp.RecordDuration("user lookup", func() {
db.QueryUser(userID)
})
}
stats := exp.GetStats("user lookup")
// Fail if performance degrades
Expect(stats.Mean).To(BeNumerically("<", 5),
"User lookup should complete in under 5ms on average")
})

Verify that optimizations actually improve performance:
Context("optimization impact", func() {
It("shows improved performance", func() {
expBefore := gmeasure.NewExperiment("Before Optimization")
expAfter := gmeasure.NewExperiment("After Optimization")
// Test old implementation
for i := 0; i < 100; i++ {
expBefore.RecordDuration("process", func() {
oldImplementation()
})
}
// Test new implementation
for i := 0; i < 100; i++ {
expAfter.RecordDuration("process", func() {
newImplementation()
})
}
before := expBefore.GetStats("process")
after := expAfter.GetStats("process")
improvement := (before.Mean - after.Mean) / before.Mean * 100
fmt.Printf("Performance improved by %.1f%%\n", improvement)
Expect(after.Mean).To(BeNumerically("<", before.Mean),
"Optimization should improve performance")
})
})

Measure resource usage to inform capacity planning:
It("measures resource requirements", func() {
exp := gmeasure.NewExperiment("Load Test")
concurrent := 100
totalRequests := 1000
start := time.Now()
for i := 0; i < totalRequests; i++ {
exp.RecordDuration("request", func() {
processRequest()
})
}
elapsed := time.Since(start)
stats := exp.GetStats("request")
throughput := float64(totalRequests) / elapsed.Seconds()
exp.RecordValue("throughput", throughput, "req/sec")
fmt.Printf("System can handle %.0f requests/sec\n", throughput)
fmt.Printf("Average latency: %.2fms\n", stats.Mean)
})