Performance benchmark testing measures Grafana dashboard and application operation performance and collects application statistics during end-to-end (e2e) test execution. Benchmarks run with configurable parameters such as iteration count, duration per iteration, and optional application statistics collection.
/**
* Run performance benchmarks with dashboard configuration
* @param args - Benchmark configuration object
*/
function benchmark(args: BenchmarkArguments): void;
interface BenchmarkArguments {
  /** Benchmark test name */
  name: string;
  /** Dashboard configuration for the benchmark */
  dashboard: DashboardBenchmarkConfig;
  /** Number of test iterations to run */
  repeat: number;
  /** Duration per test iteration in milliseconds */
  duration: number;
  /** Optional app statistics collection */
  appStats?: AppStatsConfig;
  /** Skip benchmark execution if true */
  skipScenario?: boolean;
}

interface DashboardBenchmarkConfig {
  /** Dashboard folder path */
  folder: string;
  /** Delay after opening the dashboard, in milliseconds */
  delayAfterOpening: number;
  /** Skip panel validation during the benchmark */
  skipPanelValidation: boolean;
}

interface AppStatsConfig {
  /** Function to start collecting application statistics */
  startCollecting?(window: Window): void;
  /** Function to collect and return application statistics */
  collect(window: Window): Record<string, unknown>;
}

Usage Examples:
import { e2e } from "@grafana/e2e";

// Basic benchmark test
e2e.benchmark({
  name: "Dashboard Load Performance",
  dashboard: {
    folder: "dashboards/performance-test",
    delayAfterOpening: 1000,
    skipPanelValidation: false
  },
  repeat: 5,
  duration: 30000
});
// Benchmark with app statistics collection
e2e.benchmark({
  name: "Memory Usage Benchmark",
  dashboard: {
    folder: "dashboards/memory-test",
    delayAfterOpening: 2000,
    skipPanelValidation: true
  },
  repeat: 10,
  duration: 60000,
  appStats: {
    startCollecting: (window: Window) => {
      // Start memory monitoring
      console.log("Starting memory collection");
    },
    collect: (window: Window) => {
      // performance.memory is a non-standard, Chrome-only API,
      // so fall back to 0 where it is unavailable
      return {
        memoryUsage: (window.performance as any).memory?.usedJSHeapSize ?? 0,
        timestamp: Date.now()
      };
    }
  }
});
// Conditional benchmark execution
e2e.benchmark({
  name: "Conditional Performance Test",
  dashboard: {
    folder: "dashboards/load-test",
    delayAfterOpening: 500,
    skipPanelValidation: false
  },
  repeat: 3,
  duration: 15000,
  // Note: process.env is only available in specs when the preprocessor
  // inlines it at build time; Cypress.env() is the runtime alternative
  skipScenario: process.env.SKIP_BENCHMARKS === "true"
});

Configure how dashboards are loaded and validated during benchmarks:
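A DashboardBenchmarkConfig object can be defined once and shared across several benchmarks. A minimal sketch, assuming the interface is exported from @grafana/e2e (if it is not, declare it locally as shown above); the constant name and folder path are illustrative:

import { e2e, DashboardBenchmarkConfig } from "@grafana/e2e";

// Hypothetical shared configuration reused by multiple benchmarks
const sharedDashboard: DashboardBenchmarkConfig = {
  folder: "dashboards/performance-test", // illustrative folder path
  delayAfterOpening: 1000,               // settle time before measuring
  skipPanelValidation: false             // validate panels on each run
};

e2e.benchmark({
  name: "Shared Dashboard Config",
  dashboard: sharedDashboard,
  repeat: 5,
  duration: 30000
});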
Benchmark execution is controlled through the repeat, duration, and skipScenario fields, as shown in the conditional example above.
Application statistics collection via appStats is optional: startCollecting begins collection and collect returns the gathered statistics, as in the memory example above.
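As another example, here is a minimal sketch of a custom AppStatsConfig that counts long main-thread tasks using the browser's PerformanceObserver API. The collector name and reported field are illustrative, and "longtask" entries are only reported by Chromium-based browsers:

let longTaskCount = 0;
let observer: PerformanceObserver | undefined;

const longTaskStats: AppStatsConfig = {
  startCollecting: (window: Window) => {
    longTaskCount = 0;
    // Count tasks that block the main thread for more than 50 ms
    observer = new (window as typeof globalThis).PerformanceObserver((list) => {
      longTaskCount += list.getEntries().length;
    });
    observer.observe({ entryTypes: ["longtask"] });
  },
  collect: (window: Window) => {
    observer?.disconnect();
    return { longTaskCount };
  }
};

The object is then passed as the appStats field of a benchmark, e.g. appStats: longTaskStats.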
Benchmarks can be integrated with regular test scenarios:
import { e2e } from "@grafana/e2e";

e2e.scenario({
  describeName: "Performance Tests",
  itName: "should measure dashboard performance",
  scenario: () => {
    // Setup test data
    e2e.flows.addDashboard({
      title: "Performance Test Dashboard"
    });

    // Run benchmark
    e2e.benchmark({
      name: "Dashboard Rendering",
      dashboard: {
        folder: "current-dashboard",
        delayAfterOpening: 1000,
        skipPanelValidation: false
      },
      repeat: 3,
      duration: 10000
    });

    // Cleanup
    e2e.flows.revertAllChanges();
  }
});

Benchmarks can be configured via environment variables:
# Disable command logging for cleaner benchmark output
CYPRESS_NO_COMMAND_LOG=1
# Skip benchmark tests in CI
SKIP_BENCHMARKS=true
# Set benchmark iterations
BENCHMARK_REPEAT=10

Access environment variables in benchmark configuration:
e2e.benchmark({
  name: "Configurable Benchmark",
  dashboard: {
    folder: "dashboards/test",
    delayAfterOpening: 1000,
    skipPanelValidation: false
  },
  repeat: parseInt(process.env.BENCHMARK_REPEAT || "5", 10),
  duration: 30000,
  skipScenario: process.env.CI === "true"
});
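Because Cypress specs execute in the browser, process.env is only visible when the build-time preprocessor inlines it. A sketch of the runtime alternative using Cypress.env(), assuming the variables are supplied with a CYPRESS_ prefix (e.g. CYPRESS_BENCHMARK_REPEAT=10) or via --env:

e2e.benchmark({
  name: "Runtime Configurable Benchmark",
  dashboard: {
    folder: "dashboards/test",
    delayAfterOpening: 1000,
    skipPanelValidation: false
  },
  // Cypress.env() reads variables at runtime; Cypress coerces "10" to 10
  repeat: Number(Cypress.env("BENCHMARK_REPEAT")) || 5,
  duration: 30000,
  // Cypress coerces "true" to the boolean true
  skipScenario: Cypress.env("SKIP_BENCHMARKS") === true
});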