Sampling reduces log volume by selectively logging events based on frequency, probability, or level. This is useful for high-throughput applications where logging every event would create too much overhead or data.
import (
"time"
"github.com/rs/zerolog"
)

The Sampler interface determines whether a log event should be written or dropped.
type Sampler interface {
// Sample returns true if event should be logged, false to drop
Sample(lvl Level) bool
}

Use the Sample() method on Logger to create a child logger with sampling:
// Create child logger with sampler
func (l Logger) Sample(s Sampler) Logger

Example:
sampler := &zerolog.BasicSampler{N: 10}
sampledLogger := logger.Sample(sampler)
// Only every 10th message is logged
for i := 0; i < 100; i++ {
sampledLogger.Info().Int("iteration", i).Msg("iteration")
}
// Only ~10 messages are actually written

BasicSampler logs every Nth event deterministically, regardless of level. Thread-safe.
type BasicSampler struct {
N uint32 // Sample every N events (1 = all, 10 = every 10th, 0 = none)
}
func (s *BasicSampler) Sample(lvl Level) bool

Example:
// Log every 10th event
sampler := &zerolog.BasicSampler{N: 10}
logger := logger.Sample(sampler)
for i := 0; i < 100; i++ {
logger.Info().Int("i", i).Msg("event")
}
// Events 1, 11, 21, 31, 41, 51, 61, 71, 81, 91 are logged

Special values:
- N = 0: No events are logged (all dropped)
- N = 1: All events are logged (no sampling)
- N = 10: Every 10th event is logged (90% dropped)

Thread safety: Uses atomic operations, safe for concurrent use.
Probabilistically samples events using pseudo-random number generation. Each event has a 1/N chance of being logged.
type RandomSampler uint32
func (s RandomSampler) Sample(lvl Level) bool

Example:
// ~10% of events are logged
sampler := zerolog.RandomSampler(10)
logger := logger.Sample(sampler)
for i := 0; i < 100; i++ {
logger.Info().Int("i", i).Msg("event")
}
// Approximately 10 random events are logged

Predefined constants:
var (
// Sample ~10% of events (1 in 10)
Often = RandomSampler(10)
// Sample ~1% of events (1 in 100)
Sometimes = RandomSampler(100)
// Sample ~0.1% of events (1 in 1000)
Rarely = RandomSampler(1000)
)

Example with constants:
// Use predefined samplers
logger := logger.Sample(zerolog.Often) // ~10%
logger := logger.Sample(zerolog.Sometimes) // ~1%
logger := logger.Sample(zerolog.Rarely) // ~0.1%

Note: Uses math/rand, not cryptographically secure. Distribution is approximate.
Allows a burst of events within a time period, then delegates to another sampler. Useful for allowing initial events while rate-limiting subsequent ones.
type BurstSampler struct {
// Maximum number of events per period before using NextSampler
Burst uint32
// Time period for burst counting
Period time.Duration
// Sampler to use after burst is reached (nil = reject all)
NextSampler Sampler
}
func (s *BurstSampler) Sample(lvl Level) bool

Example:
// Allow first 5 events per second, then sample 1 in 10
sampler := &zerolog.BurstSampler{
Burst: 5,
Period: 1 * time.Second,
NextSampler: &zerolog.BasicSampler{N: 10},
}
logger := logger.Sample(sampler)
// First 5 events each second: logged
// Subsequent events each second: 1 in 10 logged

Allow burst then drop:
// Allow first 10 events per second, drop the rest
sampler := &zerolog.BurstSampler{
Burst: 10,
Period: 1 * time.Second,
NextSampler: nil, // nil = drop all after burst
}
logger := logger.Sample(sampler)

Behavior:
- Burst = 0 or Period = 0: immediately delegates to NextSampler
- NextSampler = nil: all events after burst are dropped

LevelSampler applies different samplers to different log levels, allowing fine-grained control over sampling strategy per severity.
type LevelSampler struct {
TraceSampler Sampler
DebugSampler Sampler
InfoSampler Sampler
WarnSampler Sampler
ErrorSampler Sampler
}
func (s LevelSampler) Sample(lvl Level) bool

Example:
// Different sampling rates per level
sampler := zerolog.LevelSampler{
TraceSampler: &zerolog.BasicSampler{N: 100}, // 1% of trace
DebugSampler: &zerolog.BasicSampler{N: 10}, // 10% of debug
InfoSampler: &zerolog.BasicSampler{N: 1}, // 100% of info
WarnSampler: nil, // 100% of warn (nil = always log)
ErrorSampler: nil, // 100% of error
}
logger := logger.Sample(sampler)
logger.Trace().Msg("trace") // 1% chance
logger.Debug().Msg("debug") // 10% chance
logger.Info().Msg("info") // Always logged
logger.Warn().Msg("warn") // Always logged
logger.Error().Msg("error") // Always logged

Example with random sampling:
// Random sampling per level
sampler := zerolog.LevelSampler{
TraceSampler: zerolog.Rarely, // 0.1%
DebugSampler: zerolog.Sometimes, // 1%
InfoSampler: zerolog.Often, // 10%
WarnSampler: nil, // 100%
ErrorSampler: nil, // 100%
}
logger := logger.Sample(sampler)

Example with burst sampling:
// Burst for debug, always log errors
sampler := zerolog.LevelSampler{
DebugSampler: &zerolog.BurstSampler{
Burst: 5,
Period: 1 * time.Second,
NextSampler: &zerolog.BasicSampler{N: 10},
},
InfoSampler: &zerolog.BasicSampler{N: 1},
ErrorSampler: nil, // Always log errors
}
logger := logger.Sample(sampler)

Behavior:
- If a level's sampler is nil, events at that level are always logged.

// Globally disable all sampling (for debugging)
func DisableSampling(v bool)

Example:
// Temporarily disable all sampling
zerolog.DisableSampling(true)
defer zerolog.DisableSampling(false)
// All samplers now pass through every event
logger.Info().Msg("always logged even with sampler")

// Sample debug heavily, log all errors
sampler := zerolog.LevelSampler{
DebugSampler: &zerolog.BasicSampler{N: 100}, // 1% of debug
InfoSampler: &zerolog.BasicSampler{N: 10}, // 10% of info
ErrorSampler: nil, // All errors
}
logger := zerolog.New(os.Stdout).
Sample(sampler).
Level(zerolog.DebugLevel)
for i := 0; i < 10000; i++ {
logger.Debug().Int("i", i).Msg("debug iteration") // Only ~100 logged
}

// Allow burst of errors, then rate limit
errorSampler := &zerolog.BurstSampler{
Burst: 10,
Period: 1 * time.Minute,
NextSampler: &zerolog.BasicSampler{N: 10},
}
logger := logger.Sample(errorSampler).Level(zerolog.ErrorLevel)
// First 10 errors per minute: all logged
// Subsequent errors: 1 in 10 logged

// Use random sampling for approximate log volume
logger := logger.Sample(zerolog.Sometimes) // ~1% of events
for i := 0; i < 10000; i++ {
logger.Info().Int("i", i).Msg("iteration")
}
// Approximately 100 events logged

// Different sampling for different components
// newComponentLogger returns a child of baseLogger tagged with the
// component name and configured with a per-component sampling policy.
func newComponentLogger(name string, baseLogger zerolog.Logger) zerolog.Logger {
	componentLogger := baseLogger.With().Str("component", name).Logger()
	var sampler zerolog.Sampler
	switch name {
	case "database":
		// Database logs are expensive, sample heavily.
		sampler = &zerolog.BasicSampler{N: 100}
	case "api":
		// API logs are important, light sampling.
		sampler = &zerolog.BasicSampler{N: 10}
	case "auth":
		// Auth logs should all be kept — no sampler applied.
		return componentLogger
	default:
		sampler = zerolog.Often
	}
	return componentLogger.Sample(sampler)
}
dbLogger := newComponentLogger("database", baseLogger)
apiLogger := newComponentLogger("api", baseLogger)
authLogger := newComponentLogger("auth", baseLogger)

// Burst for initial events, then heavy sampling
sampler := &zerolog.BurstSampler{
Burst: 100,
Period: 10 * time.Second,
NextSampler: &zerolog.BasicSampler{N: 100},
}
logger := logger.Sample(sampler)
// First 100 events per 10s: all logged
// Remaining events: 1 in 100 logged

Implement the Sampler interface for custom sampling logic:
// TimeSampler is a custom Sampler that logs events only when the
// current hour falls within [StartHour, EndHour).
type TimeSampler struct {
	StartHour int
	EndHour   int
}

// Sample reports whether the current wall-clock hour is inside the
// configured window (e.g. business hours). The level is ignored.
func (s TimeSampler) Sample(lvl zerolog.Level) bool {
	currentHour := time.Now().Hour()
	if currentHour < s.StartHour {
		return false
	}
	return currentHour < s.EndHour
}
// Use custom sampler
sampler := TimeSampler{StartHour: 9, EndHour: 17}
logger := logger.Sample(sampler)

Example: Level-threshold sampler:
// MinLevelSampler drops every event whose severity is below MinLevel.
type MinLevelSampler struct {
	MinLevel zerolog.Level
}

// Sample keeps only events at or above the configured minimum level.
func (s MinLevelSampler) Sample(lvl zerolog.Level) bool {
	// Equivalent to lvl >= s.MinLevel, phrased as "not below threshold".
	return !(lvl < s.MinLevel)
}
sampler := MinLevelSampler{MinLevel: zerolog.WarnLevel}
logger := logger.Sample(sampler)
// Only Warn, Error, Fatal, Panic are logged

Don't sample errors unless you have a good reason:
// Good
sampler := zerolog.LevelSampler{
DebugSampler: &zerolog.BasicSampler{N: 10},
InfoSampler: &zerolog.BasicSampler{N: 5},
ErrorSampler: nil, // Always log errors
}

For reproducible behavior, prefer BasicSampler over RandomSampler:
// Deterministic - same events logged each run
sampler := &zerolog.BasicSampler{N: 10}
// Non-deterministic - different events each run
sampler := zerolog.RandomSampler(10)

Apply sampling to loggers that generate high volume:
// High-volume component
dbLogger := logger.With().Str("component", "db").Logger()
dbLogger = dbLogger.Sample(&zerolog.BasicSampler{N: 100})
// Normal volume - no sampling
apiLogger := logger.With().Str("component", "api").Logger()

Prevent error log storms with burst sampling:
sampler := &zerolog.BurstSampler{
Burst: 50,
Period: 1 * time.Minute,
NextSampler: &zerolog.BasicSampler{N: 100},
}
errorLogger := logger.Sample(sampler).Level(zerolog.ErrorLevel)

Track dropped logs with custom samplers:
type CountingSampler struct {
Inner zerolog.Sampler
Sampled uint64
Dropped uint64
}
func (s *CountingSampler) Sample(lvl zerolog.Level) bool {
if s.Inner.Sample(lvl) {
atomic.AddUint64(&s.Sampled, 1)
return true
}
atomic.AddUint64(&s.Dropped, 1)
return false
}

Disable sampling during tests to see all logs:
func TestSomething(t *testing.T) {
zerolog.DisableSampling(true)
defer zerolog.DisableSampling(false)
// All logs visible during test
logger.Debug().Msg("test message")
}

Performance notes:
- BasicSampler uses an atomic counter (thread-safe, low contention)
- RandomSampler uses math/rand (may contend under high load)
- BurstSampler uses atomic operations for thread safety