tessl install tessl/golang-cloud-google-com--go--logging@1.13.0

Cloud Logging client library for Go that enables writing log entries to the Google Cloud Logging service, with buffered asynchronous and synchronous logging capabilities.
This document describes error handling in the Cloud Logging library, including error variables, the Client.OnError callback, and how to handle common error scenarios.
var (
ErrRedirectProtoPayloadNotSupported error
ErrOverflow error
ErrOversizedEntry error
)

The logging package defines three error variables for common error conditions:
var ErrRedirectProtoPayloadNotSupported = errors.New("printEntryToStdout: cannot find valid payload")

Returned when a Logger is configured to redirect output using RedirectAsJSON and tries to redirect logs with a protobuf payload. Protobuf payloads (anypb.Any) cannot be serialized to JSON for output redirection.
Example:
import (
"os"
"google.golang.org/protobuf/types/known/anypb"
)
logger := client.Logger("my-log",
logging.RedirectAsJSON(os.Stdout),
)
var payload anypb.Any
// ... marshal protobuf message to payload ...
// This will trigger ErrRedirectProtoPayloadNotSupported
logger.Log(logging.Entry{
Payload: &payload,
})
// Error reported via Client.OnError callback

var ErrOverflow = bundler.ErrOverflow

Signals that the number of buffered entries for a Logger exceeds its BufferedByteLimit. This error occurs when log entries are generated faster than they can be sent to Cloud Logging, causing the buffer to fill up.
Example:
// Set a small buffer limit for demonstration
logger := client.Logger("my-log",
logging.BufferedByteLimit(1024), // Very small buffer
)
// Generate many log entries quickly
for i := 0; i < 10000; i++ {
logger.Log(logging.Entry{
Payload: fmt.Sprintf("Log entry %d with some content", i),
})
}
// ErrOverflow may be reported if entries exceed buffer limit

Default Buffer Limit:
The default BufferedByteLimit is 1 GiB (DefaultBufferedByteLimit = 1 << 30). This is generous enough for most applications.
Handling Overflow:
When overflow occurs, log entries are dropped to prevent unbounded memory growth. To handle this:
- Increase BufferedByteLimit if you have memory available
- Monitor errors via Client.OnError
- Use LogSync for critical entries that must not be dropped

var ErrOversizedEntry = bundler.ErrOversizedItem

Signals that an entry's size exceeds the maximum number of bytes that will be sent in a single call to the logging service. This error is only returned if you have set an EntryByteLimit on the Logger.
Example:
logger := client.Logger("my-log",
logging.EntryByteLimit(1024), // 1 KB limit per entry
)
// Try to log a large entry
largePayload := strings.Repeat("a", 2000) // 2 KB payload
logger.Log(logging.Entry{
Payload: largePayload,
})
// ErrOversizedEntry will be reported via Client.OnError

Default Behavior:
By default, EntryByteLimit is zero (no limit). Cloud Logging service has its own limits:
- The service enforces its own per-request size limits (see DefaultBundleByteLimit)

type Client struct {
OnError func(err error)
}

The Client.OnError callback is called when an error occurs in a call to Log or Flush. The error may be due to:
- Buffer overflow (ErrOverflow)
- Oversized entries (ErrOversizedEntry)
- Redirect of a protobuf payload (ErrRedirectProtoPayloadNotSupported)

OnError is called with errors from all Loggers created by the Client. It is never called concurrently and should return quickly.
Default Behavior:
If OnError is not set, the default behavior is to call log.Printf:
client.OnError = func(e error) {
log.Printf("logging client: %v", e)
}

Setting Custom Error Handler:
Set OnError before creating any loggers or calling any Client methods:
ctx := context.Background()
client, err := logging.NewClient(ctx, "my-project")
if err != nil {
// Handle error
}
defer client.Close()
// Set custom error handler BEFORE creating loggers
client.OnError = func(err error) {
// Custom error handling
if errors.Is(err, logging.ErrOverflow) {
metrics.IncrementCounter("logging.overflow")
log.Printf("CRITICAL: Logging buffer overflow")
} else if errors.Is(err, logging.ErrOversizedEntry) {
metrics.IncrementCounter("logging.oversized")
log.Printf("WARNING: Oversized log entry")
} else {
log.Printf("Logging error: %v", err)
}
}
// Now create loggers
logger := client.Logger("my-log")

var errorCount int64
client.OnError = func(err error) {
atomic.AddInt64(&errorCount, 1)
log.Printf("Logging error: %v", err)
}
// Periodically check error count
go func() {
ticker := time.NewTicker(1 * time.Minute)
for range ticker.C {
count := atomic.LoadInt64(&errorCount)
if count > 0 {
log.Printf("Total logging errors in last minute: %d", count)
atomic.StoreInt64(&errorCount, 0)
}
}
}()

var (
overflowErrors int64
oversizedErrors int64
networkErrors int64
otherErrors int64
)
client.OnError = func(err error) {
switch {
case errors.Is(err, logging.ErrOverflow):
atomic.AddInt64(&overflowErrors, 1)
case errors.Is(err, logging.ErrOversizedEntry):
atomic.AddInt64(&oversizedErrors, 1)
case errors.Is(err, logging.ErrRedirectProtoPayloadNotSupported):
atomic.AddInt64(&otherErrors, 1)
default:
// Likely network or service error
atomic.AddInt64(&networkErrors, 1)
}
}

client.OnError = func(err error) {
if errors.Is(err, logging.ErrOverflow) {
// Overflow is critical - logs are being dropped
sendAlert("CRITICAL: Cloud Logging buffer overflow - logs are being dropped!")
}
// Log all errors
log.Printf("Logging error: %v", err)
}
func sendAlert(message string) {
// Send to alerting system (PagerDuty, email, etc.)
}

var (
errorCount int64
lastErrorTime time.Time
degradedMode bool
mu sync.Mutex
)
client.OnError = func(err error) {
mu.Lock()
defer mu.Unlock()
errorCount++
lastErrorTime = time.Now()
// Enter degraded mode after 10 errors in 1 minute
if errorCount >= 10 && time.Since(lastErrorTime) < time.Minute {
if !degradedMode {
degradedMode = true
log.Println("Entering degraded logging mode")
// Reduce logging volume, increase buffer limits, etc.
}
}
log.Printf("Logging error: %v", err)
}
// Reset error count periodically
go func() {
ticker := time.NewTicker(1 * time.Minute)
for range ticker.C {
mu.Lock()
errorCount = 0
degradedMode = false
mu.Unlock()
}
}()

LogSync returns errors directly instead of using the OnError callback:
ctx := context.Background()
err := logger.LogSync(ctx, logging.Entry{
Payload: "critical event",
Severity: logging.Critical,
})
if err != nil {
// Handle error immediately
log.Printf("Failed to log critical event: %v", err)
// Maybe write to local file as backup
writeToLocalLog("critical event")
}

Flush returns summary error information:
logger := client.Logger("my-log")
// Log many entries
for i := 0; i < 1000; i++ {
logger.Log(logging.Entry{
Payload: fmt.Sprintf("Entry %d", i),
})
}
// Flush and check for errors
err := logger.Flush()
if err != nil {
// Error contains summary info about failures
log.Printf("Flush encountered errors: %v", err)
}

Note: For accurate error reporting, use Client.OnError instead of relying on Flush errors.
logger := client.Logger("my-log",
logging.BufferedByteLimit(2 << 30), // 2 GiB
)

// Sample logs
if rand.Float64() < 0.1 { // 10% sampling
logger.Log(entry)
}

logger := client.Logger("my-log",
logging.DelayThreshold(500*time.Millisecond), // Flush more frequently
logging.EntryCountThreshold(500), // Flush at 500 entries
)

logger := client.Logger("my-log",
logging.ConcurrentWriteLimit(10), // Use 10 concurrent writers
)

Don't Set EntryByteLimit (default is no limit)
Truncate Large Payloads:
// truncatePayload caps payload at maxSize bytes, appending a marker
// when truncation occurs. Note: slicing is by byte, so a multi-byte
// UTF-8 rune at the cut point may be split — acceptable for log text.
func truncatePayload(payload string, maxSize int) string {
	if len(payload) <= maxSize {
		return payload
	}
	return payload[:maxSize] + "...[truncated]"
}
logger.Log(logging.Entry{
Payload: truncatePayload(largeMessage, 200*1024), // 200 KB max
})

Don't use RedirectAsJSON with protobuf payloads, or convert protobuf to JSON first:
import (
"google.golang.org/protobuf/encoding/protojson"
)
func logProtoMessage(logger *logging.Logger, msg proto.Message) {
// Convert protobuf to JSON string
jsonBytes, err := protojson.Marshal(msg)
if err != nil {
// Handle error
return
}
logger.Log(logging.Entry{
Payload: json.RawMessage(jsonBytes),
})
}package main
import (
"context"
"errors"
"log"
"sync/atomic"
"time"
"cloud.google.com/go/logging"
)
var (
totalErrors int64
overflowErrors int64
)
// main wires up a Cloud Logging client with a custom OnError handler,
// emits a burst of structured entries, and flushes before exit.
func main() {
	ctx := context.Background()
	client, err := logging.NewClient(ctx, "my-project")
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	defer client.Close()

	// Install the error handler before creating any loggers so early
	// errors do not fall back to the default log.Printf behavior.
	client.OnError = func(err error) {
		atomic.AddInt64(&totalErrors, 1)
		switch {
		case errors.Is(err, logging.ErrOverflow):
			atomic.AddInt64(&overflowErrors, 1)
			log.Printf("CRITICAL: Buffer overflow - logs being dropped!")
		case errors.Is(err, logging.ErrOversizedEntry):
			log.Printf("WARNING: Oversized entry dropped")
		default:
			log.Printf("Logging error: %v", err)
		}
	}

	// Logger tuned with explicit buffering and concurrency limits.
	logger := client.Logger("app-log",
		logging.BufferedByteLimit(512<<20),  // 512 MiB buffer
		logging.DelayThreshold(time.Second), // Flush every second
		logging.ConcurrentWriteLimit(5),     // 5 concurrent writers
	)

	// Periodic error-count reporting in the background.
	go monitorErrors()

	// Simulated application workload.
	for n := 0; n < 10000; n++ {
		logger.Log(logging.Entry{
			Payload:  map[string]interface{}{"count": n},
			Severity: logging.Info,
		})
	}

	// Drain buffered entries before the process exits.
	if err := logger.Flush(); err != nil {
		log.Printf("Flush errors: %v", err)
	}
}
// monitorErrors periodically reports accumulated logging-error counts.
// It runs forever and is intended to be launched as a goroutine.
//
// Fix: the original used LoadInt64 followed by StoreInt64(0), so any
// error recorded between the two calls was silently discarded.
// atomic.SwapInt64 reads and resets each counter in one atomic step.
func monitorErrors() {
	ticker := time.NewTicker(10 * time.Second)
	defer ticker.Stop() // tidy up if this function ever returns
	for range ticker.C {
		// Atomically fetch-and-zero so concurrent increments are
		// never lost between the read and the reset.
		total := atomic.SwapInt64(&totalErrors, 0)
		overflow := atomic.SwapInt64(&overflowErrors, 0)
		if total > 0 {
			log.Printf("Logging errors in last 10s: %d (overflow: %d)", total, overflow)
		}
	}
}