A Golang job scheduling library that lets you run Go functions at pre-determined intervals using cron expressions, fixed durations, daily, weekly, monthly, or one-time schedules with support for distributed deployments.
Common scheduled tasks for web applications: cache refresh, session cleanup, metrics collection.
// Refresh cache every 5 minutes
j, _ := s.NewJob(
gocron.DurationJob(5*time.Minute),
gocron.NewTask(func() {
fmt.Println("Refreshing cache...")
data := fetchDataFromDatabase()
cache.Set("app:data", data, 10*time.Minute)
fmt.Println("Cache refreshed")
}),
gocron.WithName("cache-refresh"),
gocron.WithSingletonMode(gocron.LimitModeReschedule),
)// Refresh cache before expiry
// Stale-while-revalidate style refresh: re-populate the cache one minute
// before its 5-minute TTL lapses so readers never observe a cache miss.
j, _ := s.NewJob(
gocron.DurationJob(4*time.Minute),
gocron.NewTask(func() {
// Cache TTL is 5 minutes, refresh at 4 minutes
data := fetchDataFromDatabase()
cache.Set("app:data", data, 5*time.Minute)
}),
gocron.WithName("swr-cache"),
// Measure the 4-minute interval from when the previous run COMPLETED,
// not from when it started, so a slow fetch cannot cause overlapping runs.
gocron.WithIntervalFromCompletion(),
)// Warm L1 and L2 caches
// Warm a two-tier cache every 10 minutes. The task returns an error so
// failures can be observed via the AfterJobRunsWithError listener below.
j, _ := s.NewJob(
gocron.DurationJob(10*time.Minute),
gocron.NewTask(func() error {
// Warm L2 cache (Redis)
data, err := fetchFromDatabase()
if err != nil {
return fmt.Errorf("database fetch failed: %w", err)
}
// L2 gets the longer TTL (15m) so it outlives the in-memory tier.
cache.L2.Set("data", data, 15*time.Minute)
// Warm L1 cache (in-memory)
cache.L1.Set("data", data, 5*time.Minute)
log.Println("Multi-tier cache warmed")
return nil
}),
gocron.WithName("multi-tier-cache-warm"),
gocron.WithSingletonMode(gocron.LimitModeReschedule),
// Fired only when the task returns a non-nil error; used for alerting.
gocron.WithEventListeners(
gocron.AfterJobRunsWithError(func(jobID uuid.UUID, jobName string, err error) {
log.Printf("Cache warming failed: %v", err)
alertOps("cache-warming-failure", err)
}),
),
)// Only refresh if cache is stale
j, _ := s.NewJob(
gocron.DurationJob(2*time.Minute),
gocron.NewTask(func() {
if cache.IsFresh("app:data") {
log.Println("Cache still fresh, skipping refresh")
return
}
log.Println("Cache stale, refreshing...")
data := fetchDataFromDatabase()
cache.Set("app:data", data, 10*time.Minute)
}),
gocron.WithName("conditional-cache-refresh"),
)// Clean up expired sessions every hour
// Hourly cleanup: delete each expired session row and evict its cached
// copy so a stale session cannot be served from cache after deletion.
j, _ := s.NewJob(
gocron.DurationJob(time.Hour),
gocron.NewTask(func() {
count := 0
expiredSessions := db.Query("SELECT id FROM sessions WHERE expires_at < NOW()")
// NOTE(review): one DELETE per row (N+1 pattern); a single batched
// DELETE would be cheaper if the db layer supports it — confirm.
for _, session := range expiredSessions {
db.Delete("sessions", session.ID)
cache.Delete("session:" + session.ID)
count++
}
log.Printf("Cleaned up %d expired sessions", count)
metrics.RecordSessionCleanup(count)
}),
gocron.WithName("session-cleanup"),
// Skip (reschedule) a tick if the previous cleanup is still running.
gocron.WithSingletonMode(gocron.LimitModeReschedule),
)// Remove inactive sessions daily at 3 AM
j, _ := s.NewJob(
gocron.DailyJob(1, gocron.NewAtTimes(gocron.NewAtTime(3, 0, 0))),
gocron.NewTask(func() {
inactiveThreshold := time.Now().Add(-30 * 24 * time.Hour) // 30 days
result := db.Exec(`
DELETE FROM sessions
WHERE last_activity < ?
AND expires_at > NOW()
`, inactiveThreshold)
count := result.RowsAffected
log.Printf("Pruned %d inactive sessions", count)
// Alert if too many inactive sessions
if count > 10000 {
alertOps("high-inactive-sessions", fmt.Sprintf("%d sessions pruned", count))
}
}),
gocron.WithName("session-prune"),
)// Compact session store weekly
// Weekly maintenance window: compact the session store every Sunday at 4 AM,
// when traffic is presumably lowest — confirm against real traffic patterns.
j, _ := s.NewJob(
gocron.WeeklyJob(
1,
gocron.NewWeekdays(time.Sunday),
gocron.NewAtTimes(gocron.NewAtTime(4, 0, 0)),
),
gocron.NewTask(func() {
log.Println("Starting session store compaction")
// Remove orphaned session data
db.Exec("DELETE FROM session_data WHERE session_id NOT IN (SELECT id FROM sessions)")
// Vacuum session tables
db.Exec("VACUUM ANALYZE sessions")
db.Exec("VACUUM ANALYZE session_data")
log.Println("Session store compaction complete")
}),
gocron.WithName("session-compaction"),
)// Aggregate and flush metrics every minute
// Aggregate and flush metrics every minute: push to the metrics backend
// and persist a row for historical analysis.
j, _ := s.NewJob(
    gocron.DurationJob(time.Minute),
    gocron.NewTask(func() {
        // Named appMetrics (not "metrics") to avoid shadowing the
        // metrics package that the other jobs in this file use.
        appMetrics := collectApplicationMetrics()
        // Send to metrics backend
        prometheus.RecordMetrics(appMetrics)
        // Also store in database for historical analysis
        db.Insert("metrics", map[string]interface{}{
            "timestamp":      time.Now(),
            "active_users":   appMetrics.ActiveUsers,
            "requests_total": appMetrics.RequestsTotal,
            "error_rate":     appMetrics.ErrorRate,
        })
    }),
    gocron.WithName("metrics-aggregation"),
    // Skip a tick rather than stacking runs if a flush is slow.
    gocron.WithSingletonMode(gocron.LimitModeReschedule),
)

// Roll up hourly analytics every hour
// Roll up the previous hour's raw events into per-type hourly aggregates.
j, _ := s.NewJob(
    gocron.CronJob("0 * * * *", false), // Every hour at minute 0
    gocron.NewTask(func() {
        // Aggregate the hour that just finished: [windowStart, windowEnd).
        windowStart := time.Now().Add(-time.Hour).Truncate(time.Hour)
        windowEnd := windowStart.Add(time.Hour)
        rollups := db.Query(`
SELECT
COUNT(DISTINCT user_id) as unique_users,
COUNT(*) as total_events,
event_type
FROM events
WHERE created_at >= ? AND created_at < ?
GROUP BY event_type
`, windowStart, windowEnd)
        for _, rollup := range rollups {
            db.Insert("analytics_hourly", map[string]interface{}{
                "hour":         windowStart,
                "event_type":   rollup.EventType,
                "unique_users": rollup.UniqueUsers,
                "total_events": rollup.TotalEvents,
            })
        }
        log.Printf("Rolled up analytics for %v", windowStart)
    }),
    gocron.WithName("analytics-rollup"),
)

// Take performance snapshot every 5 minutes
// Capture a point-in-time latency/throughput/resource snapshot every 5
// minutes and alert when tail latency degrades.
j, _ := s.NewJob(
gocron.DurationJob(5*time.Minute),
gocron.NewTask(func() {
snapshot := PerformanceSnapshot{
Timestamp: time.Now(),
ResponseTimeP50: metrics.GetPercentile("response_time", 0.50),
ResponseTimeP95: metrics.GetPercentile("response_time", 0.95),
ResponseTimeP99: metrics.GetPercentile("response_time", 0.99),
ErrorRate: metrics.GetRate("errors"),
RequestRate: metrics.GetRate("requests"),
CPUUsage: system.GetCPUUsage(),
MemoryUsage: system.GetMemoryUsage(),
}
db.Insert("performance_snapshots", snapshot)
// Alert on degraded performance
// NOTE(review): threshold assumes percentiles are reported in
// milliseconds — confirm the units exposed by the metrics backend.
if snapshot.ResponseTimeP95 > 1000 { // > 1s
alertOps("slow-response-time", fmt.Sprintf("P95: %.2fms", snapshot.ResponseTimeP95))
}
}),
gocron.WithName("performance-snapshot"),
)// Reset rate limit counters every minute
// Roll the sliding-window rate-limit counters forward every minute and
// garbage-collect stragglers older than ten minutes.
j, _ := s.NewJob(
gocron.DurationJob(time.Minute),
gocron.NewTask(func() {
// Reset sliding window counters
currentMinute := time.Now().Truncate(time.Minute)
previousMinute := currentMinute.Add(-time.Minute)
cache.Delete("rate_limit:" + previousMinute.Format("2006-01-02T15:04"))
// Clean up old rate limit data
cutoff := time.Now().Add(-10 * time.Minute)
pattern := "rate_limit:*"
keys := cache.Keys(pattern)
// NOTE(review): presumably isOlderThan parses the timestamp embedded
// in the key suffix — verify against its implementation.
for _, key := range keys {
if isOlderThan(key, cutoff) {
cache.Delete(key)
}
}
}),
gocron.WithName("rate-limit-reset"),
)// Reset daily API quotas at midnight
// Reset daily API quotas at midnight and log the day's usage summary.
j, _ := s.NewJob(
    gocron.DailyJob(1, gocron.NewAtTimes(gocron.NewAtTime(0, 0, 0))),
    gocron.NewTask(func() {
        log.Println("Resetting daily API quotas")
        // Capture the day's usage BEFORE zeroing it. (Querying after the
        // reset — as this job previously did — always reports 0 requests.)
        stats := db.QueryOne(`
SELECT
COUNT(*) as total_keys,
SUM(daily_usage) as total_requests,
AVG(daily_usage) as avg_requests
FROM api_keys
`)
        // Reset all user quotas
        db.Exec("UPDATE api_keys SET daily_usage = 0")
        // Clear cache
        cache.DeletePattern("api_quota:*")
        // Log quota usage
        log.Printf("Daily API stats: %d keys, %d total requests, %.0f avg requests",
            stats.TotalKeys, stats.TotalRequests, stats.AvgRequests)
    }),
    gocron.WithName("daily-quota-reset"),
)

// Process email queue every 30 seconds
// Drain the outbound email queue in priority order, 50 at a time.
// Rows move pending -> sent on success, pending -> failed on error
// (the retry job picks failed rows back up).
j, _ := s.NewJob(
gocron.DurationJob(30*time.Second),
gocron.NewTask(func() {
// Bounded batch keeps each run short and memory use predictable.
batchSize := 50
emails := db.Query(`
SELECT * FROM email_queue
WHERE status = 'pending'
ORDER BY priority DESC, created_at ASC
LIMIT ?
`, batchSize)
for _, email := range emails {
err := sendEmail(email)
if err != nil {
// Mark as failed, will retry later
db.Exec(`
UPDATE email_queue
SET status = 'failed', attempts = attempts + 1, error = ?
WHERE id = ?
`, err.Error(), email.ID)
} else {
// Mark as sent
db.Exec(`
UPDATE email_queue
SET status = 'sent', sent_at = NOW()
WHERE id = ?
`, email.ID)
}
}
log.Printf("Processed %d emails", len(emails))
}),
gocron.WithName("email-queue"),
// Singleton + interval-from-completion together guarantee a single
// drainer with a full 30s gap between the end of one run and the next.
gocron.WithSingletonMode(gocron.LimitModeReschedule),
gocron.WithIntervalFromCompletion(),
)// Retry failed emails every 5 minutes
j, _ := s.NewJob(
gocron.DurationJob(5*time.Minute),
gocron.NewTask(func() {
emails := db.Query(`
SELECT * FROM email_queue
WHERE status = 'failed'
AND attempts < 3
AND created_at > NOW() - INTERVAL '1 day'
`)
for _, email := range emails {
err := sendEmail(email)
if err == nil {
db.Exec("UPDATE email_queue SET status = 'sent', sent_at = NOW() WHERE id = ?", email.ID)
} else {
db.Exec("UPDATE email_queue SET attempts = attempts + 1 WHERE id = ?", email.ID)
}
}
}),
gocron.WithName("email-retry"),
)package main
import (
"fmt"
"log"
"time"
"github.com/go-co-op/gocron/v2"
)
func main() {
s, _ := gocron.NewScheduler()
defer s.Shutdown()
// Cache refresh every 5 minutes
s.NewJob(
gocron.DurationJob(5*time.Minute),
gocron.NewTask(func() {
data := fetchFromDatabase()
cache.Set("app:data", data, 10*time.Minute)
log.Println("Cache refreshed")
}),
gocron.WithName("cache-refresh"),
gocron.WithSingletonMode(gocron.LimitModeReschedule),
)
// Session cleanup hourly
s.NewJob(
gocron.DurationJob(time.Hour),
gocron.NewTask(func() {
count := cleanupExpiredSessions()
log.Printf("Cleaned up %d sessions", count)
}),
gocron.WithName("session-cleanup"),
)
// Metrics every minute
s.NewJob(
gocron.DurationJob(time.Minute),
gocron.NewTask(func() {
metrics := collectMetrics()
sendToPrometheus(metrics)
}),
gocron.WithName("metrics-collection"),
)
// Email queue every 30 seconds
s.NewJob(
gocron.DurationJob(30*time.Second),
gocron.NewTask(func() {
processEmailQueue()
}),
gocron.WithName("email-queue"),
gocron.WithIntervalFromCompletion(),
)
// Daily analytics rollup at 1 AM
s.NewJob(
gocron.DailyJob(1, gocron.NewAtTimes(gocron.NewAtTime(1, 0, 0))),
gocron.NewTask(func() {
rollupDailyAnalytics()
}),
gocron.WithName("daily-analytics"),
)
s.Start()
log.Println("Web app scheduler started")
select {}
}Install with Tessl CLI
npx tessl i tessl/golang-github-com-go-co-op-gocron-v2

docs · api · examples · guides