A Golang job scheduling library that lets you run Go functions at pre-determined intervals using cron expressions, fixed durations, daily, weekly, monthly, or one-time schedules with support for distributed deployments.
Locker interface, implementations, and lock keys for per-job mutual exclusion.
Distributed locking provides per-job mutual exclusion across multiple instances. Unlike leader election (all-or-nothing), distributed locking allows different jobs to run on different instances while ensuring each job runs on only one instance at a time.
type Locker interface {
Lock(ctx context.Context, key string) (Lock, error)
}
type Lock interface {
Unlock(ctx context.Context) error
}

type Locker interface {
	Lock(ctx context.Context, key string) (Lock, error)
}

Lock attempts to acquire a distributed lock:
- key: Job name (or qualified function name if unnamed)
- Returns a Lock if acquired successfully.

type Lock interface {
	Unlock(ctx context.Context) error
}

Unlock releases the lock after job completion.
func WithDistributedLocker(locker Locker) SchedulerOption

Applies to all jobs:

s, err := gocron.NewScheduler(
	gocron.WithDistributedLocker(locker),
)

Returns ErrWithDistributedLockerNil if locker is nil.
func WithDistributedJobLocker(locker Locker) JobOption

Overrides the scheduler-level locker for a specific job:

j, _ := s.NewJob(
	gocron.DurationJob(time.Minute),
	gocron.NewTask(myFunc),
	gocron.WithDistributedJobLocker(customLocker), // Override
)

Returns ErrWithDistributedJobLockerNil if locker is nil.
func WithDisabledDistributedJobLocker(disabled bool) JobOption

Disables the scheduler-level locker for a specific job:

s, _ := gocron.NewScheduler(
	gocron.WithDistributedLocker(locker), // Applied to all jobs
)

// This job doesn't use distributed locking
j, _ := s.NewJob(
	gocron.DurationJob(time.Minute),
	gocron.NewTask(localOnlyFunc),
	gocron.WithDisabledDistributedJobLocker(true),
)

Before each run the scheduler calls locker.Lock(ctx, jobName); after the run completes, lock.Unlock() is called.

Instance A:
09:00 - Lock "job1" → success → Run job1 → Unlock
09:01 - Lock "job2" → success → Run job2 → Unlock
Instance B:
09:00 - Lock "job1" → fail (A holds it) → Skip
09:01 - Lock "job2" → fail (A holds it) → Skip

Lock key is the job's name (set via WithName):
// Lock key: "database-cleanup"
j, _ := s.NewJob(
gocron.DurationJob(time.Minute),
gocron.NewTask(myFunc),
gocron.WithName("database-cleanup"), // This becomes the lock key
)

If no name is set, the qualified function name is used:
// Lock key: "mypackage.myFunc"
j, _ := s.NewJob(
gocron.DurationJob(time.Minute),
gocron.NewTask(myFunc), // No name → uses "mypackage.myFunc"
)

Important: Use WithName for stable, predictable lock keys.
Use distributed locking when:
Don't use distributed locking when:
Use Leader Election instead for all-or-nothing control.
import (
"context"
"errors"
"time"
"github.com/go-co-op/gocron/v2"
"github.com/redis/go-redis/v9"
)
type redisLocker struct {
client *redis.Client
}
func NewRedisLocker(client *redis.Client) *redisLocker {
return &redisLocker{client: client}
}
func (l *redisLocker) Lock(ctx context.Context, key string) (gocron.Lock, error) {
// TTL should be longer than max expected job duration
ttl := 5 * time.Minute
lockKey := "gocron:lock:" + key
// Try to acquire lock
ok, err := l.client.SetNX(ctx, lockKey, "1", ttl).Result()
if err != nil {
return nil, err
}
if !ok {
return nil, errors.New("lock already held")
}
return &redisLock{client: l.client, key: lockKey}, nil
}
type redisLock struct {
client *redis.Client
key string
}
func (l *redisLock) Unlock(ctx context.Context) error {
return l.client.Del(ctx, l.key).Err()
}func main() {
client := redis.NewClient(&redis.Options{
Addr: "localhost:6379",
})
defer client.Close()
locker := NewRedisLocker(client)
s, _ := gocron.NewScheduler(
gocron.WithDistributedLocker(locker),
)
defer s.Shutdown()
s.NewJob(
gocron.DurationJob(time.Minute),
gocron.NewTask(doWork),
gocron.WithName("my-job"), // Lock key
)
s.Start()
select {}
}For long-running jobs, renew the lock periodically:
type renewableRedisLock struct {
client *redis.Client
key string
ttl time.Duration
cancel context.CancelFunc
}
func (l *redisLocker) Lock(ctx context.Context, key string) (gocron.Lock, error) {
ttl := 5 * time.Minute
lockKey := "gocron:lock:" + key
ok, err := l.client.SetNX(ctx, lockKey, "1", ttl).Result()
if err != nil {
return nil, err
}
if !ok {
return nil, errors.New("lock already held")
}
lock := &renewableRedisLock{
client: l.client,
key: lockKey,
ttl: ttl,
}
lock.startRenewal(ctx)
return lock, nil
}
func (l *renewableRedisLock) startRenewal(ctx context.Context) {
renewCtx, cancel := context.WithCancel(ctx)
l.cancel = cancel
ticker := time.NewTicker(l.ttl / 2) // Renew at half TTL
go func() {
defer ticker.Stop()
for {
select {
case <-renewCtx.Done():
return
case <-ticker.C:
l.client.Expire(ctx, l.key, l.ttl)
}
}
}()
}
func (l *renewableRedisLock) Unlock(ctx context.Context) error {
l.cancel() // Stop renewal
return l.client.Del(ctx, l.key).Err()
}import (
"context"
"database/sql"
"errors"
"hash/fnv"
"github.com/go-co-op/gocron/v2"
_ "github.com/lib/pq"
)
type postgresLocker struct {
db *sql.DB
}
func NewPostgresLocker(db *sql.DB) *postgresLocker {
return &postgresLocker{db: db}
}
func (l *postgresLocker) Lock(ctx context.Context, key string) (gocron.Lock, error) {
// Hash the key to a 64-bit integer
lockID := hashKeyToInt64(key)
// Try to acquire advisory lock
var acquired bool
err := l.db.QueryRowContext(ctx, "SELECT pg_try_advisory_lock($1)", lockID).Scan(&acquired)
if err != nil {
return nil, err
}
if !acquired {
return nil, errors.New("lock already held")
}
return &postgresLock{db: l.db, lockID: lockID}, nil
}
type postgresLock struct {
db *sql.DB
lockID int64
}
func (l *postgresLock) Unlock(ctx context.Context) error {
var released bool
err := l.db.QueryRowContext(ctx, "SELECT pg_advisory_unlock($1)", l.lockID).Scan(&released)
if err != nil {
return err
}
if !released {
return errors.New("lock was not held")
}
return nil
}
func hashKeyToInt64(key string) int64 {
h := fnv.New64a()
h.Write([]byte(key))
return int64(h.Sum64())
}Advantages:
Limitations:
import (
"context"
"errors"
"time"
"github.com/go-co-op/gocron/v2"
"github.com/go-redsync/redsync/v4"
"github.com/go-redsync/redsync/v4/redis/goredis/v9"
"github.com/redis/go-redis/v9"
)
type redlockLocker struct {
rs *redsync.Redsync
}
func NewRedlockLocker(clients []*redis.Client) *redlockLocker {
pools := make([]redsync.Pool, len(clients))
for i, client := range clients {
pools[i] = goredis.NewPool(client)
}
rs := redsync.New(pools...)
return &redlockLocker{rs: rs}
}
func (l *redlockLocker) Lock(ctx context.Context, key string) (gocron.Lock, error) {
mutex := l.rs.NewMutex(
"gocron:lock:"+key,
redsync.WithExpiry(5*time.Minute),
redsync.WithTries(3),
)
if err := mutex.LockContext(ctx); err != nil {
return nil, errors.New("lock already held")
}
return &redlockLock{mutex: mutex}, nil
}
type redlockLock struct {
mutex *redsync.Mutex
}
func (l *redlockLock) Unlock(ctx context.Context) error {
_, err := l.mutex.UnlockContext(ctx)
return err
}Advantages:
// GOOD: Stable, explicit name
j, _ := s.NewJob(
gocron.DurationJob(time.Minute),
gocron.NewTask(myFunc),
gocron.WithName("database-cleanup"), // Explicit lock key
)
// BAD: Function name may change if code refactored
j, _ := s.NewJob(
gocron.DurationJob(time.Minute),
gocron.NewTask(myFunc), // Default key = "pkg.myFunc"
)

// TTL should be 2-3x max expected job duration
// Job takes 1-2 minutes → TTL = 5 minutes
ttl := 5 * time.Minute

Too short: the lock expires during job execution and another instance acquires it.
Too long: a failed instance holds the lock longer, delaying failover.
j, _ := s.NewJob(
gocron.DurationJob(time.Minute),
gocron.NewTask(myFunc),
gocron.WithName("my-job"),
gocron.WithEventListeners(
gocron.AfterLockError(func(jobID uuid.UUID, jobName string, err error) {
log.Printf("Lock failed for %s: %v", jobName, err)
metrics.IncrementLockFailure(jobName)
}),
),
)

Use NTP to synchronize clocks:
# Install NTP
sudo apt-get install ntp
# Verify synchronization
ntpq -p

Clock skew impact:
With around 5 seconds of skew, one instance consistently wins locks.
For distributed deployments, align jobs to time boundaries:
// Align to 5-minute boundaries
now := time.Now()
next5Min := now.Truncate(5*time.Minute).Add(5*time.Minute)
j, _ := s.NewJob(
gocron.DurationJob(5*time.Minute),
gocron.NewTask(myFunc),
gocron.WithName("aligned-job"),
gocron.WithStartAt(gocron.WithStartDateTime(next5Min)),
)

Reduces lock contention by ensuring all instances schedule at the same time.
type monitoringLocker struct {
locker Locker
}
func (l *monitoringLocker) Lock(ctx context.Context, key string) (gocron.Lock, error) {
start := time.Now()
lock, err := l.locker.Lock(ctx, key)
duration := time.Since(start)
if err != nil {
metrics.IncrementLockFailure(key)
log.Printf("Lock failed for %s after %v: %v", key, duration, err)
} else {
metrics.RecordLockAcquisition(key, duration.Seconds())
}
return lock, err
}s, _ := gocron.NewScheduler(
gocron.WithDistributedLocker(redisLocker),
)
// Most jobs use Redis locker
s.NewJob(
gocron.DurationJob(time.Minute),
gocron.NewTask(normalJob),
gocron.WithName("normal-job"),
)
// Critical job uses PostgreSQL locker for stronger guarantees
s.NewJob(
gocron.DurationJob(time.Minute),
gocron.NewTask(criticalJob),
gocron.WithName("critical-job"),
gocron.WithDistributedJobLocker(postgresLocker), // Override
)

s, _ := gocron.NewScheduler(
gocron.WithDistributedLocker(locker),
)
// Distributed job
s.NewJob(
gocron.DurationJob(time.Minute),
gocron.NewTask(distributedJob),
gocron.WithName("distributed-job"),
)
// Local-only job (no locking)
s.NewJob(
gocron.DurationJob(time.Second),
gocron.NewTask(localMetrics),
gocron.WithDisabledDistributedJobLocker(true),
)

// Group jobs by tenant/resource
j, _ := s.NewJob(
gocron.DurationJob(time.Minute),
gocron.NewTask(func(tenantID string) {
processTenant(tenantID)
}, "tenant-123"),
gocron.WithName("process-tenant:tenant-123"), // Lock per tenant
)

Symptom: Same instance consistently acquires locks.
Causes:
Solutions:
Symptom: All instances fail to acquire locks.
Causes:
Solutions:
Symptom: Lock held indefinitely, jobs stop running.
Causes:
Solutions:
Symptom: Many lock failures, jobs often skipped.
Causes:
Solutions:
Locker and Lock interfaces; WithDistributedLocker; WithDistributedJobLocker; WithDisabledDistributedJobLocker

Install with Tessl CLI:
npx tessl i tessl/golang-github-com-go-co-op-gocron-v2@2.19.1

docs
api
examples
guides