
Advanced Features

This document covers advanced bucket and object features including lifecycle policies, retention policies, versioning, soft delete, autoclass, and encryption options. These features are part of the main storage package.

For lifecycle policies and other bucket configuration, see Client and Bucket Operations. This document provides additional context and examples for advanced use cases.
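The snippets below share some scaffolding: an authenticated *storage.Client and a context.Context, plus standard imports (fmt, io, log, time) and google.golang.org/api/iterator where iteration appears. A minimal setup sketch (bucket, object, and project names throughout are placeholders):

import (
    "context"
    "log"

    "cloud.google.com/go/storage"
)

ctx := context.Background()

// NewClient uses Application Default Credentials by default.
client, err := storage.NewClient(ctx)
if err != nil {
    log.Fatal(err)
}
defer client.Close()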

Object Versioning

Enable versioning to preserve, retrieve, and restore every version of every object.

Configuration:

import (
    "cloud.google.com/go/storage"
)

// Enable versioning on bucket
bucket := client.Bucket("my-bucket")
attrs, err := bucket.Update(ctx, storage.BucketAttrsToUpdate{
    VersioningEnabled: true, // the optional.Bool field takes a plain bool
})
if err != nil {
    log.Fatal(err)
}
fmt.Printf("Versioning enabled: %v\n", attrs.VersioningEnabled)

// Disable versioning
attrs, err = bucket.Update(ctx, storage.BucketAttrsToUpdate{
    VersioningEnabled: false,
})

Working with Versions:

// List all versions of an object
query := &storage.Query{
    Prefix:   "my-object.txt",
    Versions: true,
}
it := bucket.Objects(ctx, query)
for {
    attrs, err := it.Next()
    if err == iterator.Done {
        break
    }
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("Generation %d: created %v, size %d\n",
        attrs.Generation, attrs.Created, attrs.Size)
}

// Read specific version
obj := bucket.Object("my-object.txt").Generation(12345)
r, err := obj.NewReader(ctx)
if err != nil {
    log.Fatal(err)
}
defer r.Close()

data, err := io.ReadAll(r)
if err != nil {
    log.Fatal(err)
}
fmt.Printf("Read %d bytes from generation 12345\n", len(data))

// Delete specific version (permanent)
obj = bucket.Object("my-object.txt").Generation(12345)
err = obj.Delete(ctx)

// Delete the current version (it is retained as a noncurrent version while versioning is enabled)
obj = bucket.Object("my-object.txt")
err = obj.Delete(ctx)
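A noncurrent version can be restored by copying it over the live object, which makes it the new current generation. A minimal sketch (the generation number is illustrative):

// Promote generation 12345 back to the current version.
src := bucket.Object("my-object.txt").Generation(12345)
dst := bucket.Object("my-object.txt")
if _, err := dst.CopierFrom(src).Run(ctx); err != nil {
    log.Fatal(err)
}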

Retention and Holds

Protect objects from deletion and modification.

Retention Policy

// Set retention policy on bucket
retentionPeriod := 30 * 24 * time.Hour // 30 days
attrs, err := bucket.Update(ctx, storage.BucketAttrsToUpdate{
    RetentionPolicy: &storage.RetentionPolicy{
        RetentionPeriod: retentionPeriod,
    },
})
if err != nil {
    log.Fatal(err)
}

// Lock retention policy (irreversible!)
err = bucket.LockRetentionPolicy(ctx)
if err != nil {
    log.Fatal(err)
}

// Check if locked
attrs, err = bucket.Attrs(ctx)
if attrs.RetentionPolicy != nil && attrs.RetentionPolicy.IsLocked {
    fmt.Println("Retention policy is locked")
}

// Remove retention policy (only if not locked)
attrs, err = bucket.Update(ctx, storage.BucketAttrsToUpdate{
    RetentionPolicy: &storage.RetentionPolicy{
        RetentionPeriod: 0, // Zero duration removes policy
    },
})

Object Holds

// Set event-based hold
obj := bucket.Object("important.doc")
attrs, err := obj.Update(ctx, storage.ObjectAttrsToUpdate{
    EventBasedHold: true,
})

// Set temporary hold
attrs, err = obj.Update(ctx, storage.ObjectAttrsToUpdate{
    TemporaryHold: true,
})

// Check holds
attrs, err = obj.Attrs(ctx)
if attrs.EventBasedHold {
    fmt.Println("Event-based hold is active")
}
if attrs.TemporaryHold {
    fmt.Println("Temporary hold is active")
}

// Release holds
attrs, err = obj.Update(ctx, storage.ObjectAttrsToUpdate{
    EventBasedHold: false,
    TemporaryHold:  false,
})

// Check retention expiration
if !attrs.RetentionExpirationTime.IsZero() {
    fmt.Printf("Retention expires: %v\n", attrs.RetentionExpirationTime)
}

Soft Delete

Soft delete allows recovery of deleted objects within a retention period.

// Configure soft delete policy (7 days default for new buckets)
softDeleteDuration := 7 * 24 * time.Hour
attrs, err := bucket.Update(ctx, storage.BucketAttrsToUpdate{
    SoftDeletePolicy: &storage.SoftDeletePolicy{
        RetentionDuration: softDeleteDuration,
    },
})

// Disable soft delete (set to 0)
attrs, err = bucket.Update(ctx, storage.BucketAttrsToUpdate{
    SoftDeletePolicy: &storage.SoftDeletePolicy{
        RetentionDuration: 0,
    },
})

// Check soft delete settings
attrs, err = bucket.Attrs(ctx)
if attrs.SoftDeletePolicy != nil {
    fmt.Printf("Soft delete retention: %v\n", attrs.SoftDeletePolicy.RetentionDuration)
    fmt.Printf("Effective since: %v\n", attrs.SoftDeletePolicy.EffectiveTime)
}

// Recover a soft-deleted object
// Soft-deleted objects are listed with Query.SoftDeleted and restored
// with ObjectHandle.Restore, which requires a specific generation
query := &storage.Query{
    Prefix:      "deleted-file.txt",
    SoftDeleted: true,
}
it := bucket.Objects(ctx, query)
for {
    attrs, err := it.Next()
    if err == iterator.Done {
        break
    }
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("Soft-deleted at %v, hard delete at %v\n",
        attrs.SoftDeleteTime, attrs.HardDeleteTime)

    // Restore as the new current version
    obj := bucket.Object(attrs.Name).Generation(attrs.Generation)
    if _, err := obj.Restore(ctx, &storage.RestoreOptions{}); err != nil {
        log.Fatal(err)
    }
}
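A soft-deleted generation can also be inspected before restoring it, via the SoftDeleted object handle. A short sketch (the generation value is illustrative):

// Read the attributes of a soft-deleted generation. NewReader on the
// same handle would stream its data.
obj := bucket.Object("deleted-file.txt").Generation(12345).SoftDeleted()
attrs, err := obj.Attrs(ctx)
if err != nil {
    log.Fatal(err)
}
fmt.Printf("Soft-deleted object size: %d\n", attrs.Size)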

Autoclass

Automatic storage class management based on access patterns.

// Enable autoclass
attrs, err := bucket.Update(ctx, storage.BucketAttrsToUpdate{
    Autoclass: &storage.Autoclass{
        Enabled: true,
    },
})

// Enable with terminal storage class
attrs, err = bucket.Update(ctx, storage.BucketAttrsToUpdate{
    Autoclass: &storage.Autoclass{
        Enabled:              true,
        TerminalStorageClass: "ARCHIVE",
    },
})

// Check autoclass status
attrs, err = bucket.Attrs(ctx)
if attrs.Autoclass != nil && attrs.Autoclass.Enabled {
    fmt.Printf("Autoclass enabled since: %v\n", attrs.Autoclass.ToggleTime)
    fmt.Printf("Terminal storage class: %s\n", attrs.Autoclass.TerminalStorageClass)
}

// Disable autoclass
attrs, err = bucket.Update(ctx, storage.BucketAttrsToUpdate{
    Autoclass: &storage.Autoclass{
        Enabled: false,
    },
})

Encryption

Customer-supplied and customer-managed encryption keys.

Customer-Supplied Encryption Keys (CSEK)

// Create AES-256 key (32 bytes) with crypto/rand (not math/rand)
key := make([]byte, 32)
if _, err := rand.Read(key); err != nil {
    log.Fatal(err)
}

// Upload with encryption
obj := bucket.Object("encrypted.dat").Key(key)
w := obj.NewWriter(ctx)
if _, err := w.Write([]byte("sensitive data")); err != nil {
    log.Fatal(err)
}
if err := w.Close(); err != nil {
    log.Fatal(err)
}

// Download with encryption key
obj = bucket.Object("encrypted.dat").Key(key)
r, err := obj.NewReader(ctx)
if err != nil {
    log.Fatal(err)
}
defer r.Close()

data, err := io.ReadAll(r)
if err != nil {
    log.Fatal(err)
}
fmt.Printf("Decrypted %d bytes\n", len(data))

// Check encryption in attributes
attrs, err := obj.Attrs(ctx)
if err != nil {
    log.Fatal(err)
}
fmt.Printf("Encryption key SHA256: %s\n", attrs.CustomerKeySHA256)

Customer-Managed Encryption Keys (CMEK via Cloud KMS)

// Set default KMS key on bucket
kmsKeyName := "projects/my-project/locations/us/keyRings/my-ring/cryptoKeys/my-key"
attrs, err := bucket.Update(ctx, storage.BucketAttrsToUpdate{
    Encryption: &storage.BucketEncryption{
        DefaultKMSKeyName: kmsKeyName,
    },
})

// Upload - automatically uses the bucket's default KMS key
obj := bucket.Object("data.bin")
w := obj.NewWriter(ctx)
if _, err := w.Write(data); err != nil {
    log.Fatal(err)
}
if err := w.Close(); err != nil {
    log.Fatal(err)
}

// Copy with different KMS key
src := bucket.Object("source.bin")
dst := bucket.Object("encrypted-copy.bin")
copier := dst.CopierFrom(src)
copier.DestinationKMSKeyName = kmsKeyName
attrs, err = copier.Run(ctx)
if err != nil {
    log.Fatal(err)
}
fmt.Printf("Copy encrypted with: %s\n", attrs.KMSKeyName)
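A per-object KMS key can also be set at upload time through the writer's embedded ObjectAttrs, overriding the bucket default. A short sketch:

// Upload using an explicit KMS key rather than the bucket default.
w := bucket.Object("kms-object.bin").NewWriter(ctx)
w.KMSKeyName = kmsKeyName
if _, err := w.Write(data); err != nil {
    log.Fatal(err)
}
if err := w.Close(); err != nil {
    log.Fatal(err)
}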

Custom Time

User-specified timestamp for lifecycle rules and custom workflows.

// Set custom time on upload
customTime := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)
obj := bucket.Object("document.pdf")
w := obj.NewWriter(ctx)
w.CustomTime = customTime
if _, err := w.Write(data); err != nil {
    log.Fatal(err)
}
if err := w.Close(); err != nil {
    log.Fatal(err)
}

// Update custom time (CustomTime can only move forward, never backward)
attrs, err := obj.Update(ctx, storage.ObjectAttrsToUpdate{
    CustomTime: time.Now(),
})
if err != nil {
    log.Fatal(err)
}
fmt.Printf("CustomTime is now %v\n", attrs.CustomTime)

// Use in lifecycle rules
lifecycle := storage.Lifecycle{
    Rules: []storage.LifecycleRule{
        {
            Action: storage.LifecycleAction{
                Type: storage.DeleteAction,
            },
            Condition: storage.LifecycleCondition{
                // Delete 90 days after custom time
                DaysSinceCustomTime: 90,
            },
        },
    },
}

attrs, err = bucket.Update(ctx, storage.BucketAttrsToUpdate{
    Lifecycle: &lifecycle,
})

Hierarchical Namespace

Enable folder support for buckets.

// Create bucket with hierarchical namespace
// Must be set at bucket creation - cannot be changed later
// Requires UniformBucketLevelAccess to be enabled
bucketAttrs := &storage.BucketAttrs{
    Name:     "my-hns-bucket",
    Location: "US",
    UniformBucketLevelAccess: storage.UniformBucketLevelAccess{
        Enabled: true,
    },
    HierarchicalNamespace: &storage.HierarchicalNamespace{
        Enabled: true,
    },
}

bucket := client.Bucket("my-hns-bucket")
err := bucket.Create(ctx, "my-project", bucketAttrs)
if err != nil {
    log.Fatal(err)
}

// Check if hierarchical namespace is enabled
attrs, err := bucket.Attrs(ctx)
if attrs.HierarchicalNamespace != nil && attrs.HierarchicalNamespace.Enabled {
    fmt.Println("Hierarchical namespace enabled")
    // Use Storage Control API for folder operations
    // See control-api.md for folder management
}
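For folder management itself, the separate Storage Control client is used. The following is a sketch only (see control-api.md for the authoritative examples); the bucket and folder names are assumptions:

import (
    control "cloud.google.com/go/storage/control/apiv2"
    "cloud.google.com/go/storage/control/apiv2/controlpb"
)

// Create a folder in an HNS-enabled bucket; "_" is the project alias
// accepted in bucket resource names.
ctrl, err := control.NewStorageControlClient(ctx)
if err != nil {
    log.Fatal(err)
}
defer ctrl.Close()

folder, err := ctrl.CreateFolder(ctx, &controlpb.CreateFolderRequest{
    Parent:   "projects/_/buckets/my-hns-bucket",
    FolderId: "my-folder/",
})
if err != nil {
    log.Fatal(err)
}
fmt.Printf("Created folder: %s\n", folder.Name)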

Requester Pays

Charge storage costs to the requester instead of the bucket owner.

// Enable requester pays on bucket
attrs, err := bucket.Update(ctx, storage.BucketAttrsToUpdate{
    RequesterPays: true,
})

// Access requester pays bucket (provide billing project)
bucket = client.Bucket("requester-pays-bucket").UserProject("my-billing-project")

// All operations now bill to my-billing-project
obj := bucket.Object("data.bin")
r, err := obj.NewReader(ctx)
if err != nil {
    log.Fatal(err)
}
defer r.Close()

// Check requester pays status
attrs, err = bucket.Attrs(ctx)
if attrs.RequesterPays {
    fmt.Println("Requester pays enabled")
}

RPO (Recovery Point Objective)

Configure replication for dual-region buckets.

// Set turbo replication (dual-region only)
attrs, err := bucket.Update(ctx, storage.BucketAttrsToUpdate{
    RPO: storage.RPOAsyncTurbo,
})

// Set default replication
attrs, err = bucket.Update(ctx, storage.BucketAttrsToUpdate{
    RPO: storage.RPODefault,
})

// Check RPO setting
attrs, err = bucket.Attrs(ctx)
fmt.Printf("RPO: %s\n", attrs.RPO)
if attrs.RPO == storage.RPOAsyncTurbo {
    fmt.Println("Turbo replication enabled")
}

Custom Placement (Dual Regions)

Specify exact regions for dual-region buckets.

// Create bucket with custom dual-region placement
bucketAttrs := &storage.BucketAttrs{
    Name:     "my-dual-region-bucket",
    Location: "US", // Must be compatible with data locations
    CustomPlacementConfig: &storage.CustomPlacementConfig{
        DataLocations: []string{"us-east1", "us-west1"},
    },
}

bucket := client.Bucket("my-dual-region-bucket")
err := bucket.Create(ctx, "my-project", bucketAttrs)

// Check custom placement
attrs, err := bucket.Attrs(ctx)
if attrs.CustomPlacementConfig != nil {
    fmt.Printf("Data locations: %v\n", attrs.CustomPlacementConfig.DataLocations)
}