tessl install tessl/golang-cloud-google-com-go-storage@1.59.0

Google Cloud Storage client library for Go providing comprehensive APIs for bucket and object operations, access control, and advanced features.
The Google Cloud Storage client library for Go provides comprehensive, production-ready APIs for interacting with Google Cloud Storage (GCS). This library enables bucket and object operations, access control management, lifecycle policies, signed URLs, and advanced features like versioning and retention. It supports both HTTP/JSON and gRPC transports with full feature parity, offering flexibility for different performance and connectivity requirements.
go get cloud.google.com/go/storage@v1.59.1

import (
"cloud.google.com/go/storage"
"context"
)

For Storage Control API (folders, managed folders, caches):
import (
control "cloud.google.com/go/storage/control/apiv2"
controlpb "cloud.google.com/go/storage/control/apiv2/controlpb"
)

For experimental features:
import "cloud.google.com/go/storage/experimental"

For performance features (preview):
import (
"cloud.google.com/go/storage/dataflux"
"cloud.google.com/go/storage/transfermanager"
)

package main
import (
	"context"
	"fmt"
	"io"
	"log"

	"cloud.google.com/go/storage"
	"google.golang.org/api/iterator"
)
func main() {
ctx := context.Background()
// Create client (HTTP/JSON transport)
client, err := storage.NewClient(ctx)
if err != nil {
log.Fatal(err)
}
defer client.Close()
// Upload an object
bucket := client.Bucket("my-bucket")
obj := bucket.Object("my-object.txt")
w := obj.NewWriter(ctx)
if _, err := w.Write([]byte("Hello, Cloud Storage!")); err != nil {
log.Fatal(err)
}
if err := w.Close(); err != nil {
log.Fatal(err)
}
// Download an object
r, err := obj.NewReader(ctx)
if err != nil {
log.Fatal(err)
}
defer r.Close()
data, err := io.ReadAll(r)
if err != nil {
log.Fatal(err)
}
fmt.Printf("Object contents: %s\n", data)
// List objects
it := bucket.Objects(ctx, &storage.Query{Prefix: "my-"})
for {
attrs, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
log.Fatal(err)
}
fmt.Printf("Object: %s\n", attrs.Name)
}
}

The storage package is organized around several key architectural patterns:

- Client, BucketHandle, and ObjectHandle provide lazy resource references that support method chaining and configuration before execution
- HTTP/JSON (NewClient) or gRPC (NewGRPCClient) transports with identical APIs
- Reader and Writer interfaces enable memory-efficient transfers of large objects
- Paginated listing via ObjectIterator and BucketIterator
- Configurable retry behavior via RetryOption

Core client creation, bucket management, and bucket-level configuration including lifecycle policies, CORS, encryption, and logging.
// Create HTTP/JSON client
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error)
// Create gRPC client
func NewGRPCClient(ctx context.Context, opts ...option.ClientOption) (*Client, error)
// Client methods
type Client struct {
// Methods
Bucket(name string) *BucketHandle
Buckets(ctx context.Context, projectID string) *BucketIterator
CreateHMACKey(ctx context.Context, projectID, serviceAccountEmail string, opts ...HMACKeyOption) (*HMACKey, error)
ServiceAccount(ctx context.Context, projectID string) (string, error)
SetRetry(opts ...RetryOption)
Close() error
}

Comprehensive object operations including reading, writing, metadata management, copying, and composing multiple objects.
// Object handle creation
func (b *BucketHandle) Object(name string) *ObjectHandle
// Core object operations
type ObjectHandle struct {
// Read operations
NewReader(ctx context.Context) (*Reader, error)
NewRangeReader(ctx context.Context, offset, length int64) (*Reader, error)
// Write operations
NewWriter(ctx context.Context) *Writer
// Metadata operations
Attrs(ctx context.Context) (*ObjectAttrs, error)
Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (*ObjectAttrs, error)
Delete(ctx context.Context) error
// Advanced operations
CopierFrom(src *ObjectHandle) *Copier
ComposerFrom(sources ...*ObjectHandle) *Composer
}

Access control lists (ACLs), IAM policies, signed URLs for time-limited access, and POST policies for browser uploads.
// ACL operations
type ACLHandle struct {
List(ctx context.Context) ([]ACLRule, error)
Set(ctx context.Context, entity ACLEntity, role ACLRole) error
Delete(ctx context.Context, entity ACLEntity) error
}
// Signed URL generation
func SignedURL(bucket, object string, opts *SignedURLOptions) (string, error)
// IAM policy management
func (b *BucketHandle) IAM() *iam.Handle

Lifecycle policies, retention policies, versioning, soft delete, autoclass, hierarchical namespaces, and encryption options.
// Lifecycle management
type Lifecycle struct {
Rules []LifecycleRule
}
type LifecycleRule struct {
Action LifecycleAction
Condition LifecycleCondition
}
// Retention policy
type RetentionPolicy struct {
RetentionPeriod time.Duration
EffectiveTime time.Time
IsLocked bool
}
// Versioning
type BucketAttrs struct {
VersioningEnabled bool
// ... other fields
}

Folders, managed folders, anywhere caches, storage layouts, and intelligence configurations for advanced bucket organization.
// Create Storage Control client
func NewStorageControlClient(ctx context.Context, opts ...option.ClientOption) (*StorageControlClient, error)
// Folder operations
type StorageControlClient struct {
CreateFolder(ctx context.Context, req *controlpb.CreateFolderRequest, opts ...gax.CallOption) (*controlpb.Folder, error)
DeleteFolder(ctx context.Context, req *controlpb.DeleteFolderRequest, opts ...gax.CallOption) error
RenameFolder(ctx context.Context, req *controlpb.RenameFolderRequest, opts ...gax.CallOption) (*RenameFolderOperation, error)
// Managed folder operations
CreateManagedFolder(ctx context.Context, req *controlpb.CreateManagedFolderRequest, opts ...gax.CallOption) (*controlpb.ManagedFolder, error)
// Cache operations
CreateAnywhereCache(ctx context.Context, req *controlpb.CreateAnywhereCacheRequest, opts ...gax.CallOption) (*CreateAnywhereCacheOperation, error)
}

High-performance parallelized listing (dataflux) and downloads (transfer manager) for large-scale operations.
// Fast parallel listing (preview)
func NewLister(c *storage.Client, in *ListerInput) (*Lister, error)
type Lister struct {
NextBatch(ctx context.Context) ([]*storage.ObjectAttrs, error)
Close()
}
// Parallel downloads (preview)
func NewDownloader(c *storage.Client, opts ...Option) (*Downloader, error)
type Downloader struct {
DownloadObject(ctx context.Context, input *DownloadObjectInput) error
DownloadDirectory(ctx context.Context, input *DownloadDirectoryInput) error
WaitAndClose() error
}

Experimental APIs including bi-directional gRPC reads, read stall timeout, custom metrics, and zonal bucket APIs.
// Experimental client options
func WithGRPCBidiReads() option.ClientOption
func WithZonalBucketAPIs() option.ClientOption
func WithReadStallTimeout(config *ReadStallTimeoutConfig) option.ClientOption
func WithMeterProvider(mp metric.MeterProvider) option.ClientOption

// Storage classes
const (
StorageClassStandard = "STANDARD"
StorageClassNearline = "NEARLINE"
StorageClassColdline = "COLDLINE"
StorageClassArchive = "ARCHIVE"
StorageClassMultiRegional = "MULTI_REGIONAL"
StorageClassRegional = "REGIONAL"
)
// ACL roles
const (
RoleOwner ACLRole = "OWNER"
RoleReader ACLRole = "READER"
RoleWriter ACLRole = "WRITER"
)
// ACL entities
const (
AllUsers ACLEntity = "allUsers"
AllAuthenticatedUsers ACLEntity = "allAuthenticatedUsers"
)
// Lifecycle actions
const (
DeleteAction = "Delete"
SetStorageClassAction = "SetStorageClass"
AbortIncompleteMPUAction = "AbortIncompleteMultipartUpload"
)
// RPO (Recovery Point Objective) settings
const (
RPOUnknown RPO = ""
RPODefault RPO = "DEFAULT"
RPOAsyncTurbo RPO = "ASYNC_TURBO"
)
// Public access prevention
const (
PublicAccessPreventionUnknown PublicAccessPrevention = ""
PublicAccessPreventionEnforced PublicAccessPrevention = "enforced"
PublicAccessPreventionInherited PublicAccessPrevention = "inherited"
)
// Projections for attribute selection
const (
ProjectionFull Projection = "full"
ProjectionNoACL Projection = "noAcl"
)
// Notification event types
const (
ObjectFinalizeEvent = "OBJECT_FINALIZE"
ObjectMetadataUpdateEvent = "OBJECT_METADATA_UPDATE"
ObjectDeleteEvent = "OBJECT_DELETE"
ObjectArchiveEvent = "OBJECT_ARCHIVE"
)
// Notification payload formats
const (
NoPayload = "NONE"
JSONPayload = "JSON_API_V1"
)

// ACL types
type ACLEntity string
type ACLRole string
type ACLRule struct {
Entity ACLEntity
Role ACLRole
Domain string
Email string
EntityID string
ProjectTeam *ProjectTeam
}
type ProjectTeam struct {
ProjectNumber string
Team string
}
// Condition types for preconditions
type Conditions struct {
GenerationMatch int64
GenerationNotMatch int64
MetagenerationMatch int64
MetagenerationNotMatch int64
DoesNotExist bool
}
type BucketConditions struct {
MetagenerationMatch int64
MetagenerationNotMatch int64
}
// Retry configuration
type RetryOption interface {
// contains filtered or unexported methods
}
func WithBackoff(backoff gax.Backoff) RetryOption
func WithPolicy(policy RetryPolicy) RetryOption
func WithErrorFunc(errorFunc func(error) bool) RetryOption
func WithMaxAttempts(maxAttempts int) RetryOption
type RetryPolicy int
const (
RetryIdempotent RetryPolicy = iota
RetryAlways
RetryNever
)
// Error checking
func ShouldRetry(err error) bool
// Predefined errors
var (
ErrBucketNotExist error // Use errors.Is to check
ErrObjectNotExist error // Use errors.Is to check
)

The storage client supports multiple authentication mechanisms:

- Application Default Credentials (via the GOOGLE_APPLICATION_CREDENTIALS environment variable)
- Explicit credentials via option.WithCredentials() or option.WithCredentialsFile()
- The STORAGE_EMULATOR_HOST environment variable for local development

// Example: Custom credentials
import "google.golang.org/api/option"
client, err := storage.NewClient(ctx,
option.WithCredentialsFile("path/to/keyfile.json"),
)
// Example: Custom scopes
client, err := storage.NewClient(ctx,
option.WithScopes(storage.ScopeReadOnly),
)
// Scope constants
const (
ScopeFullControl = "https://www.googleapis.com/auth/devstorage.full_control"
ScopeReadOnly = "https://www.googleapis.com/auth/devstorage.read_only"
ScopeReadWrite = "https://www.googleapis.com/auth/devstorage.read_write"
)

Choose between HTTP/JSON and gRPC transports based on your requirements:
HTTP/JSON Transport (default):
gRPC Transport:
// HTTP/JSON client (default)
httpClient, err := storage.NewClient(ctx)
// gRPC client
grpcClient, err := storage.NewGRPCClient(ctx)
// Check direct connectivity support (gRPC)
err := storage.CheckDirectConnectivitySupported(ctx, "bucket-name")
// Client options for read API selection
storage.WithJSONReads() // Force JSON API for reads
storage.WithXMLReads() // Force XML API for reads

Best practices:

- Call client.Close() to release resources. Reuse clients across operations.
- Client, BucketHandle, and ObjectHandle are safe for concurrent use.
- Pass a context.Context for cancellation and deadlines.
- Use errors.Is() to check for ErrBucketNotExist and ErrObjectNotExist.
- Stream with Reader and Writer for large objects to avoid memory issues.
- Configure retry behavior with RetryOption.
- Use Generation() to access specific object versions.
- dataflux and transfermanager are in preview; APIs may change.