Google Cloud Storage client library for Go, providing comprehensive APIs for bucket and object operations, access control, and advanced features.
This document covers all object operations including reading, writing, metadata management, copying, composing, and querying objects within Google Cloud Storage buckets.
Reference to an object in a bucket. Used to perform operations on the object.
/**
* ObjectHandle provides operations on a Google Cloud Storage object.
* Use BucketHandle.Object to get a handle.
*/
type ObjectHandle struct {
// contains filtered or unexported fields
}
/**
* Returns the object's metadata.
* @param ctx - Context for the operation
* @returns ObjectAttrs and error
*/
func (o *ObjectHandle) Attrs(ctx context.Context) (*ObjectAttrs, error)
/**
* Updates the object's metadata.
* @param ctx - Context for the operation
* @param uattrs - Attributes to update
* @returns Updated ObjectAttrs and error
*/
func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (*ObjectAttrs, error)
/**
* Deletes the object.
* @param ctx - Context for the operation
* @returns Error if deletion fails
*/
func (o *ObjectHandle) Delete(ctx context.Context) error
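Usage Example (a minimal metadata round-trip sketch; assumes client and ctx are initialized as in the later examples, with an illustrative bucket and object):
obj := client.Bucket("my-bucket").Object("my-object.txt")
attrs, err := obj.Attrs(ctx)
if err != nil {
log.Fatal(err)
}
fmt.Printf("Size: %d, ContentType: %s\n", attrs.Size, attrs.ContentType)
// Update only if the metadata hasn't changed since the read above
updated, err := obj.If(storage.Conditions{MetagenerationMatch: attrs.Metageneration}).Update(ctx, storage.ObjectAttrsToUpdate{
CacheControl: "public, max-age=86400",
})
if err != nil {
log.Fatal(err)
}
fmt.Printf("Metageneration: %d\n", updated.Metageneration)
if err := obj.Delete(ctx); err != nil {
log.Fatal(err)
}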
/**
* Creates a new Reader for downloading the object.
* Caller must call Close on the returned Reader.
* @param ctx - Context for the operation
* @returns Reader and error
*/
func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error)
/**
* Creates a Reader for downloading part of the object.
* If length is negative, reads until the end.
* If offset is negative, reads abs(offset) bytes from end.
* @param ctx - Context for the operation
* @param offset - Starting byte offset
* @param length - Number of bytes to read (-1 for all)
* @returns Reader and error
*/
func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (*Reader, error)
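For example, a tail read (a sketch of the offset semantics above; per those rules a negative offset counts from the end and a negative length reads to the end):
tail, err := obj.NewRangeReader(ctx, -512, -1) // the last 512 bytes of the object
if err != nil {
log.Fatal(err)
}
defer tail.Close()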
/**
* Creates a Writer for uploading the object.
* Caller must call Close on the returned Writer.
* @param ctx - Context for the operation
* @returns Writer
*/
func (o *ObjectHandle) NewWriter(ctx context.Context) *Writer
/**
* Creates a Copier to copy from the source object.
* @param src - Source ObjectHandle
* @returns Copier
*/
func (o *ObjectHandle) CopierFrom(src *ObjectHandle) *Copier
/**
* Creates a Composer to compose multiple objects into this object.
* @param sources - Source ObjectHandles to compose
* @returns Composer
*/
func (o *ObjectHandle) ComposerFrom(sources ...*ObjectHandle) *Composer
/**
* Returns an ObjectHandle with preconditions applied.
* @param conds - Preconditions for operations
* @returns ObjectHandle with conditions
*/
func (o *ObjectHandle) If(conds Conditions) *ObjectHandle
/**
* Returns an ObjectHandle with customer-supplied encryption key.
* @param encryptionKey - AES-256 encryption key (32 bytes)
* @returns ObjectHandle with encryption key
*/
func (o *ObjectHandle) Key(encryptionKey []byte) *ObjectHandle
/**
* Returns an ObjectHandle for a specific generation.
* @param gen - Object generation number
* @returns ObjectHandle for specific generation
*/
func (o *ObjectHandle) Generation(gen int64) *ObjectHandle
/**
* Configures whether to read compressed data directly.
* When true, prevents automatic decompression of gzip-encoded objects.
* @param compressed - True to read compressed data
* @returns ObjectHandle with read compressed setting
*/
func (o *ObjectHandle) ReadCompressed(compressed bool) *ObjectHandle
/**
* Returns the object's ACL handle.
* @returns ACLHandle for object access control
*/
func (o *ObjectHandle) ACL() *ACLHandle
/**
* Configures retry behavior for this object handle.
* @param opts - Retry configuration options
* @returns ObjectHandle with retry configuration
*/
func (o *ObjectHandle) Retryer(opts ...RetryOption) *ObjectHandle
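A sketch of per-handle retry tuning. The RetryOption values shown (storage.WithBackoff, storage.WithPolicy, storage.RetryAlways) come from the upstream library and are not documented in this section; gax is github.com/googleapis/gax-go/v2, and time must be imported:
obj := client.Bucket("my-bucket").Object("my-object.txt").Retryer(
storage.WithBackoff(gax.Backoff{Initial: 2 * time.Second, Max: 30 * time.Second}),
storage.WithPolicy(storage.RetryAlways), // retry even non-idempotent calls
)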
/**
* Sets the user project for Requester Pays buckets.
* @param projectID - Project ID to bill
* @returns ObjectHandle with user project set
*/
func (o *ObjectHandle) UserProject(projectID string) *ObjectHandle
/**
* Returns the bucket name for this object.
* @returns Bucket name
*/
func (o *ObjectHandle) BucketName() string
/**
* Returns the object name.
* @returns Object name
*/
func (o *ObjectHandle) ObjectName() string
/**
* Moves (renames) an object to a new name within the same bucket.
* The destination is specified by object name only; see MoveObjectDestination.
* @param ctx - Context for the operation
* @param destination - Destination object specification
* @returns ObjectAttrs of moved object and error
*/
func (o *ObjectHandle) Move(ctx context.Context, destination MoveObjectDestination) (*ObjectAttrs, error)
/**
* Creates a multi-range downloader for reading multiple byte ranges.
* This API is currently in preview and not yet available for general use.
* @param ctx - Context for the operation
* @returns MultiRangeDownloader and error
*/
func (o *ObjectHandle) NewMultiRangeDownloader(ctx context.Context) (*MultiRangeDownloader, error)
/**
* Creates a Writer from an existing appendable object.
* The object must have been created with append semantics and not finalized.
* Only supported for gRPC clients. This feature is in preview.
* @param ctx - Context for the operation
* @param opts - Appendable writer options
* @returns Writer, current object size, and error
*/
func (o *ObjectHandle) NewWriterFromAppendableObject(ctx context.Context, opts *AppendableWriterOpts) (*Writer, int64, error)
/**
* Configures whether to override an unlocked retention policy.
* @param override - True to override unlocked retention
* @returns ObjectHandle with override setting
*/
func (o *ObjectHandle) OverrideUnlockedRetention(override bool) *ObjectHandle
/**
* Sets a read handle for the object.
* The read handle is periodically refreshed.
* @param r - Read handle bytes
* @returns ObjectHandle with read handle
*/
func (o *ObjectHandle) ReadHandle(r ReadHandle) *ObjectHandle
/**
* Restores a soft-deleted object.
* @param ctx - Context for the operation
* @param opts - Restore options
* @returns Restored ObjectAttrs and error
*/
func (o *ObjectHandle) Restore(ctx context.Context, opts *RestoreOptions) (*ObjectAttrs, error)
/**
* Returns an ObjectHandle configured to operate on soft-deleted objects.
* @returns ObjectHandle for soft-deleted objects
*/
func (o *ObjectHandle) SoftDeleted() *ObjectHandle
Complete metadata for an object.
/**
* ObjectAttrs represents the metadata for a Google Cloud Storage object.
*/
type ObjectAttrs struct {
// Bucket is the name of the bucket containing this object
Bucket string
// Name is the name of the object
Name string
// Prefix is set only for synthetic "directory" entries returned when
// listing with a Query.Delimiter; Name is empty for such entries
Prefix string
// ContentType is the MIME type of the object's content
ContentType string
// ContentLanguage is the content language of the object
ContentLanguage string
// ContentEncoding is the encoding of the object's content
ContentEncoding string
// ContentDisposition is the Content-Disposition header
ContentDisposition string
// CacheControl is the Cache-Control header
CacheControl string
// ACL is the list of access control rules for the object
ACL []ACLRule
// Owner is the owner of the object (read-only)
Owner string
// Size is the length of the object's content in bytes
Size int64
// MD5 is the MD5 hash of the object's content
// Optional for uploads, verified when present
MD5 []byte
// CRC32C is the CRC32C checksum of the object's content
// Automatically calculated and verified for uploads
CRC32C uint32
// MediaLink is the media download URL (read-only)
MediaLink string
// Metadata is custom user-provided metadata
Metadata map[string]string
// Generation is the generation number of the object's content
Generation int64
// Metageneration is the generation number of the object's metadata
Metageneration int64
// StorageClass is the storage class of the object
StorageClass string
// Created is the time the object was created (read-only)
Created time.Time
// Deleted is the time the object was deleted (read-only)
// Only set for versioned objects that have been deleted
Deleted time.Time
// Updated is the last modification time (read-only)
Updated time.Time
// CustomTime is a user-specified timestamp for the object
CustomTime time.Time
// ComponentCount is the number of source objects composed into this object
// Only set for objects created by composition (read-only)
ComponentCount int
// EventBasedHold indicates whether an event-based hold is active
EventBasedHold bool
// TemporaryHold indicates whether a temporary hold is active
TemporaryHold bool
// RetentionExpirationTime is when the retention period expires (read-only)
RetentionExpirationTime time.Time
// PredefinedACL is a predefined ACL to apply during creation/update
// Valid values: authenticatedRead, bucketOwnerFullControl, bucketOwnerRead,
// private, projectPrivate, publicRead
PredefinedACL string
// CustomerKeySHA256 is the SHA256 hash of the customer-supplied encryption key
// Read-only
CustomerKeySHA256 string
// Etag is the HTTP/1.1 Entity tag
Etag string
// SoftDeleteTime is when the object was soft-deleted (read-only)
SoftDeleteTime time.Time
// HardDeleteTime is when the object will be permanently deleted (read-only)
HardDeleteTime time.Time
// Retention contains the retention configuration for this object
// Only applicable for buckets with object retention enabled
Retention *ObjectRetention
}
Attributes that can be updated on an existing object.
/**
* ObjectAttrsToUpdate specifies attributes to update on an object.
* Only non-nil fields are updated. Use optional types to distinguish
* between zero values and unset fields.
*/
type ObjectAttrsToUpdate struct {
// ContentType updates the MIME type
ContentType optional.String
// ContentLanguage updates the content language
ContentLanguage optional.String
// ContentEncoding updates the content encoding
ContentEncoding optional.String
// ContentDisposition updates the Content-Disposition header
ContentDisposition optional.String
// CacheControl updates the Cache-Control header
CacheControl optional.String
// EventBasedHold updates the event-based hold status
EventBasedHold optional.Bool
// TemporaryHold updates the temporary hold status
TemporaryHold optional.Bool
// CustomTime updates the custom timestamp
CustomTime time.Time
// Metadata replaces custom metadata
// Set to an empty map to delete all metadata
Metadata map[string]string
// PredefinedACL applies a predefined ACL
PredefinedACL string
// ACL is the access control list (not commonly used, prefer IAM)
ACL []ACLRule
}
Preconditions for object operations to prevent race conditions.
/**
* Conditions constrain object methods to act on specific generations.
*/
type Conditions struct {
// GenerationMatch specifies that the object must have this generation
GenerationMatch int64
// GenerationNotMatch specifies that the object must not have this generation
GenerationNotMatch int64
// MetagenerationMatch specifies that the object must have this metageneration
MetagenerationMatch int64
// MetagenerationNotMatch specifies that the object must not have this metageneration
MetagenerationNotMatch int64
// DoesNotExist specifies that the object must not exist
// Use for conditional creates
DoesNotExist bool
}
Reader for downloading object content.
/**
* Reader reads the contents of a Google Cloud Storage object.
* Implements io.Reader, io.ReaderAt, io.Seeker, io.WriterTo, and io.Closer.
*/
type Reader struct {
// Attrs contains a subset of object attributes (read-only)
Attrs ReaderObjectAttrs
// CacheControl is the Cache-Control header (read-only)
CacheControl string
// ContentEncoding is the Content-Encoding header (read-only)
ContentEncoding string
// ContentType is the Content-Type header (read-only)
ContentType string
// Size is the size of the object content (read-only)
Size int64
// contains filtered or unexported fields
}
/**
* Reads up to len(p) bytes into p.
* Implements io.Reader interface.
* @param p - Buffer to read into
* @returns Number of bytes read and error
*/
func (r *Reader) Read(p []byte) (int, error)
/**
* Reads len(p) bytes into p starting at offset off.
* Implements io.ReaderAt interface.
* @param p - Buffer to read into
* @param off - Offset to read from
* @returns Number of bytes read and error
*/
func (r *Reader) ReadAt(p []byte, off int64) (int, error)
/**
* Sets the offset for the next Read.
* Implements io.Seeker interface.
* @param offset - Offset value
* @param whence - Reference point (io.SeekStart, io.SeekCurrent, io.SeekEnd)
* @returns New offset and error
*/
func (r *Reader) Seek(offset int64, whence int) (int64, error)
/**
* Writes object content to w.
* Implements io.WriterTo interface.
* @param w - Writer to write to
* @returns Number of bytes written and error
*/
func (r *Reader) WriteTo(w io.Writer) (int64, error)
/**
* Closes the Reader and releases resources.
* @returns Error if close fails
*/
func (r *Reader) Close() error
Subset of object attributes available when reading.
/**
* ReaderObjectAttrs are attributes about the object being read.
* These are populated during NewReader/NewRangeReader.
* All fields are read-only.
*/
type ReaderObjectAttrs struct {
// Size is the length of the object's content
Size int64
// StartOffset is the byte offset from which reading begins
// Only non-zero for range requests
StartOffset int64
// ContentType is the MIME type of the object's content
ContentType string
// ContentEncoding is the encoding of the object's content
ContentEncoding string
// CacheControl specifies caching behavior
CacheControl string
// LastModified is when the object was last modified
LastModified time.Time
// Generation is the generation number of the object's content
Generation int64
// Metageneration is the metadata generation number
Metageneration int64
// CRC32C is the CRC32C checksum of the entire object's content
CRC32C uint32
// Decompressed is true if the object was automatically decompressed
// Objects with Content-Encoding: gzip are automatically decompressed
// Use ObjectHandle.ReadCompressed(true) to prevent decompression
Decompressed bool
}
Usage Example:
import (
"context"
"fmt"
"io"
"log"
"os"
"cloud.google.com/go/storage"
)
// Download entire object
obj := client.Bucket("my-bucket").Object("my-object.txt")
r, err := obj.NewReader(ctx)
if err != nil {
log.Fatal(err)
}
defer r.Close()
// Read all content
data, err := io.ReadAll(r)
if err != nil {
log.Fatal(err)
}
fmt.Printf("Object size: %d bytes\n", r.Attrs.Size)
fmt.Printf("Content-Type: %s\n", r.ContentType)
fmt.Printf("Generation: %d\n", r.Attrs.Generation)
// Download byte range
rangeReader, err := obj.NewRangeReader(ctx, 0, 1024) // First 1KB
if err != nil {
log.Fatal(err)
}
defer rangeReader.Close()
// Download with conditions
conditionalObj := obj.If(storage.Conditions{
GenerationMatch: 12345,
})
r, err = conditionalObj.NewReader(ctx)
// Download specific generation
oldVersion := obj.Generation(12345)
r, err = oldVersion.NewReader(ctx)
// Download encrypted object
encryptedObj := obj.Key([]byte("example-32-byte-aes-256-key-abcd")) // key must be exactly 32 bytes
r, err = encryptedObj.NewReader(ctx)
Writer for uploading object content.
/**
* Writer writes a Cloud Storage object.
* Implements io.Writer and io.Closer.
*/
type Writer struct {
// ObjectAttrs are optional attributes to set on the object
// Must be initialized before the first Write call
ObjectAttrs
// SendCRC32C specifies whether to transmit a CRC32C checksum
// Set to true and populate Writer's CRC32C field to send checksum
// GCS will reject writes if the checksum doesn't match
SendCRC32C bool
// DisableAutoChecksum disables automatic CRC32C calculation in gRPC
// By default, gRPC automatically calculates and validates checksums
// Set to true to disable for better performance (gRPC only)
DisableAutoChecksum bool
// ChunkSize controls the maximum bytes per request
// Objects smaller than ChunkSize are sent in a single request
// Larger objects are split over multiple requests
// Default: 16 MiB. Set to 0 to disable chunking
ChunkSize int
// ChunkRetryDeadline sets per-chunk retry deadline for resumable uploads
// Default: 32 seconds
ChunkRetryDeadline time.Duration
// ChunkTransferTimeout sets per-chunk request timeout
// Only applicable for HTTP/JSON client
// Default: no timeout
ChunkTransferTimeout time.Duration
// ForceEmptyContentType disables auto-detection of Content-Type
// By default, blank Content-Type triggers auto-detection
ForceEmptyContentType bool
// Append enables appendable object semantics (gRPC only, preview)
// Objects are visible on first Write and can be appended to
// Use FinalizeOnClose to finalize on Close
Append bool
// FinalizeOnClose finalizes appendable objects when Close is called
// Only applies when Append is true (gRPC only, preview)
FinalizeOnClose bool
// ProgressFunc is called periodically with bytes written
// Useful for monitoring upload progress
ProgressFunc func(int64)
// contains filtered or unexported fields
}
/**
* Writes data to the object.
* Implements io.Writer interface.
* @param p - Data to write
* @returns Number of bytes written and error
*/
func (w *Writer) Write(p []byte) (n int, err error)
/**
* Completes the write operation and closes the Writer.
* The object is not created until Close is called.
* @returns Error if close fails
*/
func (w *Writer) Close() error
/**
* Aborts the write operation with an error.
* The object is not created.
* Always returns nil.
* @param err - Error to abort with
* @returns nil
*/
func (w *Writer) CloseWithError(err error) error
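A typical abort pattern (a sketch; dataSource stands for any io.Reader and is illustrative). Note that upstream documentation recommends cancelling the context passed to NewWriter as the preferred way to abandon an upload:
w := obj.NewWriter(ctx)
if _, err := io.Copy(w, dataSource); err != nil {
w.CloseWithError(err) // abandon the upload; no object is created
} else if err := w.Close(); err != nil {
log.Fatal(err)
}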
/**
* Syncs buffered data to GCS without finalizing (appendable objects only).
* Only applicable when Append is true (gRPC only, preview).
* @returns Error if flush fails
*/
func (w *Writer) Flush() error
/**
* Returns the object's attributes after a successful write.
* Only valid after Close returns without error.
* @returns ObjectAttrs
*/
func (w *Writer) Attrs() *ObjectAttrs
Usage Example:
import (
"context"
"fmt"
"hash/crc32"
"log"
"cloud.google.com/go/storage"
)
// Simple upload
obj := client.Bucket("my-bucket").Object("my-object.txt")
w := obj.NewWriter(ctx)
if _, err := w.Write([]byte("Hello, World!")); err != nil {
log.Fatal(err)
}
if err := w.Close(); err != nil {
log.Fatal(err)
}
// Upload with metadata
w = obj.NewWriter(ctx)
w.ContentType = "text/plain"
w.CacheControl = "public, max-age=3600"
w.Metadata = map[string]string{
"custom-key": "custom-value",
}
if _, err := w.Write(data); err != nil {
log.Fatal(err)
}
if err := w.Close(); err != nil {
log.Fatal(err)
}
// Upload with CRC32C checksum
w = obj.NewWriter(ctx)
w.CRC32C = crc32.Checksum(data, crc32.MakeTable(crc32.Castagnoli)) // GCS uses the Castagnoli polynomial
w.SendCRC32C = true
if _, err := w.Write(data); err != nil {
log.Fatal(err)
}
if err := w.Close(); err != nil {
log.Fatal(err)
}
// Upload with custom chunk size
w = obj.NewWriter(ctx)
w.ChunkSize = 1024 * 1024 // 1 MiB chunks
if _, err := w.Write(largeData); err != nil {
log.Fatal(err)
}
if err := w.Close(); err != nil {
log.Fatal(err)
}
// Upload with progress monitoring
w = obj.NewWriter(ctx)
w.ProgressFunc = func(bytes int64) {
fmt.Printf("Uploaded %d bytes\n", bytes)
}
if _, err := w.Write(data); err != nil {
log.Fatal(err)
}
if err := w.Close(); err != nil {
log.Fatal(err)
}
// Get attributes after successful upload
w = obj.NewWriter(ctx)
if _, err := w.Write(data); err != nil {
log.Fatal(err)
}
if err := w.Close(); err != nil {
log.Fatal(err)
}
attrs := w.Attrs()
fmt.Printf("Created object: generation=%d, size=%d\n", attrs.Generation, attrs.Size)
// Conditional upload (create only if doesn't exist)
conditionalObj := obj.If(storage.Conditions{DoesNotExist: true})
w = conditionalObj.NewWriter(ctx)
if _, err := w.Write(data); err != nil {
log.Fatal(err)
}
if err := w.Close(); err != nil {
// Will fail if object already exists
log.Fatal(err)
}
// Upload encrypted object
encryptedObj := obj.Key([]byte("example-32-byte-aes-256-key-abcd")) // key must be exactly 32 bytes
w = encryptedObj.NewWriter(ctx)
if _, err := w.Write(sensitiveData); err != nil {
log.Fatal(err)
}
if err := w.Close(); err != nil {
log.Fatal(err)
}
Copy objects within or across buckets.
/**
* Copier copies a source object to a destination.
*/
type Copier struct {
// ObjectAttrs are optional attributes for the destination object
// Nil or zero-valued attributes are ignored
ObjectAttrs
// RewriteToken can be set before running to resume a copy
// After a call to Run, RewriteToken is updated for resuming
RewriteToken string
// ProgressFunc is called periodically with bytes copied and total bytes
ProgressFunc func(copiedBytes, totalBytes uint64)
// DestinationKMSKeyName is the Cloud KMS key for the destination object
DestinationKMSKeyName string
// contains filtered or unexported fields (src, dst)
}
/**
* Executes the copy operation.
* For large objects, this may need to be called multiple times.
* @param ctx - Context for the operation
* @returns Destination ObjectAttrs and error
*/
func (c *Copier) Run(ctx context.Context) (*ObjectAttrs, error)
Usage Example:
// Simple copy within same bucket
src := client.Bucket("my-bucket").Object("source.txt")
dst := client.Bucket("my-bucket").Object("destination.txt")
copier := dst.CopierFrom(src)
attrs, err := copier.Run(ctx)
if err != nil {
log.Fatal(err)
}
fmt.Printf("Copied to generation %d\n", attrs.Generation)
// Copy across buckets
src = client.Bucket("source-bucket").Object("source.txt")
dst = client.Bucket("dest-bucket").Object("destination.txt")
copier = dst.CopierFrom(src)
attrs, err = copier.Run(ctx)
// Copy with metadata changes
copier = dst.CopierFrom(src)
copier.ContentType = "text/plain"
copier.Metadata = map[string]string{"copied": "true"}
attrs, err = copier.Run(ctx)
// Copy with progress monitoring
copier = dst.CopierFrom(src)
copier.ProgressFunc = func(copied, total uint64) {
fmt.Printf("Progress: %d / %d bytes\n", copied, total)
}
attrs, err = copier.Run(ctx)
// Copy with KMS encryption
copier = dst.CopierFrom(src)
copier.DestinationKMSKeyName = "projects/P/locations/L/keyRings/R/cryptoKeys/K"
attrs, err = copier.Run(ctx)
// Resume interrupted copy
copier = dst.CopierFrom(src)
attrs, err = copier.Run(ctx)
if err != nil && copier.RewriteToken != "" {
// Save token and retry later
token := copier.RewriteToken
// Later, resume the copy
newCopier := dst.CopierFrom(src)
newCopier.RewriteToken = token
attrs, err = newCopier.Run(ctx)
}
Compose multiple objects into a single object.
/**
* Composer composes source objects into a destination object.
* The destination object is created or overwritten.
* Up to 32 source objects can be composed in a single operation.
*/
type Composer struct {
// ObjectAttrs are optional attributes for the composed object
ObjectAttrs
// contains filtered or unexported fields
}
/**
* Executes the composition.
* @param ctx - Context for the operation
* @returns Composed ObjectAttrs and error
*/
func (c *Composer) Run(ctx context.Context) (*ObjectAttrs, error)
Usage Example:
// Compose multiple objects
bucket := client.Bucket("my-bucket")
obj1 := bucket.Object("part1.txt")
obj2 := bucket.Object("part2.txt")
obj3 := bucket.Object("part3.txt")
dst := bucket.Object("composed.txt")
composer := dst.ComposerFrom(obj1, obj2, obj3)
attrs, err := composer.Run(ctx)
if err != nil {
log.Fatal(err)
}
fmt.Printf("Composed object: %d bytes\n", attrs.Size)
// Compose with metadata
composer = dst.ComposerFrom(obj1, obj2)
composer.ContentType = "text/plain"
composer.Metadata = map[string]string{"composed": "true"}
attrs, err = composer.Run(ctx)
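// Staged composition for more than 32 parts (a sketch; parts is a
// previously built []*storage.ObjectHandle with 33 to 64 elements here,
// and the intermediate object names are illustrative)
var intermediates []*storage.ObjectHandle
for i, batch := range [][]*storage.ObjectHandle{parts[:32], parts[32:]} {
inter := bucket.Object(fmt.Sprintf("intermediate-%d", i))
if _, err := inter.ComposerFrom(batch...).Run(ctx); err != nil {
log.Fatal(err)
}
intermediates = append(intermediates, inter)
}
if _, err := dst.ComposerFrom(intermediates...).Run(ctx); err != nil {
log.Fatal(err)
}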
// Note: GCS limits each compose call to 32 source objects.
// The staged pattern above works around this:
// 1. Compose the first 32 parts into intermediate1
// 2. Compose the next 32 parts into intermediate2
// 3. Compose intermediate1 and intermediate2 into final
Query and iterate over objects in a bucket.
/**
* Query represents a query to filter objects from a bucket.
*/
type Query struct {
// Prefix filters objects whose names begin with this prefix
Prefix string
// Delimiter causes results to be grouped by prefix up to the delimiter
// Used to simulate directory listings
Delimiter string
// StartOffset filters objects lexicographically >= this value
StartOffset string
// EndOffset filters objects lexicographically < this value
EndOffset string
// Versions includes non-current object versions
Versions bool
// IncludeTrailingDelimiter includes the delimiter in prefix results
IncludeTrailingDelimiter bool
// MatchGlob filters objects using a glob pattern
// Example: "**/*.txt"
MatchGlob string
// Projection controls which object attributes are returned
// Values: ProjectionFull, ProjectionNoACL
Projection Projection
}
/**
 * Sets specific attributes to retrieve for objects.
 * Reduces response size and improves performance.
 * @param attrs - List of attribute names to retrieve
 * @returns Error if an attribute name is invalid
 */
func (q *Query) SetAttrSelection(attrs []string) error
/**
* Projection controls attribute inclusion in results.
*/
type Projection string
const (
// ProjectionFull returns all object metadata
ProjectionFull Projection = "full"
// ProjectionNoACL omits ACL metadata
ProjectionNoACL Projection = "noAcl"
)
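For example (a sketch; bucket and ctx are set up as in the usage examples below), omit ACLs to shrink list responses:
it := bucket.Objects(ctx, &storage.Query{Prefix: "photos/", Projection: storage.ProjectionNoACL})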
/**
* ObjectIterator is an iterator over objects in a bucket.
*/
type ObjectIterator struct {
// contains filtered or unexported fields
}
/**
* Returns the next object. Returns iterator.Done when complete.
* @returns ObjectAttrs and error
*/
func (it *ObjectIterator) Next() (*ObjectAttrs, error)
/**
* Returns pagination information for controlling iteration.
* @returns iterator.PageInfo
*/
func (it *ObjectIterator) PageInfo() *iterator.PageInfo
Usage Example:
import (
"context"
"fmt"
"log"
"cloud.google.com/go/storage"
"google.golang.org/api/iterator"
)
// List all objects in bucket
bucket := client.Bucket("my-bucket")
it := bucket.Objects(ctx, nil)
for {
attrs, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
log.Fatal(err)
}
fmt.Printf("Object: %s, Size: %d\n", attrs.Name, attrs.Size)
}
// List objects with prefix
query := &storage.Query{Prefix: "photos/"}
it = bucket.Objects(ctx, query)
for {
attrs, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
log.Fatal(err)
}
fmt.Println(attrs.Name)
}
// Simulate directory listing
query = &storage.Query{
Prefix: "photos/",
Delimiter: "/",
}
it = bucket.Objects(ctx, query)
for {
attrs, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
log.Fatal(err)
}
if attrs.Prefix != "" {
fmt.Printf("Directory: %s\n", attrs.Prefix)
} else {
fmt.Printf("File: %s\n", attrs.Name)
}
}
// List with pagination
query = &storage.Query{Prefix: "logs/"}
it = bucket.Objects(ctx, query)
pageInfo := it.PageInfo()
pageInfo.MaxSize = 100 // 100 objects per page
for {
attrs, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
log.Fatal(err)
}
fmt.Println(attrs.Name)
}
// List specific generations (versioned bucket)
query = &storage.Query{
Prefix: "document.txt",
Versions: true,
}
it = bucket.Objects(ctx, query)
for {
attrs, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
log.Fatal(err)
}
fmt.Printf("Generation %d: created %v\n", attrs.Generation, attrs.Created)
}
// Filter by date range (using StartOffset/EndOffset)
query = &storage.Query{
StartOffset: "logs/2024-01-01",
EndOffset: "logs/2024-02-01",
}
it = bucket.Objects(ctx, query)
// Use glob pattern
query = &storage.Query{
MatchGlob: "**.txt", // All .txt files
}
it = bucket.Objects(ctx, query)
// Optimize with attribute selection
query = &storage.Query{Prefix: "photos/"}
query.SetAttrSelection([]string{"Name", "Size", "Updated"})
it = bucket.Objects(ctx, query)
Constants for object storage classes.
const (
// StorageClassStandard for frequently accessed data
StorageClassStandard = "STANDARD"
// StorageClassNearline for data accessed less than once per month
StorageClassNearline = "NEARLINE"
// StorageClassColdline for data accessed less than once per quarter
StorageClassColdline = "COLDLINE"
// StorageClassArchive for long-term data accessed less than once per year
StorageClassArchive = "ARCHIVE"
// StorageClassMultiRegional is deprecated, use STANDARD
StorageClassMultiRegional = "MULTI_REGIONAL"
// StorageClassRegional is deprecated, use STANDARD
StorageClassRegional = "REGIONAL"
)
Types for advanced object operations.
/**
* MoveObjectDestination specifies the destination for Move operations.
*/
type MoveObjectDestination struct {
// Object is the destination object name
Object string
// Conditions are optional preconditions for the destination
Conditions *Conditions
}
Move Operation Usage Examples:
import (
"context"
"fmt"
"log"
"cloud.google.com/go/storage"
)
// Example 1: Simple move within the same bucket
func moveObjectSimple(client *storage.Client) {
ctx := context.Background()
src := client.Bucket("my-bucket").Object("old-name.txt")
dst := storage.MoveObjectDestination{
Object: "new-name.txt",
}
attrs, err := src.Move(ctx, dst)
if err != nil {
log.Fatal(err)
}
fmt.Printf("Moved to: %s\n", attrs.Name)
}
// Example 2: Move with preconditions (fail if destination exists)
func moveWithPreconditions(client *storage.Client) {
ctx := context.Background()
src := client.Bucket("my-bucket").Object("source.txt")
dst := storage.MoveObjectDestination{
Object: "destination.txt",
Conditions: &storage.Conditions{
DoesNotExist: true, // Only move if destination doesn't exist
},
}
attrs, err := src.Move(ctx, dst)
if err != nil {
log.Fatal(err)
}
fmt.Printf("Successfully moved to: %s\n", attrs.Name)
}
// Example 3: Move with generation matching (ensure source hasn't changed)
func moveWithGenerationMatch(client *storage.Client) {
ctx := context.Background()
// Get current attributes to get the generation
src := client.Bucket("my-bucket").Object("source.txt")
attrs, err := src.Attrs(ctx)
if err != nil {
log.Fatal(err)
}
// Move only if the source generation matches (hasn't been modified)
srcWithCondition := src.If(storage.Conditions{GenerationMatch: attrs.Generation})
dst := storage.MoveObjectDestination{
Object: "destination.txt",
}
newAttrs, err := srcWithCondition.Move(ctx, dst)
if err != nil {
log.Fatal(err)
}
fmt.Printf("Moved object generation %d to: %s\n", newAttrs.Generation, newAttrs.Name)
}
/**
* RestoreOptions configures object restoration from soft delete.
*/
type RestoreOptions struct {
// CopySourceACL indicates whether to copy access controls from source
// Only valid for buckets with fine-grained access
// Error if uniform bucket-level access is enabled
CopySourceACL bool
}
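A restore sketch (assumes soft delete is enabled on the bucket, and that a soft-deleted object is addressed by its generation, per the upstream API; names are illustrative):
obj := client.Bucket("my-bucket").Object("accidentally-deleted.txt")
// Inspect the soft-deleted version
attrs, err := obj.SoftDeleted().Attrs(ctx)
if err != nil {
log.Fatal(err)
}
// Restore that generation
restored, err := obj.Generation(attrs.Generation).Restore(ctx, &storage.RestoreOptions{})
if err != nil {
log.Fatal(err)
}
fmt.Printf("Restored %s at generation %d\n", restored.Name, restored.Generation)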
/**
* ObjectRetention contains retention configuration for an object.
* Only applicable when object retention is enabled on the bucket.
*/
type ObjectRetention struct {
// Mode is the retention policy mode
// Valid values: "Locked", "Unlocked"
// Locked policies cannot be changed
// Unlocked policies require override to change
Mode string
// RetainUntil is when retention expires
RetainUntil time.Time
}
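A hedged sketch of shortening an Unlocked retention (assumes the library version also exposes a Retention field on ObjectAttrsToUpdate, which is not listed in this section, and that time is imported):
obj := client.Bucket("my-bucket").Object("retained.txt")
_, err := obj.OverrideUnlockedRetention(true).Update(ctx, storage.ObjectAttrsToUpdate{
Retention: &storage.ObjectRetention{ // assumed field; see note above
Mode: "Unlocked",
RetainUntil: time.Now().Add(24 * time.Hour),
},
})
if err != nil {
log.Fatal(err)
}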
/**
* ObjectContexts is a container for custom object contexts.
*/
type ObjectContexts struct {
// Custom contains user-defined object contexts
Custom map[string]ObjectCustomContextPayload
}
/**
* ObjectCustomContextPayload holds a custom object context value.
*/
type ObjectCustomContextPayload struct {
// Value is the context value
Value string
// Delete marks this context for deletion
Delete bool
// CreateTime is when the context was created (read-only)
CreateTime time.Time
// UpdateTime is when the context was updated (read-only)
UpdateTime time.Time
}
/**
* ReadHandle is associated with an object for optimized reads.
* This is periodically refreshed.
*/
type ReadHandle []byte
/**
* AppendableWriterOpts provides options for NewWriterFromAppendableObject.
* Only supported for gRPC clients with appendable objects.
* This feature is in preview.
*/
type AppendableWriterOpts struct {
// ChunkSize controls upload chunk size (see Writer.ChunkSize)
ChunkSize int
// ChunkRetryDeadline sets per-chunk retry deadline (see Writer.ChunkRetryDeadline)
ChunkRetryDeadline time.Duration
// ProgressFunc monitors upload progress (see Writer.ProgressFunc)
ProgressFunc func(int64)
// FinalizeOnClose finalizes object on close (see Writer.FinalizeOnClose)
FinalizeOnClose bool
}
NewWriterFromAppendableObject Usage Examples:
import (
"context"
"fmt"
"log"
"cloud.google.com/go/storage"
)
// Example 1: Create an appendable object and resume writing
func createAndResumeAppendableObject(client *storage.Client) {
ctx := context.Background()
obj := client.Bucket("my-bucket").Object("appendable-log.txt")
// Step 1: Create initial appendable object
w := obj.NewWriter(ctx)
w.Append = true // Mark as appendable
if _, err := w.Write([]byte("Initial log entry\n")); err != nil {
log.Fatal(err)
}
if err := w.Close(); err != nil {
log.Fatal(err)
}
// Step 2: Get the object's generation to resume appending
attrs, err := obj.Attrs(ctx)
if err != nil {
log.Fatal(err)
}
// Step 3: Resume writing from the appendable object
w2, offset, err := obj.Generation(attrs.Generation).NewWriterFromAppendableObject(ctx, &storage.AppendableWriterOpts{
ChunkSize: 8 * 1024 * 1024, // 8 MiB
FinalizeOnClose: true, // Finalize on close (object becomes immutable)
})
if err != nil {
log.Fatal(err)
}
fmt.Printf("Resuming from offset: %d bytes\n", offset)
if _, err := w2.Write([]byte("Additional log entry\n")); err != nil {
log.Fatal(err)
}
if err := w2.Close(); err != nil {
log.Fatal(err)
}
fmt.Println("Successfully appended and finalized object")
}
// Example 2: Append multiple times without finalizing
func multipleAppendsWithoutFinalizing(client *storage.Client) {
ctx := context.Background()
obj := client.Bucket("my-bucket").Object("streaming-log.txt")
// Create initial appendable object
w := obj.NewWriter(ctx)
w.Append = true
if _, err := w.Write([]byte("Log 1\n")); err != nil {
log.Fatal(err)
}
if err := w.Close(); err != nil {
log.Fatal(err)
}
// Append again without finalizing
for i := 2; i <= 5; i++ {
attrs, err := obj.Attrs(ctx)
if err != nil {
log.Fatal(err)
}
w, offset, err := obj.Generation(attrs.Generation).NewWriterFromAppendableObject(ctx, &storage.AppendableWriterOpts{
FinalizeOnClose: false, // Keep object appendable
})
if err != nil {
log.Fatal(err)
}
fmt.Printf("Append %d at offset %d\n", i, offset)
if _, err := w.Write([]byte(fmt.Sprintf("Log %d\n", i))); err != nil {
log.Fatal(err)
}
if err := w.Close(); err != nil {
log.Fatal(err)
}
}
fmt.Println("Multiple appends completed, object still appendable")
}
// Example 3: Append with progress monitoring
func appendWithProgress(client *storage.Client) {
ctx := context.Background()
obj := client.Bucket("my-bucket").Object("large-appendable.bin")
// Get current object attributes
attrs, err := obj.Attrs(ctx)
if err != nil {
log.Fatal(err)
}
// Resume with progress monitoring
w, offset, err := obj.Generation(attrs.Generation).NewWriterFromAppendableObject(ctx, &storage.AppendableWriterOpts{
ChunkSize: 1024 * 1024, // 1 MiB chunks
ProgressFunc: func(bytesWritten int64) {
fmt.Printf("Progress: %d bytes written\n", bytesWritten)
},
FinalizeOnClose: true,
})
if err != nil {
log.Fatal(err)
}
fmt.Printf("Starting from offset: %d\n", offset)
// Write large data
largeData := make([]byte, 10*1024*1024) // 10 MiB
if _, err := w.Write(largeData); err != nil {
log.Fatal(err)
}
if err := w.Close(); err != nil {
log.Fatal(err)
}
}
/**
* MultiRangeDownloader reads multiple byte ranges from an object.
* This API is currently in preview.
*/
type MultiRangeDownloader struct {
// Attrs contains object attributes (populated on creation)
Attrs ReaderObjectAttrs
}
/**
* Adds a range to download.
* @param output - Writer for downloaded data
* @param offset - Starting byte offset
* @param length - Number of bytes to read
* @param callback - Completion callback with (bytesRead, totalBytes, error)
*/
func (mrd *MultiRangeDownloader) Add(output io.Writer, offset, length int64, callback func(int64, int64, error))
/**
* Closes the downloader and releases resources.
* @returns Error if close fails
*/
func (mrd *MultiRangeDownloader) Close() error
/**
* Returns any error from the download operation.
* @returns Error
*/
func (mrd *MultiRangeDownloader) Error() error
/**
* Returns the read handle for the download.
* @returns Handle bytes
*/
func (mrd *MultiRangeDownloader) GetHandle() []byte
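A hedged sketch of this preview API based only on the signatures in this section (Wait and Close are documented just below; assumes bytes and log are imported, and the callback arguments follow the (bytesRead, totalBytes, error) description; range values are illustrative):
mrd, err := obj.NewMultiRangeDownloader(ctx)
if err != nil {
log.Fatal(err)
}
var header, body bytes.Buffer
mrd.Add(&header, 0, 1024, func(read, total int64, err error) {
if err != nil {
log.Printf("range 1 failed: %v", err)
}
})
mrd.Add(&body, 4096, 2048, func(read, total int64, err error) {
if err != nil {
log.Printf("range 2 failed: %v", err)
}
})
mrd.Wait() // blocks until both ranges complete
if err := mrd.Close(); err != nil {
log.Fatal(err)
}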
/**
* Waits for all ranges to complete downloading.
*/
func (mrd *MultiRangeDownloader) Wait()
Install with Tessl CLI
npx tessl i tessl/golang-cloud-google-com-go-storage