AWS SDK for Go v2 with 130+ service clients, Request/Send pattern, and context support.
service/s3/s3manager
Import: github.com/aws/aws-sdk-go-v2/service/s3/s3manager
High-level utilities for uploading and downloading S3 objects with automatic multipart handling and concurrent transfers.
const (
DefaultUploadConcurrency = 5
DefaultDownloadConcurrency = 5
DefaultUploadPartSize = MinUploadPartSize // 5MB
DefaultDownloadPartSize = 1024 * 1024 * 5 // 5MB
MinUploadPartSize int64 = 1024 * 1024 * 5 // 5MB minimum
MaxUploadParts = 10000
DefaultBatchSize = 100
ErrDeleteBatchFailCode = "DeleteBatchError"
)

Concurrent multipart upload. Automatically uses multipart when content exceeds PartSize. Handles io.Reader (does not require io.ReadSeeker).
type Uploader struct {
PartSize int64 // default: MinUploadPartSize (5MB)
Concurrency int // default: DefaultUploadConcurrency (5)
LeavePartsOnError bool // don't abort multipart on failure
MaxUploadParts int // default: 10000
S3 s3iface.S3API // S3 client
RequestOptions []request.Option
}
func NewUploader(cfg aws.Config, options ...func(*Uploader)) *Uploader
func NewUploaderWithClient(svc s3iface.S3API, options ...func(*Uploader)) *Uploader
func (u Uploader) Upload(input *UploadInput, options ...func(*Uploader)) (*UploadOutput, error)
func (u Uploader) UploadWithContext(ctx aws.Context, input *UploadInput, opts ...func(*Uploader)) (*UploadOutput, error)
func (u Uploader) UploadWithIterator(ctx aws.Context, iter BatchUploadIterator, opts ...func(*Uploader)) error
type UploadInput struct {
Bucket *string
Key *string
Body io.Reader // does NOT require io.ReadSeeker
ACL s3.ObjectCannedACL
CacheControl *string
ContentDisposition *string
ContentEncoding *string
ContentLanguage *string
ContentType *string
Expires *time.Time
GrantFullControl *string
GrantRead *string
GrantReadACP *string
GrantWriteACP *string
Metadata map[string]*string
RequestPayer s3.RequestPayer
SSECustomerAlgorithm *string
SSECustomerKey *string
SSECustomerKeyMD5 *string
SSEKMSKeyId *string
ServerSideEncryption s3.ServerSideEncryption
StorageClass s3.StorageClass
Tagging *string
WebsiteRedirectLocation *string
}
type UploadOutput struct {
Location string // URL of the uploaded object
VersionID *string // version ID if bucket is versioned
UploadID string // multipart upload ID (empty for single-part)
}
func WithUploaderRequestOptions(opts ...request.Option) func(*Uploader)

uploader := s3manager.NewUploader(cfg)
uploader.PartSize = 64 * 1024 * 1024 // 64MB parts
f, _ := os.Open("large-file.zip")
defer f.Close()
result, err := uploader.Upload(&s3manager.UploadInput{
Bucket: aws.String("my-bucket"),
Key: aws.String("large-file.zip"),
Body: f,
ContentType: aws.String("application/zip"),
})
if err != nil {
if multierr, ok := err.(s3manager.MultiUploadFailure); ok {
fmt.Println("Failed upload ID:", multierr.UploadID())
}
}
fmt.Println("Uploaded to:", result.Location)

Concurrent range-based download. Writes to io.WriterAt (allows concurrent writes to non-sequential positions).
type Downloader struct {
PartSize int64 // default: DefaultDownloadPartSize (5MB)
Concurrency int // default: DefaultDownloadConcurrency (5)
S3 s3iface.S3API // S3 client
RequestOptions []request.Option
}
func NewDownloader(cfg aws.Config, options ...func(*Downloader)) *Downloader
func NewDownloaderWithClient(svc s3iface.S3API, options ...func(*Downloader)) *Downloader
func (d Downloader) Download(w io.WriterAt, input *s3.GetObjectInput, options ...func(*Downloader)) (n int64, err error)
func (d Downloader) DownloadWithContext(ctx aws.Context, w io.WriterAt, input *s3.GetObjectInput, options ...func(*Downloader)) (n int64, err error)
func (d Downloader) DownloadWithIterator(ctx aws.Context, iter BatchDownloadIterator, opts ...func(*Downloader)) error
func WithDownloaderRequestOptions(opts ...request.Option) func(*Downloader)

downloader := s3manager.NewDownloader(cfg)
// Download to file
f, _ := os.Create("downloaded-file.zip")
defer f.Close()
n, err := downloader.Download(f, &s3.GetObjectInput{
Bucket: aws.String("my-bucket"),
Key: aws.String("large-file.zip"),
})
fmt.Println("Downloaded", n, "bytes")
// Download to in-memory buffer
buf := aws.NewWriteAtBuffer([]byte{})
n, err = downloader.Download(buf, &s3.GetObjectInput{
Bucket: aws.String("my-bucket"),
Key: aws.String("small-file.txt"),
})
fmt.Println("Content:", string(buf.Bytes()))

Delete multiple S3 objects in batches.
type BatchDelete struct {
Client s3iface.S3API
BatchSize int // default: DefaultBatchSize (100)
}
func NewBatchDelete(cfg aws.Config, options ...func(*BatchDelete)) *BatchDelete
func NewBatchDeleteWithClient(client s3iface.S3API, options ...func(*BatchDelete)) *BatchDelete
func (d *BatchDelete) Delete(ctx aws.Context, iter BatchDeleteIterator) error
// Iterator interface for batch delete
type BatchDeleteIterator interface {
Next() bool
Err() error
DeleteObject() BatchDeleteObject
}
type BatchDeleteObject struct {
Object *s3.DeleteObjectInput
After func() error // called after each delete attempt
}
// Built-in iterator implementations:
// DeleteObjectsIterator: delete from an explicit list
type DeleteObjectsIterator struct {
Objects []BatchDeleteObject
// unexported fields
}
// DeleteListIterator: delete all objects from a ListObjects call
func NewDeleteListIterator(svc s3iface.S3API, input *s3.ListObjectsInput, opts ...func(*DeleteListIterator)) BatchDeleteIterator

// Delete specific objects
batcher := s3manager.NewBatchDelete(cfg)
err := batcher.Delete(aws.BackgroundContext(), &s3manager.DeleteObjectsIterator{
Objects: []s3manager.BatchDeleteObject{
{Object: &s3.DeleteObjectInput{Bucket: aws.String("my-bucket"), Key: aws.String("file1.txt")}},
{Object: &s3.DeleteObjectInput{Bucket: aws.String("my-bucket"), Key: aws.String("file2.txt")}},
},
})
// Delete all objects matching a prefix
listInput := &s3.ListObjectsInput{
Bucket: aws.String("my-bucket"),
Prefix: aws.String("old-logs/"),
}
iter := s3manager.NewDeleteListIterator(s3.New(cfg), listInput)
err = batcher.Delete(aws.BackgroundContext(), iter)

type BatchUploadIterator interface {
Next() bool
Err() error
UploadObject() BatchUploadObject
}
type BatchUploadObject struct {
Object *UploadInput
After func() error
}
type UploadObjectsIterator struct {
Objects []BatchUploadObject
// unexported fields
}
func (batcher *UploadObjectsIterator) Next() bool
func (batcher *UploadObjectsIterator) Err() error
func (batcher *UploadObjectsIterator) UploadObject() BatchUploadObject

type BatchDownloadIterator interface {
Next() bool
Err() error
DownloadObject() BatchDownloadObject
}
type BatchDownloadObject struct {
Object *s3.GetObjectInput
Writer io.WriterAt
After func() error
}
type DownloadObjectsIterator struct {
Objects []BatchDownloadObject
// unexported fields
}

func GetBucketRegion(ctx aws.Context, cfg aws.Config, bucket, regionHint string, opts ...aws.Option) (string, error)
func GetBucketRegionWithClient(ctx aws.Context, svc s3iface.S3API, bucket string, opts ...aws.Option) (string, error)

// Determine which region a bucket is in
region, err := s3manager.GetBucketRegion(ctx, cfg, "my-bucket", "us-west-2")

type BatchError struct {
Errors Errors
// unexported fields
}
func (err *BatchError) Code() string
func (err *BatchError) Message() string
func (err *BatchError) Error() string
func (err *BatchError) OrigErr() error
func (err *BatchError) OrigErrs() []error
type Error struct {
OrigErr error
Bucket *string
Key *string
}
type Errors []Error
func (es Errors) Error() string
func NewBatchError(code, message string, err []Error) awserr.Error
// MultiUploadFailure - returned by Upload on multipart failure
type MultiUploadFailure interface {
error
UploadID() string // failed upload ID for manual cleanup
}

service/s3/s3crypto
Import: github.com/aws/aws-sdk-go-v2/service/s3/s3crypto
Client-side encryption and decryption of S3 objects. Supports AES-GCM (authenticated encryption) and AES-CBC with KMS key wrapping.
const (
AESCBC = "AES/CBC"
AESGCMNoPadding = "AES/GCM/NoPadding"
KMSWrap = "kms"
DefaultInstructionKeySuffix = ".instruction"
DefaultMinFileSize = 1024 * 512 * 5 // 2.5MB
)

var AESCBCPadder = Padder(aescbcPadding) // PKCS5 padding for AES-CBC
var NoPadder = Padder(noPadder{})

type EncryptionConfig struct {
SaveStrategy SaveStrategy // where to store encryption envelope
InstructionFileSuffix string // suffix for instruction files
TempFolderPath string // folder for temp files
MinFileSize int64 // threshold for temp file vs memory
}
func NewEncryptionClient(cfg aws.Config, contentCipherBuilder ContentCipherBuilder, options ...func(*EncryptionClient)) (*EncryptionClient, error)
// EncryptionClient methods
func (c *EncryptionClient) PutObjectRequest(input *s3.PutObjectInput) s3.PutObjectRequest

type DecryptionClient struct {
// Registry for custom wrap algorithms
WrapRegistry map[string]WrapEntry
// Registry for custom content encryption algorithms
CEKRegistry map[string]CEKEntry
// Has unexported fields
}
func NewDecryptionClient(cfg aws.Config, options ...func(*DecryptionClient)) *DecryptionClient
func (c *DecryptionClient) GetObjectRequest(input *s3.GetObjectInput) s3.GetObjectRequest

func NewKMSKeyGenerator(kmsClient kmsiface.KMSAPI, cmkID string) CipherDataGenerator
func NewKMSKeyGeneratorWithMatDesc(kmsClient kmsiface.KMSAPI, cmkID string, matdesc MaterialDescription) CipherDataGenerator
type MaterialDescription map[string]string

func AESGCMContentCipherBuilder(generator CipherDataGenerator) ContentCipherBuilder
func AESCBCContentCipherBuilder(generator CipherDataGenerator, padder Padder) ContentCipherBuilder

Controls where the encryption envelope is stored.
type SaveStrategy interface {
Save(Envelope, *s3.PutObjectInput, *aws.Request) error
}
// Store envelope in object header (default)
func NewHeaderSaveStrategy() SaveStrategy
// Store envelope in a separate S3 object (instruction file)
func NewS3SaveStrategy(cfg aws.Config, suffix string) SaveStrategy

type ContentCipherBuilder interface {
NewCipher() (ContentCipher, error)
}
type ContentCipher interface {
EncryptContents(src io.Reader) (io.Reader, error)
DecryptContents(src io.ReadCloser) (io.ReadCloser, error)
GetCipherData() CipherData
}
type CipherDataGenerator interface {
GenerateCipherData(keySize, ivSize int) (CipherData, error)
}
type CipherDataDecrypter interface {
DecryptKey([]byte) ([]byte, error)
}
type Cipher interface {
Encrypter
Decrypter
}
type Padder interface {
Pad([]byte, int) ([]byte, error)
Unpad([]byte) ([]byte, error)
Name() string
}
type WrapEntry func(Envelope) (CipherDataDecrypter, error)
type CEKEntry func(CipherData) (ContentCipher, error)

type CipherData struct {
Key []byte
IV []byte
WrapAlgorithm string
CEKAlgorithm string
TagLength string
MaterialDescription MaterialDescription
EncryptedKey []byte
Padder Padder
}

import (
"github.com/aws/aws-sdk-go-v2/service/kms"
"github.com/aws/aws-sdk-go-v2/service/kms/kmsiface"
"github.com/aws/aws-sdk-go-v2/service/s3/s3crypto"
)
cfg, _ := external.LoadDefaultAWSConfig()
cfg.Region = "us-east-1"
cmkID := "arn:aws:kms:us-east-1:123456789012:key/mykey"
kmsClient := kms.New(cfg)
// Create key generator using KMS
handler := s3crypto.NewKMSKeyGenerator(kmsClient, cmkID)
// Create encryption client using AES-GCM
encClient, err := s3crypto.NewEncryptionClient(cfg, s3crypto.AESGCMContentCipherBuilder(handler))
// Encrypt and upload
req := encClient.PutObjectRequest(&s3.PutObjectInput{
Bucket: aws.String("my-bucket"),
Key: aws.String("encrypted-file.txt"),
Body: strings.NewReader("secret data"),
})
resp, err := req.Send()
// Create decryption client
decClient := s3crypto.NewDecryptionClient(cfg)
// Decrypt and download
getReq := decClient.GetObjectRequest(&s3.GetObjectInput{
Bucket: aws.String("my-bucket"),
Key: aws.String("encrypted-file.txt"),
})
getResp, err := getReq.Send()
defer getResp.Body.Close()
data, _ := ioutil.ReadAll(getResp.Body)
fmt.Println("Decrypted:", string(data))
// Register custom algorithms
decClient.WrapRegistry["AESWrap"] = myAESWrapEntry
decClient.CEKRegistry["AES/CTR/NoPadding"] = myAESCTREntry

Install with Tessl CLI
npx tessl i tessl/golang-github-com--aws--aws-sdk-go-v2@0.4.0