Comprehensive documentation for S3 multipart upload operations in the AWS SDK for Go v2.
Multipart uploads allow you to upload large objects (up to 5 TB) in parts, enabling pause/resume functionality, improved throughput through parallel uploads, and the ability to upload objects before knowing the total size. A multipart upload consists of three steps:
1. CreateMultipartUpload
2. UploadPart or UploadPartCopy
3. CompleteMultipartUpload, or abort with AbortMultipartUpload

Initiates a multipart upload and returns an upload ID used to associate all parts in the upload.
func (c *Client) CreateMultipartUpload(
ctx context.Context,
params *CreateMultipartUploadInput,
optFns ...func(*Options)
) (*CreateMultipartUploadOutput, error)

type CreateMultipartUploadInput struct {
// Required: The name of the bucket where the multipart upload is initiated
Bucket *string
// Required: Object key for which the multipart upload is to be initiated
Key *string
// The canned ACL to apply to the object (not supported for directory buckets)
ACL types.ObjectCannedACL
// Whether Amazon S3 should use an S3 Bucket Key for object encryption with SSE-KMS
BucketKeyEnabled *bool
// Specifies caching behavior along the request/reply chain
CacheControl *string
// Algorithm to create the checksum for the object (CRC32, CRC32C, SHA1, SHA256)
ChecksumAlgorithm types.ChecksumAlgorithm
// Checksum type that determines how part-level checksums combine to create object-level checksum
ChecksumType types.ChecksumType
// Specifies presentational information for the object
ContentDisposition *string
// Content encodings applied to the object
ContentEncoding *string
// The language that the content is in
ContentLanguage *string
// A standard MIME type describing the format of the object data
ContentType *string
// The account ID of the expected bucket owner
ExpectedBucketOwner *string
// The date and time at which the object is no longer cacheable
Expires *time.Time
// Grant full control permissions (not supported for directory buckets)
GrantFullControl *string
// Grant read permissions (not supported for directory buckets)
GrantRead *string
// Grant read ACL permissions (not supported for directory buckets)
GrantReadACP *string
// Grant write ACL permissions (not supported for directory buckets)
GrantWriteACP *string
// A map of metadata to store with the object in S3
Metadata map[string]string
// Legal hold status to apply to the uploaded object (not supported for directory buckets)
ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus
// Object Lock mode to apply (not supported for directory buckets)
ObjectLockMode types.ObjectLockMode
// Date and time when Object Lock expires (not supported for directory buckets)
ObjectLockRetainUntilDate *time.Time
// Confirms requester knows they will be charged (not supported for directory buckets)
RequestPayer types.RequestPayer
// Algorithm to use for SSE-C encryption (not supported for directory buckets)
SSECustomerAlgorithm *string
// Customer-provided encryption key for SSE-C (not supported for directory buckets)
SSECustomerKey *string
// MD5 digest of the SSE-C encryption key (not supported for directory buckets)
SSECustomerKeyMD5 *string
// KMS Encryption Context as Base64 encoded UTF-8 JSON
SSEKMSEncryptionContext *string
// KMS key ID (Key ID, Key ARN, or Key Alias) for object encryption
SSEKMSKeyId *string
// Server-side encryption algorithm (AES256, aws:kms, aws:kms:dsse)
ServerSideEncryption types.ServerSideEncryption
// Storage class (STANDARD, REDUCED_REDUNDANCY, GLACIER, etc.)
StorageClass types.StorageClass
// Tag-set for the object (not supported for directory buckets)
Tagging *string
// Redirect requests for this object to another object or URL (not supported for directory buckets)
WebsiteRedirectLocation *string
}

type CreateMultipartUploadOutput struct {
// Date when the initiated multipart upload becomes eligible for abort
AbortDate *time.Time
// Lifecycle rule ID that defines the abort action
AbortRuleId *string
// Name of the bucket to which the multipart upload was initiated
Bucket *string
// Whether the multipart upload uses an S3 Bucket Key for SSE-KMS
BucketKeyEnabled *bool
// Algorithm used to create a checksum of the object
ChecksumAlgorithm types.ChecksumAlgorithm
// Checksum type that will be used
ChecksumType types.ChecksumType
// Object key for which the multipart upload was initiated
Key *string
// Indicates requester was successfully charged (not supported for directory buckets)
RequestCharged types.RequestCharged
// SSE-C algorithm confirmation (not supported for directory buckets)
SSECustomerAlgorithm *string
// SSE-C key MD5 for integrity verification (not supported for directory buckets)
SSECustomerKeyMD5 *string
// KMS Encryption Context if present
SSEKMSEncryptionContext *string
// ID of the KMS key used for object encryption
SSEKMSKeyId *string
// Server-side encryption algorithm used
ServerSideEncryption types.ServerSideEncryption
// Required: ID for the initiated multipart upload
UploadId *string
// Metadata pertaining to the operation's result
ResultMetadata middleware.Metadata
}

This operation initiates a multipart upload and returns an upload ID. This upload ID is used to associate all parts in the specific multipart upload. You specify this upload ID in each subsequent upload part request. You also include this upload ID in the final request to either complete or abort the multipart upload.
After initiating a multipart upload and uploading one or more parts, to stop being charged for storing the uploaded parts, you must either complete or abort the multipart upload. Amazon S3 frees up the space used to store the parts and stops charging you only after you either complete or abort a multipart upload.
Directory buckets: For directory buckets, you must make requests to the Zonal endpoint using virtual-hosted-style requests. Path-style requests are not supported.
Permissions:
- s3:PutObject permission and KMS permissions for encryption (general purpose buckets)
- CreateSession API for session-based authorization with the s3express:CreateSession permission (directory buckets)
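Because uploaded parts keep accruing storage charges until the upload is completed or aborted, a deferred abort is a convenient cleanup pattern. A minimal sketch, assuming the client setup shown in the full example later on this page; the function name and the bucket and key values are placeholders:

func createWithCleanup(ctx context.Context, client *s3.Client, bucket, key string) error {
	resp, err := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return err
	}
	completed := false
	// Unless the upload is completed below, abort it so the uploaded parts
	// stop consuming storage.
	defer func() {
		if !completed {
			_, _ = client.AbortMultipartUpload(ctx, &s3.AbortMultipartUploadInput{
				Bucket:   aws.String(bucket),
				Key:      aws.String(key),
				UploadId: resp.UploadId,
			})
		}
	}()
	// ... upload parts and call CompleteMultipartUpload here ...
	completed = true
	return nil
}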
Uploads a part in a multipart upload. Parts can be uploaded in any order.
func (c *Client) UploadPart(
ctx context.Context,
params *UploadPartInput,
optFns ...func(*Options)
) (*UploadPartOutput, error)

type UploadPartInput struct {
// Required: The name of the bucket to which the multipart upload was initiated
Bucket *string
// Required: Object key for which the multipart upload was initiated
Key *string
// Required: Part number (1-10,000). Uniquely identifies the part and its position
PartNumber *int32
// Required: Upload ID identifying the multipart upload
UploadId *string
// Object data to upload
Body io.Reader
// Checksum algorithm used by SDK (CRC32, CRC32C, SHA1, SHA256, CRC64NVME)
ChecksumAlgorithm types.ChecksumAlgorithm
// Base64 encoded, 32-bit CRC32 checksum of the object
ChecksumCRC32 *string
// Base64 encoded, 32-bit CRC32C checksum of the object
ChecksumCRC32C *string
// Base64 encoded, 64-bit CRC64NVME checksum of the part
ChecksumCRC64NVME *string
// Base64 encoded, 160-bit SHA1 digest of the object
ChecksumSHA1 *string
// Base64 encoded, 256-bit SHA256 digest of the object
ChecksumSHA256 *string
// Size of the body in bytes (useful when size cannot be determined automatically)
ContentLength *int64
// Base64 encoded 128-bit MD5 digest of the part data (not supported for directory buckets)
ContentMD5 *string
// The account ID of the expected bucket owner
ExpectedBucketOwner *string
// Confirms requester knows they will be charged (not supported for directory buckets)
RequestPayer types.RequestPayer
// Algorithm to use for SSE-C encryption (not supported for directory buckets)
SSECustomerAlgorithm *string
// Customer-provided encryption key for SSE-C (not supported for directory buckets)
SSECustomerKey *string
// MD5 digest of the SSE-C encryption key (not supported for directory buckets)
SSECustomerKeyMD5 *string
}

type UploadPartOutput struct {
// Whether the multipart upload uses an S3 Bucket Key for SSE-KMS
BucketKeyEnabled *bool
// Base64 encoded, 32-bit CRC32 checksum of the object
ChecksumCRC32 *string
// Base64 encoded, 32-bit CRC32C checksum of the object
ChecksumCRC32C *string
// Base64 encoded, 64-bit CRC64NVME checksum of the part
ChecksumCRC64NVME *string
// Base64 encoded, 160-bit SHA1 digest of the object
ChecksumSHA1 *string
// Base64 encoded, 256-bit SHA256 digest of the object
ChecksumSHA256 *string
// Entity tag for the uploaded object
ETag *string
// Indicates requester was successfully charged (not supported for directory buckets)
RequestCharged types.RequestCharged
// SSE-C algorithm confirmation (not supported for directory buckets)
SSECustomerAlgorithm *string
// SSE-C key MD5 for integrity verification (not supported for directory buckets)
SSECustomerKeyMD5 *string
// ID of the KMS key used for object encryption
SSEKMSKeyId *string
// Server-side encryption algorithm used
ServerSideEncryption types.ServerSideEncryption
// Metadata pertaining to the operation's result
ResultMetadata middleware.Metadata
}

Uploads a part in a multipart upload. You must initiate a multipart upload before you can upload any part. In response to your initiate request, Amazon S3 returns an upload ID.
Part numbers can be any number from 1 to 10,000, inclusive. A part number uniquely identifies a part and defines its position within the object being created. If you upload a new part using the same part number that was used with a previous part, the previously uploaded part is overwritten.
For information about maximum and minimum part sizes (5 MB to 5 GB per part, except the last part), see the multipart upload limits documentation.
Permissions:
- s3:PutObject permission and KMS permissions for encryption (general purpose buckets)
- CreateSession API for session-based authorization (directory buckets)
Encryption: Server-side encryption parameters must match those specified in CreateMultipartUpload (except for SSE-C which must be provided in each request).
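As a sketch of uploading one part directly from a file, the helper below uses io.NewSectionReader to read only that part's byte range; the function name and parameters are illustrative, and it assumes the imports from the full example plus io and os. Because re-uploading with the same PartNumber overwrites the earlier part, the same call can simply be retried for a part that failed.

func uploadPartFromFile(ctx context.Context, client *s3.Client, bucket, key, uploadId string,
	f *os.File, partNum int32, offset, length int64) (types.CompletedPart, error) {
	// Read only this part's byte range from the file.
	body := io.NewSectionReader(f, offset, length)
	resp, err := client.UploadPart(ctx, &s3.UploadPartInput{
		Bucket:        aws.String(bucket),
		Key:           aws.String(key),
		UploadId:      aws.String(uploadId),
		PartNumber:    aws.Int32(partNum),
		Body:          body,
		ContentLength: aws.Int64(length),
	})
	if err != nil {
		return types.CompletedPart{}, err
	}
	// Keep the ETag and part number for CompleteMultipartUpload.
	return types.CompletedPart{ETag: resp.ETag, PartNumber: aws.Int32(partNum)}, nil
}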
Uploads a part by copying data from an existing S3 object as the data source.
func (c *Client) UploadPartCopy(
ctx context.Context,
params *UploadPartCopyInput,
optFns ...func(*Options)
) (*UploadPartCopyOutput, error)

type UploadPartCopyInput struct {
// Required: The destination bucket name
Bucket *string
// Required: Specifies the source object (format: sourcebucket/sourcekey or ARN)
CopySource *string
// Required: Object key for which the multipart upload was initiated
Key *string
// Required: Part number (1-10,000)
PartNumber *int32
// Required: Upload ID identifying the multipart upload
UploadId *string
// Copies the object if its ETag matches the specified tag
CopySourceIfMatch *string
// Copies the object if it has been modified since the specified time
CopySourceIfModifiedSince *time.Time
// Copies the object if its ETag is different than the specified ETag
CopySourceIfNoneMatch *string
// Copies the object if it hasn't been modified since the specified time
CopySourceIfUnmodifiedSince *time.Time
// Range of bytes to copy (format: bytes=first-last, minimum 5 MB)
CopySourceRange *string
// Algorithm to decrypt the source object with SSE-C (not supported when source is directory bucket)
CopySourceSSECustomerAlgorithm *string
// Customer-provided encryption key to decrypt source (not supported when source is directory bucket)
CopySourceSSECustomerKey *string
// MD5 digest of the source SSE-C encryption key (not supported when source is directory bucket)
CopySourceSSECustomerKeyMD5 *string
// The account ID of the expected destination bucket owner
ExpectedBucketOwner *string
// The account ID of the expected source bucket owner
ExpectedSourceBucketOwner *string
// Confirms requester knows they will be charged (not supported for directory buckets)
RequestPayer types.RequestPayer
// Algorithm to encrypt the destination object with SSE-C (not supported when destination is directory bucket)
SSECustomerAlgorithm *string
// Customer-provided encryption key for destination SSE-C (not supported when destination is directory bucket)
SSECustomerKey *string
// MD5 digest of the destination SSE-C encryption key (not supported when destination is directory bucket)
SSECustomerKeyMD5 *string
}

type UploadPartCopyOutput struct {
// Whether the multipart upload uses an S3 Bucket Key for SSE-KMS
BucketKeyEnabled *bool
// Container for all response elements
CopyPartResult *types.CopyPartResult
// Version of the source object that was copied (not supported when source is directory bucket)
CopySourceVersionId *string
// Indicates requester was successfully charged (not supported for directory buckets)
RequestCharged types.RequestCharged
// SSE-C algorithm confirmation (not supported for directory buckets)
SSECustomerAlgorithm *string
// SSE-C key MD5 for integrity verification (not supported for directory buckets)
SSECustomerKeyMD5 *string
// ID of the KMS key used for object encryption
SSEKMSKeyId *string
// Server-side encryption algorithm used
ServerSideEncryption types.ServerSideEncryption
// Metadata pertaining to the operation's result
ResultMetadata middleware.Metadata
}

type CopyPartResult struct {
// Base64 encoded, 32-bit CRC32 checksum of the part
ChecksumCRC32 *string
// Base64 encoded, 32-bit CRC32C checksum of the part
ChecksumCRC32C *string
// Base64 encoded, 64-bit CRC64NVME checksum of the part
ChecksumCRC64NVME *string
// Base64 encoded, 160-bit SHA1 digest of the part
ChecksumSHA1 *string
// Base64 encoded, 256-bit SHA256 digest of the part
ChecksumSHA256 *string
// Entity tag of the object
ETag *string
// Date and time the object was last modified
LastModified *time.Time
}

Uploads a part by copying data from an existing object as the data source. To specify the data source, you add the request header x-amz-copy-source in your request. To specify a byte range, you add the request header x-amz-copy-source-range.
The minimum part size for a multipart copy is 5 MB. You must initiate a multipart upload before you can upload any part.
Permissions:
- CreateSession API with the s3express:CreateSession permission (directory buckets)

Authentication: All UploadPartCopy requests must be authenticated and signed using IAM credentials. Directory buckets do not support temporary credentials through CreateSession for this operation.
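As an illustration of ranged copies, the sketch below splits an existing source object into parts of at least 5 MB using CopySourceRange. It is a hedged example: the helper name and the bucket, key, and upload ID parameters are placeholders, the source key is assumed not to need URL encoding, and error handling is abbreviated.

func copySourceInParts(ctx context.Context, client *s3.Client,
	srcBucket, srcKey, dstBucket, dstKey, uploadId string) ([]types.CompletedPart, error) {
	// Find the source object's size to compute byte ranges.
	head, err := client.HeadObject(ctx, &s3.HeadObjectInput{
		Bucket: aws.String(srcBucket),
		Key:    aws.String(srcKey),
	})
	if err != nil {
		return nil, err
	}
	size := aws.ToInt64(head.ContentLength)
	const partSize = int64(5 * 1024 * 1024) // 5 MB minimum for every part except the last
	var parts []types.CompletedPart
	for offset, partNum := int64(0), int32(1); offset < size; offset, partNum = offset+partSize, partNum+1 {
		last := offset + partSize - 1
		if last > size-1 {
			last = size - 1
		}
		resp, err := client.UploadPartCopy(ctx, &s3.UploadPartCopyInput{
			Bucket:          aws.String(dstBucket),
			Key:             aws.String(dstKey),
			UploadId:        aws.String(uploadId),
			PartNumber:      aws.Int32(partNum),
			CopySource:      aws.String(srcBucket + "/" + srcKey),
			CopySourceRange: aws.String(fmt.Sprintf("bytes=%d-%d", offset, last)),
		})
		if err != nil {
			return nil, err
		}
		parts = append(parts, types.CompletedPart{ETag: resp.CopyPartResult.ETag, PartNumber: aws.Int32(partNum)})
	}
	return parts, nil
}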
Completes a multipart upload by assembling previously uploaded parts.
func (c *Client) CompleteMultipartUpload(
ctx context.Context,
params *CompleteMultipartUploadInput,
optFns ...func(*Options)
) (*CompleteMultipartUploadOutput, error)

type CompleteMultipartUploadInput struct {
// Required: Name of the bucket to which the multipart upload was initiated
Bucket *string
// Required: Object key for which the multipart upload was initiated
Key *string
// Required: ID for the initiated multipart upload
UploadId *string
// Base64 encoded, 32-bit CRC32 checksum of the object
ChecksumCRC32 *string
// Base64 encoded, 32-bit CRC32C checksum of the object
ChecksumCRC32C *string
// Base64 encoded, 64-bit CRC64NVME checksum of the object
ChecksumCRC64NVME *string
// Base64 encoded, 160-bit SHA1 digest of the object
ChecksumSHA1 *string
// Base64 encoded, 256-bit SHA256 digest of the object
ChecksumSHA256 *string
// Checksum type of the object
ChecksumType types.ChecksumType
// The account ID of the expected bucket owner
ExpectedBucketOwner *string
// Completes the upload only if the object's current ETag matches the provided value (conditional write)
IfMatch *string
// Completes the upload only if no object with the same key already exists (conditional write; set to '*')
IfNoneMatch *string
// Expected total object size of the multipart upload
MpuObjectSize *int64
// Container for the multipart upload request information
MultipartUpload *types.CompletedMultipartUpload
// Confirms requester knows they will be charged (not supported for directory buckets)
RequestPayer types.RequestPayer
// SSE-C algorithm (needed when object was created with checksum, not supported for directory buckets)
SSECustomerAlgorithm *string
// SSE-C customer managed key (not supported for directory buckets)
SSECustomerKey *string
// MD5 of SSE-C key (not supported for directory buckets)
SSECustomerKeyMD5 *string
}

type CompletedMultipartUpload struct {
// Array of CompletedPart data types (must be in ascending order by part number)
Parts []CompletedPart
}

type CompletedPart struct {
// Base64 encoded, 32-bit CRC32 checksum of the part
ChecksumCRC32 *string
// Base64 encoded, 32-bit CRC32C checksum of the part
ChecksumCRC32C *string
// Base64 encoded, 64-bit CRC64NVME checksum of the part
ChecksumCRC64NVME *string
// Base64 encoded, 160-bit SHA1 digest of the part
ChecksumSHA1 *string
// Base64 encoded, 256-bit SHA256 digest of the part
ChecksumSHA256 *string
// Entity tag returned when the part was uploaded
ETag *string
// Part number identifying the part (1-10,000)
PartNumber *int32
}

type CompleteMultipartUploadOutput struct {
// Name of the bucket containing the newly created object
Bucket *string
// Whether the multipart upload uses an S3 Bucket Key for SSE-KMS
BucketKeyEnabled *bool
// Base64 encoded, 32-bit CRC32 checksum of the object
ChecksumCRC32 *string
// Base64 encoded, 32-bit CRC32C checksum of the object
ChecksumCRC32C *string
// Base64 encoded, 64-bit CRC64NVME checksum of the object
ChecksumCRC64NVME *string
// Base64 encoded, 160-bit SHA1 digest of the object
ChecksumSHA1 *string
// Base64 encoded, 256-bit SHA256 digest of the object
ChecksumSHA256 *string
// Checksum type used
ChecksumType types.ChecksumType
// Entity tag that identifies the newly created object's data
ETag *string
// Expiration date (expiry-date) and rule ID (rule-id) if configured (not supported for directory buckets)
Expiration *string
// Object key of the newly created object
Key *string
// URI that identifies the newly created object
Location *string
// Indicates requester was successfully charged (not supported for directory buckets)
RequestCharged types.RequestCharged
// ID of the KMS key used for object encryption
SSEKMSKeyId *string
// Server-side encryption algorithm used
ServerSideEncryption types.ServerSideEncryption
// Version ID of the newly created object (not supported for directory buckets)
VersionId *string
// Metadata pertaining to the operation's result
ResultMetadata middleware.Metadata
}

Completes a multipart upload by assembling previously uploaded parts. You first initiate the multipart upload and then upload all parts using the UploadPart operation. After successfully uploading all relevant parts, you call this operation to complete the upload. Upon receiving this request, Amazon S3 concatenates all the parts in ascending order by part number to create a new object.
In the CompleteMultipartUpload request, you must provide the parts list and ensure that the parts list is complete. For each part in the list, you must provide the PartNumber value and the ETag value that are returned after that part was uploaded.
Processing Time: The processing of a CompleteMultipartUpload request could take several minutes to finalize. After Amazon S3 begins processing the request, it sends an HTTP response header that specifies a 200 OK response. While processing is in progress, Amazon S3 periodically sends white space characters to keep the connection from timing out.
Permissions:
- s3:PutObject permission and KMS permissions for encrypted objects (general purpose buckets)
- CreateSession API for session-based authorization (directory buckets)
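Where overwriting an existing object must be avoided, the IfNoneMatch field described in the input struct above can make completion conditional. A minimal, hedged fragment; the identifiers are placeholders and completedParts is assumed to already be in ascending part-number order:

out, err := client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{
	Bucket:      aws.String(bucket),
	Key:         aws.String(key),
	UploadId:    aws.String(uploadId),
	IfNoneMatch: aws.String("*"), // complete only if no object already exists at this key
	MultipartUpload: &types.CompletedMultipartUpload{
		Parts: completedParts, // must be sorted in ascending order by part number
	},
})
if err != nil {
	// A precondition failure here indicates the key already exists.
	return err
}
fmt.Println("created object at", aws.ToString(out.Location))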
Aborts a multipart upload and frees up the storage consumed by uploaded parts.
func (c *Client) AbortMultipartUpload(
ctx context.Context,
params *AbortMultipartUploadInput,
optFns ...func(*Options)
) (*AbortMultipartUploadOutput, error)

type AbortMultipartUploadInput struct {
// Required: The bucket name to which the upload was taking place
Bucket *string
// Required: Key of the object for which the multipart upload was initiated
Key *string
// Required: Upload ID that identifies the multipart upload
UploadId *string
// The account ID of the expected bucket owner
ExpectedBucketOwner *string
// Aborts upload only if initiated on the provided timestamp (directory buckets only)
IfMatchInitiatedTime *time.Time
// Confirms requester knows they will be charged (not supported for directory buckets)
RequestPayer types.RequestPayer
}

type AbortMultipartUploadOutput struct {
// Indicates requester was successfully charged (not supported for directory buckets)
RequestCharged types.RequestCharged
// Metadata pertaining to the operation's result
ResultMetadata middleware.Metadata
}

This operation aborts a multipart upload. After a multipart upload is aborted, no additional parts can be uploaded using that upload ID. The storage consumed by any previously uploaded parts will be freed. However, if any part uploads are currently in progress, those part uploads might or might not succeed. As a result, it might be necessary to abort a given multipart upload multiple times to completely free all storage consumed by all parts.
To verify that all parts have been removed and prevent getting charged for part storage, you should call the ListParts operation and ensure that the parts list is empty.
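A hedged sketch of that verification step, assuming the errors and types packages are imported; a NoSuchUpload error from ListParts is treated here as confirmation that the upload, and therefore its part storage, is gone:

func verifyAborted(ctx context.Context, client *s3.Client, bucket, key, uploadId string) (bool, error) {
	out, err := client.ListParts(ctx, &s3.ListPartsInput{
		Bucket:   aws.String(bucket),
		Key:      aws.String(key),
		UploadId: aws.String(uploadId),
	})
	if err != nil {
		var nsu *types.NoSuchUpload
		if errors.As(err, &nsu) {
			return true, nil // the upload no longer exists, so no parts are stored
		}
		return false, err
	}
	// Parts still listed mean storage is still consumed; abort again.
	return len(out.Parts) == 0, nil
}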
Directory buckets: If multipart uploads in a directory bucket are in progress, you can't delete the bucket until all the in-progress multipart uploads are aborted or completed.
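For example, before deleting a directory bucket you might abort everything still in progress. A sketch that pages through ListMultipartUploads and aborts each upload; the bucket name is a placeholder:

func abortAllUploads(ctx context.Context, client *s3.Client, bucket string) error {
	p := s3.NewListMultipartUploadsPaginator(client, &s3.ListMultipartUploadsInput{
		Bucket: aws.String(bucket),
	})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			return err
		}
		for _, u := range page.Uploads {
			// Abort each in-progress upload so the bucket can be deleted.
			if _, err := client.AbortMultipartUpload(ctx, &s3.AbortMultipartUploadInput{
				Bucket:   aws.String(bucket),
				Key:      u.Key,
				UploadId: u.UploadId,
			}); err != nil {
				return err
			}
		}
	}
	return nil
}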
Permissions:
- s3:AbortMultipartUpload permission (general purpose buckets)
- CreateSession API for session-based authorization (directory buckets)

Lists in-progress multipart uploads in a bucket.
func (c *Client) ListMultipartUploads(
ctx context.Context,
params *ListMultipartUploadsInput,
optFns ...func(*Options)
) (*ListMultipartUploadsOutput, error)

type ListMultipartUploadsInput struct {
// Required: The name of the bucket to which the multipart upload was initiated
Bucket *string
// Character used to group keys (delimiter filtering)
Delimiter *string
// Encoding type for object keys in response (url)
EncodingType types.EncodingType
// The account ID of the expected bucket owner
ExpectedBucketOwner *string
// Specifies the multipart upload after which listing should begin
KeyMarker *string
// Maximum number of multipart uploads to return (1-1,000, default 1,000)
MaxUploads *int32
// Lists uploads only for keys that begin with the specified prefix
Prefix *string
// Confirms requester knows they will be charged (not supported for directory buckets)
RequestPayer types.RequestPayer
// Together with key-marker, specifies where listing should begin (not supported for directory buckets)
UploadIdMarker *string
}

type ListMultipartUploadsOutput struct {
// Name of the bucket to which the multipart upload was initiated
Bucket *string
// Distinct key prefixes containing the delimiter
CommonPrefixes []types.CommonPrefix
// Delimiter specified in the request
Delimiter *string
// Encoding type used by Amazon S3 to encode object keys
EncodingType types.EncodingType
// Whether the returned list is truncated
IsTruncated *bool
// Key at or after which the listing began
KeyMarker *string
// Maximum number of multipart uploads that could have been included
MaxUploads *int32
// Value to use for key-marker in subsequent request when list is truncated
NextKeyMarker *string
// Value to use for upload-id-marker in subsequent request (not supported for directory buckets)
NextUploadIdMarker *string
// Prefix specified in the request
Prefix *string
// Indicates requester was successfully charged (not supported for directory buckets)
RequestCharged types.RequestCharged
// Upload ID marker from the request (not supported for directory buckets)
UploadIdMarker *string
// Container for elements related to multipart uploads
Uploads []types.MultipartUpload
// Metadata pertaining to the operation's result
ResultMetadata middleware.Metadata
}

type MultipartUpload struct {
// Algorithm used to create a checksum of the object
ChecksumAlgorithm ChecksumAlgorithm
// Checksum type for the object
ChecksumType ChecksumType
// Date and time at which the multipart upload was initiated
Initiated *time.Time
// Identifies who initiated the multipart upload
Initiator *Initiator
// Key of the object for which the multipart upload was initiated
Key *string
// Owner of the object that is part of the multipart upload
Owner *Owner
// Class of storage used to store the object
StorageClass StorageClass
// Upload ID that identifies the multipart upload
UploadId *string
}

type Initiator struct {
// Name of the Principal (not supported for directory buckets after November 21, 2025)
DisplayName *string
// Canonical User ID for AWS accounts or user ARN for IAM users
ID *string
}

type Owner struct {
// Display name of the owner (supported in specific regions only)
DisplayName *string
// ID of the owner
ID *string
}

type ListMultipartUploadsPaginator struct {
// Use to iterate through pages
}
// Constructor
func NewListMultipartUploadsPaginator(
client ListMultipartUploadsAPIClient,
params *ListMultipartUploadsInput,
optFns ...func(*ListMultipartUploadsPaginatorOptions)
) *ListMultipartUploadsPaginator
// Check if more pages are available
func (p *ListMultipartUploadsPaginator) HasMorePages() bool
// Retrieve the next page
func (p *ListMultipartUploadsPaginator) NextPage(
ctx context.Context,
optFns ...func(*Options)
) (*ListMultipartUploadsOutput, error)

type ListMultipartUploadsPaginatorOptions struct {
// Maximum number of multipart uploads to return per page
Limit int32
// Stop pagination if service returns duplicate token
StopOnDuplicateToken bool
}

This operation lists in-progress multipart uploads in a bucket. An in-progress multipart upload is one that has been initiated by CreateMultipartUpload but has not yet been completed or aborted.
The operation returns a maximum of 1,000 multipart uploads in the response. If there are more than 1,000 multipart uploads, the response returns an IsTruncated element with the value of true, a NextKeyMarker element, and a NextUploadIdMarker element. To list the remaining multipart uploads, you need to make subsequent requests with the key-marker and upload-id-marker parameters.
Directory buckets: The upload-id-marker element and NextUploadIdMarker element aren't supported. To list additional multipart uploads, only set the value of key-marker to the NextKeyMarker value from the previous response.
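If you prefer not to use the paginator shown later, the markers can be carried forward manually. A hedged sketch; for directory buckets, leave UploadIdMarker unset and carry forward only NextKeyMarker, as noted above:

func listAllUploads(ctx context.Context, client *s3.Client, bucket string) error {
	var keyMarker, uploadIdMarker *string
	for {
		out, err := client.ListMultipartUploads(ctx, &s3.ListMultipartUploadsInput{
			Bucket:         aws.String(bucket),
			KeyMarker:      keyMarker,
			UploadIdMarker: uploadIdMarker,
		})
		if err != nil {
			return err
		}
		for _, u := range out.Uploads {
			fmt.Printf("key=%s uploadId=%s\n", aws.ToString(u.Key), aws.ToString(u.UploadId))
		}
		if !aws.ToBool(out.IsTruncated) {
			return nil
		}
		// Continue from where this page ended.
		keyMarker, uploadIdMarker = out.NextKeyMarker, out.NextUploadIdMarker
	}
}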
Permissions:
- s3:ListBucketMultipartUploads permission (general purpose buckets)
- CreateSession API for session-based authorization (directory buckets)
Lists the parts that have been uploaded for a specific multipart upload.
func (c *Client) ListParts(
ctx context.Context,
params *ListPartsInput,
optFns ...func(*Options)
) (*ListPartsOutput, error)

type ListPartsInput struct {
// Required: The name of the bucket to which the parts are being uploaded
Bucket *string
// Required: Object key for which the multipart upload was initiated
Key *string
// Required: Upload ID identifying the multipart upload whose parts are being listed
UploadId *string
// The account ID of the expected bucket owner
ExpectedBucketOwner *string
// Maximum number of parts to return (default/max 1,000)
MaxParts *int32
// Part number after which listing should begin
PartNumberMarker *string
// Confirms requester knows they will be charged (not supported for directory buckets)
RequestPayer types.RequestPayer
// SSE-C algorithm (needed when object was created with checksum, not supported for directory buckets)
SSECustomerAlgorithm *string
// SSE-C customer managed key (not supported for directory buckets)
SSECustomerKey *string
// MD5 of SSE-C key (not supported for directory buckets)
SSECustomerKeyMD5 *string
}

type ListPartsOutput struct {
// Date when the initiated multipart upload becomes eligible for abort (not supported for directory buckets)
AbortDate *time.Time
// Lifecycle rule ID that defines the abort action (not supported for directory buckets)
AbortRuleId *string
// Name of the bucket to which the multipart upload was initiated
Bucket *string
// Algorithm used to create a checksum of the object
ChecksumAlgorithm types.ChecksumAlgorithm
// Checksum type used
ChecksumType types.ChecksumType
// Identifies who initiated the multipart upload
Initiator *types.Initiator
// Indicates whether the returned list of parts is truncated
IsTruncated *bool
// Object key for which the multipart upload was initiated
Key *string
// Maximum number of parts allowed in the response
MaxParts *int32
// When the list is truncated, the part number after which listing stopped; use it as the part-number-marker in the next request
NextPartNumberMarker *string
// Object owner (for directory buckets, returns bucket owner for all parts)
Owner *types.Owner
// Part number after which listing should begin
PartNumberMarker *string
// Container for elements related to parts (zero or more Part elements)
Parts []types.Part
// Indicates requester was successfully charged (not supported for directory buckets)
RequestCharged types.RequestCharged
// Class of storage used to store the uploaded object
StorageClass types.StorageClass
// Upload ID identifying the multipart upload
UploadId *string
// Metadata pertaining to the operation's result
ResultMetadata middleware.Metadata
}

type Part struct {
// Base64 encoded, 32-bit CRC32 checksum of the part
ChecksumCRC32 *string
// Base64 encoded, 32-bit CRC32C checksum of the part
ChecksumCRC32C *string
// Base64 encoded, 64-bit CRC64NVME checksum of the part
ChecksumCRC64NVME *string
// Base64 encoded, 160-bit SHA1 digest of the part
ChecksumSHA1 *string
// Base64 encoded, 256-bit SHA256 digest of the part
ChecksumSHA256 *string
// Entity tag returned when the part was uploaded
ETag *string
// Date and time at which the part was uploaded
LastModified *time.Time
// Part number identifying the part (1-10,000)
PartNumber *int32
// Size in bytes of the uploaded part data
Size *int64
}

type ListPartsPaginator struct {
// Use to iterate through pages
}
// Constructor
func NewListPartsPaginator(
client ListPartsAPIClient,
params *ListPartsInput,
optFns ...func(*ListPartsPaginatorOptions)
) *ListPartsPaginator
// Check if more pages are available
func (p *ListPartsPaginator) HasMorePages() bool
// Retrieve the next page
func (p *ListPartsPaginator) NextPage(
ctx context.Context,
optFns ...func(*Options)
) (*ListPartsOutput, error)

type ListPartsPaginatorOptions struct {
// Maximum number of parts to return per page
Limit int32
// Stop pagination if service returns duplicate token
StopOnDuplicateToken bool
}

Lists the parts that have been uploaded for a specific multipart upload. To use this operation, you must provide the upload ID in the request. You obtain this upload ID by sending the initiate multipart upload request through CreateMultipartUpload.
The ListParts request returns a maximum of 1,000 uploaded parts. If your multipart upload consists of more than 1,000 parts, the response returns an IsTruncated field with the value of true, and a NextPartNumberMarker element. To list remaining uploaded parts, in subsequent ListParts requests, include the part-number-marker query string parameter and set its value to the NextPartNumberMarker field value from the previous response.
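The same marker-based loop works without the paginator; a hedged sketch that collects every uploaded part by carrying PartNumberMarker forward (identifiers are placeholders):

func listAllParts(ctx context.Context, client *s3.Client, bucket, key, uploadId string) ([]types.Part, error) {
	var parts []types.Part
	var marker *string
	for {
		out, err := client.ListParts(ctx, &s3.ListPartsInput{
			Bucket:           aws.String(bucket),
			Key:              aws.String(key),
			UploadId:         aws.String(uploadId),
			PartNumberMarker: marker,
		})
		if err != nil {
			return nil, err
		}
		parts = append(parts, out.Parts...)
		if !aws.ToBool(out.IsTruncated) {
			return parts, nil
		}
		// Start the next page after the last part returned.
		marker = out.NextPartNumberMarker
	}
}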
Permissions:
- s3:ListMultipartUploadParts permission; if the upload was created using SSE-KMS, you must also have kms:Decrypt permission (general purpose buckets)
- CreateSession API for session-based authorization (directory buckets)

This example demonstrates a complete multipart upload workflow.
package main
import (
"bytes"
"context"
"fmt"
"io"
"log"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
)
func main() {
ctx := context.TODO()
// Load AWS configuration
cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion("us-west-2"))
if err != nil {
log.Fatalf("unable to load SDK config: %v", err)
}
// Create S3 client
client := s3.NewFromConfig(cfg)
bucketName := "my-bucket"
objectKey := "large-file.dat"
partSize := int64(5 * 1024 * 1024) // 5 MB parts
// Step 1: Initiate multipart upload
createResp, err := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{
Bucket: aws.String(bucketName),
Key: aws.String(objectKey),
// Optional: Specify metadata, encryption, storage class, etc.
Metadata: map[string]string{
"description": "Large file uploaded via multipart",
},
ServerSideEncryption: types.ServerSideEncryptionAes256,
ChecksumAlgorithm: types.ChecksumAlgorithmCrc32,
})
if err != nil {
log.Fatalf("failed to create multipart upload: %v", err)
}
uploadId := createResp.UploadId
fmt.Printf("Initiated multipart upload with ID: %s\n", *uploadId)
// Step 2: Upload parts
// In a real application, you would read from a file or stream
totalSize := int64(15 * 1024 * 1024) // 15 MB total (3 parts)
numParts := (totalSize + partSize - 1) / partSize
completedParts := make([]types.CompletedPart, 0, numParts)
for partNum := int32(1); partNum <= int32(numParts); partNum++ {
// Calculate part size (last part may be smaller)
currentPartSize := partSize
if int64(partNum)*partSize > totalSize {
currentPartSize = totalSize - int64(partNum-1)*partSize
}
// Create sample data for this part
partData := bytes.Repeat([]byte("x"), int(currentPartSize))
// Upload the part
uploadResp, err := client.UploadPart(ctx, &s3.UploadPartInput{
Bucket: aws.String(bucketName),
Key: aws.String(objectKey),
PartNumber: aws.Int32(partNum),
UploadId: uploadId,
Body: bytes.NewReader(partData),
ContentLength: aws.Int64(currentPartSize),
})
if err != nil {
// If upload fails, abort the multipart upload
_, abortErr := client.AbortMultipartUpload(ctx, &s3.AbortMultipartUploadInput{
Bucket: aws.String(bucketName),
Key: aws.String(objectKey),
UploadId: uploadId,
})
if abortErr != nil {
log.Printf("failed to abort multipart upload: %v", abortErr)
}
log.Fatalf("failed to upload part %d: %v", partNum, err)
}
fmt.Printf("Uploaded part %d, ETag: %s\n", partNum, *uploadResp.ETag)
// Add completed part to the list
completedParts = append(completedParts, types.CompletedPart{
ETag: uploadResp.ETag,
PartNumber: aws.Int32(partNum),
})
}
// Step 3: Complete the multipart upload
completeResp, err := client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{
Bucket: aws.String(bucketName),
Key: aws.String(objectKey),
UploadId: uploadId,
MultipartUpload: &types.CompletedMultipartUpload{
Parts: completedParts,
},
})
if err != nil {
log.Fatalf("failed to complete multipart upload: %v", err)
}
fmt.Printf("Successfully completed multipart upload!\n")
fmt.Printf("Location: %s\n", *completeResp.Location)
fmt.Printf("ETag: %s\n", *completeResp.ETag)
}
// Example: Upload part by copying from existing object
func uploadPartCopyExample(ctx context.Context, client *s3.Client) {
uploadResp, err := client.UploadPartCopy(ctx, &s3.UploadPartCopyInput{
Bucket: aws.String("destination-bucket"),
Key: aws.String("destination-key"),
PartNumber: aws.Int32(1),
UploadId: aws.String("upload-id-here"),
CopySource: aws.String("source-bucket/source-key"),
// Optional: Copy only a byte range
CopySourceRange: aws.String("bytes=0-5242879"), // First 5 MB
})
if err != nil {
log.Fatalf("failed to upload part copy: %v", err)
}
fmt.Printf("Part copied, ETag: %s\n", *uploadResp.CopyPartResult.ETag)
}
// Example: List in-progress multipart uploads
func listUploadsExample(ctx context.Context, client *s3.Client) {
// Using paginator for automatic pagination
paginator := s3.NewListMultipartUploadsPaginator(client, &s3.ListMultipartUploadsInput{
Bucket: aws.String("my-bucket"),
})
for paginator.HasMorePages() {
page, err := paginator.NextPage(ctx)
if err != nil {
log.Fatalf("failed to list uploads: %v", err)
}
for _, upload := range page.Uploads {
fmt.Printf("Upload ID: %s, Key: %s, Initiated: %v\n",
*upload.UploadId, *upload.Key, *upload.Initiated)
}
}
}
// Example: List parts of an upload
func listPartsExample(ctx context.Context, client *s3.Client, uploadId string) {
// Using paginator
paginator := s3.NewListPartsPaginator(client, &s3.ListPartsInput{
Bucket: aws.String("my-bucket"),
Key: aws.String("my-key"),
UploadId: aws.String(uploadId),
})
for paginator.HasMorePages() {
page, err := paginator.NextPage(ctx)
if err != nil {
log.Fatalf("failed to list parts: %v", err)
}
for _, part := range page.Parts {
fmt.Printf("Part %d: Size=%d bytes, ETag=%s\n",
*part.PartNumber, *part.Size, *part.ETag)
}
}
}
// Example: Abort a multipart upload
func abortUploadExample(ctx context.Context, client *s3.Client, uploadId string) {
_, err := client.AbortMultipartUpload(ctx, &s3.AbortMultipartUploadInput{
Bucket: aws.String("my-bucket"),
Key: aws.String("my-key"),
UploadId: aws.String(uploadId),
})
if err != nil {
log.Fatalf("failed to abort upload: %v", err)
}
fmt.Println("Multipart upload aborted successfully")
}

Always implement proper error handling and cleanup:
uploadResp, err := client.UploadPart(ctx, input)
if err != nil {
// Abort the multipart upload on error
client.AbortMultipartUpload(ctx, &s3.AbortMultipartUploadInput{
Bucket: input.Bucket,
Key: input.Key,
UploadId: input.UploadId,
})
return fmt.Errorf("upload failed: %w", err)
}

Upload parts in parallel for better performance:
// Use goroutines and sync.WaitGroup for concurrent uploads
var wg sync.WaitGroup
partsChan := make(chan types.CompletedPart, numParts)
for partNum := 1; partNum <= numParts; partNum++ {
wg.Add(1)
go func(pn int32) {
defer wg.Done()
// Upload part pn with client.UploadPart and send its ETag/PartNumber pair on the channel
partsChan <- completedPart
}(int32(partNum))
}
wg.Wait()
close(partsChan)
// Drain partsChan into a slice and sort it by PartNumber before calling
// CompleteMultipartUpload, which requires the parts list in ascending part-number order

Configure bucket lifecycle rules to automatically abort incomplete multipart uploads:
// Parts from incomplete multipart uploads will be cleaned up after 7 days
AbortIncompleteMultipartUpload: &types.AbortIncompleteMultipartUpload{
DaysAfterInitiation: aws.Int32(7),
}

Use checksums to verify data integrity:
createResp, err := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{
Bucket: aws.String(bucket),
Key: aws.String(key),
ChecksumAlgorithm: types.ChecksumAlgorithmCrc32c,
})
// SDK will automatically calculate and include checksums in UploadPart

Store the upload ID and completed parts to enable resume after interruption:
// Save to persistent storage
type UploadState struct {
UploadId string
CompletedParts []types.CompletedPart
}
// On resume, list already uploaded parts
listResp, _ := client.ListParts(ctx, &s3.ListPartsInput{
Bucket: aws.String(bucket),
Key: aws.String(key),
UploadId: aws.String(uploadId),
})
// Continue from where you left off
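Building on the snippet above, a hedged sketch of turning the ListParts response back into a completed-parts list so the upload can be finished after a restart; listResp comes from the call above, and pagination and error handling are omitted for brevity:

completedParts := make([]types.CompletedPart, 0, len(listResp.Parts))
var maxPart int32
for _, p := range listResp.Parts {
	completedParts = append(completedParts, types.CompletedPart{
		ETag:       p.ETag,
		PartNumber: p.PartNumber,
	})
	if aws.ToInt32(p.PartNumber) > maxPart {
		maxPart = aws.ToInt32(p.PartNumber)
	}
}
// Upload any remaining parts starting at part number maxPart+1, then call
// CompleteMultipartUpload with the full parts list in ascending order.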