deps: update vendor dependencies for S3-compatible storage

Updates AWS SDK and removes Blazer B2 dependency in favor of unified
S3-compatible approach. Includes configuration examples and documentation.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-07-01 23:07:58 +12:00
parent f99a866e13
commit 6558a09258
277 changed files with 117799 additions and 26 deletions
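As a rough illustration of the unified approach, the vendored SDK can be pointed at any S3-compatible endpoint; a minimal configuration sketch (endpoint and credentials hypothetical):

	sess, err := session.NewSession(&aws.Config{
		Region:           aws.String("us-east-1"),
		Endpoint:         aws.String("https://s3.example.com"), // any S3-compatible endpoint, e.g. Backblaze B2's S3 API
		S3ForcePathStyle: aws.Bool(true),                       // path-style addressing for non-AWS hosts
	})
	if err != nil {
		return err
	}
	svc := s3.New(sess)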

vendor/github.com/aws/aws-sdk-go/service/s3/api.go generated vendored Normal file (42208 lines)

File diff suppressed because it is too large.


@@ -0,0 +1,202 @@
package s3
import (
"bytes"
"crypto/md5"
"crypto/sha256"
"encoding/base64"
"encoding/hex"
"fmt"
"hash"
"io"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
)
const (
contentMD5Header = "Content-Md5"
contentSha256Header = "X-Amz-Content-Sha256"
amzTeHeader = "X-Amz-Te"
amzTxEncodingHeader = "X-Amz-Transfer-Encoding"
appendMD5TxEncoding = "append-md5"
)
// computeBodyHashes will add Content MD5 and Content Sha256 hashes to the
// request. If the body is not seekable or S3DisableContentMD5Validation is
// set, this handler will be ignored.
func computeBodyHashes(r *request.Request) {
if aws.BoolValue(r.Config.S3DisableContentMD5Validation) {
return
}
if r.IsPresigned() {
return
}
if r.Error != nil || !aws.IsReaderSeekable(r.Body) {
return
}
var md5Hash, sha256Hash hash.Hash
hashers := make([]io.Writer, 0, 2)
// Determine upfront which hashes can be set without overriding user-
// provided header data.
if v := r.HTTPRequest.Header.Get(contentMD5Header); len(v) == 0 {
md5Hash = md5.New()
hashers = append(hashers, md5Hash)
}
if v := r.HTTPRequest.Header.Get(contentSha256Header); len(v) == 0 {
sha256Hash = sha256.New()
hashers = append(hashers, sha256Hash)
}
// Create the destination writer based on the hashes that are not already
// provided by the user.
var dst io.Writer
switch len(hashers) {
case 0:
return
case 1:
dst = hashers[0]
default:
dst = io.MultiWriter(hashers...)
}
if _, err := aws.CopySeekableBody(dst, r.Body); err != nil {
r.Error = awserr.New("BodyHashError", "failed to compute body hashes", err)
return
}
// For the hashes created, set the associated headers that the user did not
// already provide.
if md5Hash != nil {
sum := make([]byte, md5.Size)
encoded := make([]byte, md5Base64EncLen)
base64.StdEncoding.Encode(encoded, md5Hash.Sum(sum[0:0]))
r.HTTPRequest.Header[contentMD5Header] = []string{string(encoded)}
}
if sha256Hash != nil {
encoded := make([]byte, sha256HexEncLen)
sum := make([]byte, sha256.Size)
hex.Encode(encoded, sha256Hash.Sum(sum[0:0]))
r.HTTPRequest.Header[contentSha256Header] = []string{string(encoded)}
}
}
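// For reference, a minimal sketch of the Content-MD5 value this handler
// computes for a seekable body (input value illustrative):
//
//	h := md5.New()
//	aws.CopySeekableBody(h, strings.NewReader("hello world"))
//	contentMD5 := base64.StdEncoding.EncodeToString(h.Sum(nil))
//	// contentMD5 == "XrY7u+Ae7tCTyyK7j1rNww=="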
const (
md5Base64EncLen = (md5.Size + 2) / 3 * 4 // base64.StdEncoding.EncodedLen
sha256HexEncLen = sha256.Size * 2 // hex.EncodedLen
)
// Adds the x-amz-te: append-md5 header to the request. This requests that the
// service respond with a trailing MD5 checksum.
//
// Will not ask for append MD5 if disabled, the request is presigned, or the
// API operation does not support content MD5 validation.
func askForTxEncodingAppendMD5(r *request.Request) {
if aws.BoolValue(r.Config.S3DisableContentMD5Validation) {
return
}
if r.IsPresigned() {
return
}
r.HTTPRequest.Header.Set(amzTeHeader, appendMD5TxEncoding)
}
func useMD5ValidationReader(r *request.Request) {
if r.Error != nil {
return
}
if v := r.HTTPResponse.Header.Get(amzTxEncodingHeader); v != appendMD5TxEncoding {
return
}
var bodyReader *io.ReadCloser
var contentLen int64
switch tv := r.Data.(type) {
case *GetObjectOutput:
bodyReader = &tv.Body
contentLen = aws.Int64Value(tv.ContentLength)
// Update ContentLength, hiding the trailing MD5 checksum.
tv.ContentLength = aws.Int64(contentLen - md5.Size)
tv.ContentRange = aws.String(r.HTTPResponse.Header.Get("X-Amz-Content-Range"))
default:
r.Error = awserr.New("ChecksumValidationError",
fmt.Sprintf("%s: %s header received on unsupported API, %s",
amzTxEncodingHeader, appendMD5TxEncoding, r.Operation.Name,
), nil)
return
}
if contentLen < md5.Size {
r.Error = awserr.New("ChecksumValidationError",
fmt.Sprintf("invalid Content-Length %d for %s %s",
contentLen, appendMD5TxEncoding, amzTxEncodingHeader,
), nil)
return
}
// Wrap and swap the response body reader with the validation reader.
*bodyReader = newMD5ValidationReader(*bodyReader, contentLen-md5.Size)
}
type md5ValidationReader struct {
rawReader io.ReadCloser
payload io.Reader
hash hash.Hash
payloadLen int64
read int64
}
func newMD5ValidationReader(reader io.ReadCloser, payloadLen int64) *md5ValidationReader {
h := md5.New()
return &md5ValidationReader{
rawReader: reader,
payload: io.TeeReader(&io.LimitedReader{R: reader, N: payloadLen}, h),
hash: h,
payloadLen: payloadLen,
}
}
func (v *md5ValidationReader) Read(p []byte) (n int, err error) {
n, err = v.payload.Read(p)
if err != nil && err != io.EOF {
return n, err
}
v.read += int64(n)
if err == io.EOF {
if v.read != v.payloadLen {
return n, io.ErrUnexpectedEOF
}
expectSum := make([]byte, md5.Size)
actualSum := make([]byte, md5.Size)
if _, sumReadErr := io.ReadFull(v.rawReader, expectSum); sumReadErr != nil {
return n, sumReadErr
}
actualSum = v.hash.Sum(actualSum[0:0])
if !bytes.Equal(expectSum, actualSum) {
return n, awserr.New("InvalidChecksum",
fmt.Sprintf("expected MD5 checksum %s, got %s",
hex.EncodeToString(expectSum),
hex.EncodeToString(actualSum),
),
nil)
}
}
return n, err
}
func (v *md5ValidationReader) Close() error {
return v.rawReader.Close()
}
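The validation reader can be exercised in-package by synthesizing a stream that carries its own MD5 trailer; a minimal sketch (values illustrative):

	payload := []byte("example payload")
	sum := md5.Sum(payload)
	body := ioutil.NopCloser(bytes.NewReader(append(payload, sum[:]...)))
	r := newMD5ValidationReader(body, int64(len(payload)))
	data, err := ioutil.ReadAll(r) // err is nil only if the trailing checksum matches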


@@ -0,0 +1,107 @@
package s3
import (
"io/ioutil"
"regexp"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/aws/request"
)
var reBucketLocation = regexp.MustCompile(`>([^<>]+)<\/Location`)
// NormalizeBucketLocation is a utility function which will update the
// passed in value to always be a region ID. Generally this would be used
// with GetBucketLocation API operation.
//
// Replaces empty string with "us-east-1", and "EU" with "eu-west-1".
//
// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
// for more information on the values that can be returned.
func NormalizeBucketLocation(loc string) string {
switch loc {
case "":
loc = "us-east-1"
case "EU":
loc = "eu-west-1"
}
return loc
}
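// For example:
//
//	NormalizeBucketLocation("")          // "us-east-1"
//	NormalizeBucketLocation("EU")        // "eu-west-1"
//	NormalizeBucketLocation("us-west-2") // unchanged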
// NormalizeBucketLocationHandler is a request handler which will update the
// GetBucketLocation's result LocationConstraint value to always be a region ID.
//
// Replaces empty string with "us-east-1", and "EU" with "eu-west-1".
//
// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
// for more information on the values that can be returned.
//
// req, result := svc.GetBucketLocationRequest(&s3.GetBucketLocationInput{
// Bucket: aws.String(bucket),
// })
// req.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler)
// err := req.Send()
var NormalizeBucketLocationHandler = request.NamedHandler{
Name: "awssdk.s3.NormalizeBucketLocation",
Fn: func(req *request.Request) {
if req.Error != nil {
return
}
out := req.Data.(*GetBucketLocationOutput)
loc := NormalizeBucketLocation(aws.StringValue(out.LocationConstraint))
out.LocationConstraint = aws.String(loc)
},
}
// WithNormalizeBucketLocation is a request option which will update the
// GetBucketLocation's result LocationConstraint value to always be a region ID.
//
// Replaces empty string with "us-east-1", and "EU" with "eu-west-1".
//
// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
// for more information on the values that can be returned.
//
// result, err := svc.GetBucketLocationWithContext(ctx,
// &s3.GetBucketLocationInput{
// Bucket: aws.String(bucket),
// },
// s3.WithNormalizeBucketLocation,
// )
func WithNormalizeBucketLocation(r *request.Request) {
r.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler)
}
func buildGetBucketLocation(r *request.Request) {
if r.DataFilled() {
out := r.Data.(*GetBucketLocationOutput)
b, err := ioutil.ReadAll(r.HTTPResponse.Body)
if err != nil {
r.Error = awserr.New(request.ErrCodeSerialization,
"failed reading response body", err)
return
}
match := reBucketLocation.FindSubmatch(b)
if len(match) > 1 {
loc := string(match[1])
out.LocationConstraint = aws.String(loc)
}
}
}
func populateLocationConstraint(r *request.Request) {
if r.ParamsFilled() && aws.StringValue(r.Config.Region) != "us-east-1" {
in := r.Params.(*CreateBucketInput)
if in.CreateBucketConfiguration == nil {
r.Params = awsutil.CopyOf(r.Params)
in = r.Params.(*CreateBucketInput)
in.CreateBucketConfiguration = &CreateBucketConfiguration{
LocationConstraint: r.Config.Region,
}
}
}
}
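With populateLocationConstraint wired into CreateBucket validation, a client configured for a region other than us-east-1 has its LocationConstraint filled automatically; a sketch (bucket name hypothetical, assumes a session sess is in scope):

	svc := s3.New(sess, aws.NewConfig().WithRegion("us-west-2"))
	// Sent as if CreateBucketConfiguration{LocationConstraint: aws.String("us-west-2")} were passed.
	_, err := svc.CreateBucket(&s3.CreateBucketInput{
		Bucket: aws.String("my-example-bucket"),
	})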


@@ -0,0 +1,89 @@
package s3
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/internal/s3shared/arn"
"github.com/aws/aws-sdk-go/internal/s3shared/s3err"
)
func init() {
initClient = defaultInitClientFn
initRequest = defaultInitRequestFn
}
func defaultInitClientFn(c *client.Client) {
if c.Config.UseDualStackEndpoint == endpoints.DualStackEndpointStateUnset {
if aws.BoolValue(c.Config.UseDualStack) {
c.Config.UseDualStackEndpoint = endpoints.DualStackEndpointStateEnabled
} else {
c.Config.UseDualStackEndpoint = endpoints.DualStackEndpointStateDisabled
}
}
// Support building custom endpoints based on config
c.Handlers.Build.PushFront(endpointHandler)
// Require SSL when using SSE keys
c.Handlers.Validate.PushBack(validateSSERequiresSSL)
c.Handlers.Build.PushBack(computeSSEKeyMD5)
c.Handlers.Build.PushBack(computeCopySourceSSEKeyMD5)
// S3 uses custom error unmarshaling logic
c.Handlers.UnmarshalError.Clear()
c.Handlers.UnmarshalError.PushBack(unmarshalError)
c.Handlers.UnmarshalError.PushBackNamed(s3err.RequestFailureWrapperHandler())
}
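// A configuration sketch (assumes a session sess is in scope): the legacy
// boolean is mapped onto the tri-state setting at client initialization.
//
//	svc := s3.New(sess, aws.NewConfig().WithUseDualStack(true))
//	// svc.Config.UseDualStackEndpoint == endpoints.DualStackEndpointStateEnabled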
func defaultInitRequestFn(r *request.Request) {
// Add request handlers for specific platforms.
// e.g. 100-continue support for PUT requests using Go 1.6
platformRequestHandlers(r)
switch r.Operation.Name {
case opGetBucketLocation:
// GetBucketLocation has custom parsing logic
r.Handlers.Unmarshal.PushFront(buildGetBucketLocation)
case opCreateBucket:
// Auto-populate LocationConstraint with current region
r.Handlers.Validate.PushFront(populateLocationConstraint)
case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload:
r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarshalError)
r.Handlers.Unmarshal.PushBackNamed(s3err.RequestFailureWrapperHandler())
case opPutObject, opUploadPart:
r.Handlers.Build.PushBack(computeBodyHashes)
// Disabled until #1837 root issue is resolved.
// case opGetObject:
// r.Handlers.Build.PushBack(askForTxEncodingAppendMD5)
// r.Handlers.Unmarshal.PushBack(useMD5ValidationReader)
case opWriteGetObjectResponse:
r.Handlers.Build.PushFront(buildWriteGetObjectResponseEndpoint)
}
}
// bucketGetter is an accessor interface to grab the "Bucket" field from
// an S3 type.
type bucketGetter interface {
getBucket() string
}
// sseCustomerKeyGetter is an accessor interface to grab the "SSECustomerKey"
// field from an S3 type.
type sseCustomerKeyGetter interface {
getSSECustomerKey() string
}
// copySourceSSECustomerKeyGetter is an accessor interface to grab the
// "CopySourceSSECustomerKey" field from an S3 type.
type copySourceSSECustomerKeyGetter interface {
getCopySourceSSECustomerKey() string
}
// endpointARNGetter is an accessor interface to grab the
// field corresponding to an endpoint ARN input.
type endpointARNGetter interface {
getEndpointARN() (arn.Resource, error)
hasEndpointARN() bool
}

vendor/github.com/aws/aws-sdk-go/service/s3/doc.go generated vendored Normal file (26 lines)

@@ -0,0 +1,26 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
// Package s3 provides the client and types for making API
// requests to Amazon Simple Storage Service.
//
// See https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01 for more information on this service.
//
// See s3 package documentation for more information.
// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/
//
// Using the Client
//
// To contact Amazon Simple Storage Service with the SDK use the New function to create
// a new service client. With that client you can make API requests to the service.
// These clients are safe to use concurrently.
//
// See the SDK's documentation for more information on how to use the SDK.
// https://docs.aws.amazon.com/sdk-for-go/api/
//
// See aws.Config documentation for more information on configuring SDK clients.
// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
//
// See the Amazon Simple Storage Service client S3 for more
// information on creating client for this service.
// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/#New
package s3
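A minimal client construction, per the usage notes above:

	sess := session.Must(session.NewSession())
	svc := s3.New(sess)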


@@ -0,0 +1,110 @@
// Upload Managers
//
// The s3manager package's Uploader provides concurrent upload of content to S3
// by taking advantage of S3's Multipart APIs. The Uploader supports io.Reader
// for streaming uploads, and will take advantage of io.ReadSeeker for
// optimizations if the Body satisfies that type. Once the Uploader instance
// is created you can call Upload concurrently from multiple goroutines safely.
//
// // The session the S3 Uploader will use
// sess := session.Must(session.NewSession())
//
// // Create an uploader with the session and default options
// uploader := s3manager.NewUploader(sess)
//
// f, err := os.Open(filename)
// if err != nil {
// return fmt.Errorf("failed to open file %q, %v", filename, err)
// }
//
// // Upload the file to S3.
// result, err := uploader.Upload(&s3manager.UploadInput{
// Bucket: aws.String(myBucket),
// Key: aws.String(myString),
// Body: f,
// })
// if err != nil {
// return fmt.Errorf("failed to upload file, %v", err)
// }
// fmt.Printf("file uploaded to, %s\n", aws.StringValue(result.Location))
//
// See the s3manager package's Uploader type documentation for more information.
// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Uploader
//
// Download Manager
//
// The s3manager package's Downloader provides concurrent downloading of Objects
// from S3. The Downloader will write S3 Object content with an io.WriterAt.
// Once the Downloader instance is created you can call Download concurrently from
// multiple goroutines safely.
//
// // The session the S3 Downloader will use
// sess := session.Must(session.NewSession())
//
// // Create a downloader with the session and default options
// downloader := s3manager.NewDownloader(sess)
//
// // Create a file to write the S3 Object contents to.
// f, err := os.Create(filename)
// if err != nil {
// return fmt.Errorf("failed to create file %q, %v", filename, err)
// }
//
// // Write the contents of S3 Object to the file
// n, err := downloader.Download(f, &s3.GetObjectInput{
// Bucket: aws.String(myBucket),
// Key: aws.String(myString),
// })
// if err != nil {
// return fmt.Errorf("failed to download file, %v", err)
// }
// fmt.Printf("file downloaded, %d bytes\n", n)
//
// See the s3manager package's Downloader type documentation for more information.
// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Downloader
//
// Automatic URI cleaning
//
// Interacting with objects whose keys contain adjacent slashes (e.g. bucketname/foo//bar/objectname)
// requires setting DisableRestProtocolURICleaning to true in the aws.Config struct
// used by the service client.
//
// svc := s3.New(sess, &aws.Config{
// DisableRestProtocolURICleaning: aws.Bool(true),
// })
// out, err := svc.GetObject(&s3.GetObjectInput {
// Bucket: aws.String("bucketname"),
// Key: aws.String("//foo//bar//moo"),
// })
//
// Get Bucket Region
//
// GetBucketRegion will attempt to get the region for a bucket using a region
// hint to determine which AWS partition to perform the query on. Use this utility
// to determine the region a bucket is in.
//
// sess := session.Must(session.NewSession())
//
// bucket := "my-bucket"
// region, err := s3manager.GetBucketRegion(ctx, sess, bucket, "us-west-2")
// if err != nil {
// if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" {
// fmt.Fprintf(os.Stderr, "unable to find bucket %s's region\n", bucket)
// }
// return err
// }
// fmt.Printf("Bucket %s is in %s region\n", bucket, region)
//
// See the s3manager package's GetBucketRegion function documentation for more information
// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#GetBucketRegion
//
// S3 Crypto Client
//
// The s3crypto package provides the tools to upload and download encrypted
// content from S3. The Encryption and Decryption clients can be used concurrently
// once the client is created.
//
// See the s3crypto package documentation for more information.
// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3crypto/
//
package s3

vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go generated vendored Normal file (299 lines)

@@ -0,0 +1,299 @@
package s3
import (
"fmt"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/endpoints"
"net/url"
"strings"
"github.com/aws/aws-sdk-go/aws"
awsarn "github.com/aws/aws-sdk-go/aws/arn"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/internal/s3shared"
"github.com/aws/aws-sdk-go/internal/s3shared/arn"
)
const (
s3Namespace = "s3"
s3AccessPointNamespace = "s3-accesspoint"
s3ObjectsLambdaNamespace = "s3-object-lambda"
s3OutpostsNamespace = "s3-outposts"
)
// Used by shapes with members decorated as endpoint ARN.
func parseEndpointARN(v string) (arn.Resource, error) {
return arn.ParseResource(v, accessPointResourceParser)
}
func accessPointResourceParser(a awsarn.ARN) (arn.Resource, error) {
resParts := arn.SplitResource(a.Resource)
switch resParts[0] {
case "accesspoint":
switch a.Service {
case s3Namespace:
return arn.ParseAccessPointResource(a, resParts[1:])
case s3ObjectsLambdaNamespace:
return parseS3ObjectLambdaAccessPointResource(a, resParts)
default:
return arn.AccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("service is not %s or %s", s3Namespace, s3ObjectsLambdaNamespace)}
}
case "outpost":
if a.Service != "s3-outposts" {
return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not s3-outposts"}
}
return parseOutpostAccessPointResource(a, resParts[1:])
default:
return nil, arn.InvalidARNError{ARN: a, Reason: "unknown resource type"}
}
}
// parseOutpostAccessPointResource attempts to parse the ARNs resource as an
// outpost access-point resource.
//
// Supported Outpost AccessPoint ARN format:
// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/accesspoint/{accesspointName}
// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint
//
func parseOutpostAccessPointResource(a awsarn.ARN, resParts []string) (arn.OutpostAccessPointARN, error) {
// outpost accesspoint arn is only valid if service is s3-outposts
if a.Service != "s3-outposts" {
return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not s3-outposts"}
}
if len(resParts) == 0 {
return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "outpost resource-id not set"}
}
if len(resParts) < 3 {
return arn.OutpostAccessPointARN{}, arn.InvalidARNError{
ARN: a, Reason: "access-point resource not set in Outpost ARN",
}
}
resID := strings.TrimSpace(resParts[0])
if len(resID) == 0 {
return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "outpost resource-id not set"}
}
var outpostAccessPointARN = arn.OutpostAccessPointARN{}
switch resParts[1] {
case "accesspoint":
accessPointARN, err := arn.ParseAccessPointResource(a, resParts[2:])
if err != nil {
return arn.OutpostAccessPointARN{}, err
}
// set access-point arn
outpostAccessPointARN.AccessPointARN = accessPointARN
default:
return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "access-point resource not set in Outpost ARN"}
}
// set outpost id
outpostAccessPointARN.OutpostID = resID
return outpostAccessPointARN, nil
}
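// An in-package sketch using the ARN format documented above:
//
//	res, err := parseEndpointARN("arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint")
//	// res.(arn.OutpostAccessPointARN).OutpostID == "op-1234567890123456"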
func parseS3ObjectLambdaAccessPointResource(a awsarn.ARN, resParts []string) (arn.S3ObjectLambdaAccessPointARN, error) {
if a.Service != s3ObjectsLambdaNamespace {
return arn.S3ObjectLambdaAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("service is not %s", s3ObjectsLambdaNamespace)}
}
accessPointARN, err := arn.ParseAccessPointResource(a, resParts[1:])
if err != nil {
return arn.S3ObjectLambdaAccessPointARN{}, err
}
if len(accessPointARN.Region) == 0 {
return arn.S3ObjectLambdaAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("%s region not set", s3ObjectsLambdaNamespace)}
}
return arn.S3ObjectLambdaAccessPointARN{
AccessPointARN: accessPointARN,
}, nil
}
func endpointHandler(req *request.Request) {
endpoint, ok := req.Params.(endpointARNGetter)
if !ok || !endpoint.hasEndpointARN() {
updateBucketEndpointFromParams(req)
return
}
resource, err := endpoint.getEndpointARN()
if err != nil {
req.Error = s3shared.NewInvalidARNError(nil, err)
return
}
resReq := s3shared.ResourceRequest{
Resource: resource,
Request: req,
}
if len(resReq.Request.ClientInfo.PartitionID) != 0 && resReq.IsCrossPartition() {
req.Error = s3shared.NewClientPartitionMismatchError(resource,
req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)
return
}
if !resReq.AllowCrossRegion() && resReq.IsCrossRegion() {
req.Error = s3shared.NewClientRegionMismatchError(resource,
req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)
return
}
switch tv := resource.(type) {
case arn.AccessPointARN:
err = updateRequestAccessPointEndpoint(req, tv)
if err != nil {
req.Error = err
}
case arn.S3ObjectLambdaAccessPointARN:
err = updateRequestS3ObjectLambdaAccessPointEndpoint(req, tv)
if err != nil {
req.Error = err
}
case arn.OutpostAccessPointARN:
// outposts does not support FIPS regions
if req.Config.UseFIPSEndpoint == endpoints.FIPSEndpointStateEnabled {
req.Error = s3shared.NewFIPSConfigurationError(resource, req.ClientInfo.PartitionID,
aws.StringValue(req.Config.Region), nil)
return
}
err = updateRequestOutpostAccessPointEndpoint(req, tv)
if err != nil {
req.Error = err
}
default:
req.Error = s3shared.NewInvalidARNError(resource, nil)
}
}
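// Callers reach this handler by supplying an ARN wherever a bucket name is
// accepted; a sketch with hypothetical identifiers:
//
//	out, err := svc.GetObject(&s3.GetObjectInput{
//		Bucket: aws.String("arn:aws:s3:us-west-2:012345678901:accesspoint/myaccesspoint"),
//		Key:    aws.String("mykey"),
//	})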
func updateBucketEndpointFromParams(r *request.Request) {
bucket, ok := bucketNameFromReqParams(r.Params)
if !ok {
// Ignore operation requests if the bucket name was not provided;
// if this is an input validation error the validation handler
// will report it.
return
}
updateEndpointForS3Config(r, bucket)
}
func updateRequestAccessPointEndpoint(req *request.Request, accessPoint arn.AccessPointARN) error {
// Accelerate not supported
if aws.BoolValue(req.Config.S3UseAccelerate) {
return s3shared.NewClientConfiguredForAccelerateError(accessPoint,
req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)
}
// Ignore the disable host prefix for access points
req.Config.DisableEndpointHostPrefix = aws.Bool(false)
if err := accessPointEndpointBuilder(accessPoint).build(req); err != nil {
return err
}
removeBucketFromPath(req.HTTPRequest.URL)
return nil
}
func updateRequestS3ObjectLambdaAccessPointEndpoint(req *request.Request, accessPoint arn.S3ObjectLambdaAccessPointARN) error {
// DualStack not supported
if isUseDualStackEndpoint(req) {
return s3shared.NewClientConfiguredForDualStackError(accessPoint,
req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)
}
// Accelerate not supported
if aws.BoolValue(req.Config.S3UseAccelerate) {
return s3shared.NewClientConfiguredForAccelerateError(accessPoint,
req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)
}
// Ignore the disable host prefix for access points
req.Config.DisableEndpointHostPrefix = aws.Bool(false)
if err := s3ObjectLambdaAccessPointEndpointBuilder(accessPoint).build(req); err != nil {
return err
}
removeBucketFromPath(req.HTTPRequest.URL)
return nil
}
func updateRequestOutpostAccessPointEndpoint(req *request.Request, accessPoint arn.OutpostAccessPointARN) error {
// Accelerate not supported
if aws.BoolValue(req.Config.S3UseAccelerate) {
return s3shared.NewClientConfiguredForAccelerateError(accessPoint,
req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)
}
// Dualstack not supported
if isUseDualStackEndpoint(req) {
return s3shared.NewClientConfiguredForDualStackError(accessPoint,
req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)
}
// Ignore the disable host prefix for access points
req.Config.DisableEndpointHostPrefix = aws.Bool(false)
if err := outpostAccessPointEndpointBuilder(accessPoint).build(req); err != nil {
return err
}
removeBucketFromPath(req.HTTPRequest.URL)
return nil
}
func removeBucketFromPath(u *url.URL) {
u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1)
if u.Path == "" {
u.Path = "/"
}
}
func buildWriteGetObjectResponseEndpoint(req *request.Request) {
// DualStack not supported
if isUseDualStackEndpoint(req) {
req.Error = awserr.New("ConfigurationError", "client configured for dualstack but not supported for operation", nil)
return
}
// Accelerate not supported
if aws.BoolValue(req.Config.S3UseAccelerate) {
req.Error = awserr.New("ConfigurationError", "client configured for accelerate but not supported for operation", nil)
return
}
signingName := s3ObjectsLambdaNamespace
signingRegion := req.ClientInfo.SigningRegion
if !hasCustomEndpoint(req) {
endpoint, err := resolveRegionalEndpoint(req, aws.StringValue(req.Config.Region), req.ClientInfo.ResolvedRegion, EndpointsID)
if err != nil {
req.Error = awserr.New(request.ErrCodeSerialization, "failed to resolve endpoint", err)
return
}
signingRegion = endpoint.SigningRegion
if err = updateRequestEndpoint(req, endpoint.URL); err != nil {
req.Error = err
return
}
updateS3HostPrefixForS3ObjectLambda(req)
}
redirectSigner(req, signingName, signingRegion)
}
func isUseDualStackEndpoint(req *request.Request) bool {
if req.Config.UseDualStackEndpoint != endpoints.DualStackEndpointStateUnset {
return req.Config.UseDualStackEndpoint == endpoints.DualStackEndpointStateEnabled
}
return aws.BoolValue(req.Config.UseDualStack)
}


@@ -0,0 +1,242 @@
package s3
import (
"net/url"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/internal/s3shared"
"github.com/aws/aws-sdk-go/internal/s3shared/arn"
"github.com/aws/aws-sdk-go/private/protocol"
)
const (
accessPointPrefixLabel = "accesspoint"
accountIDPrefixLabel = "accountID"
accessPointPrefixTemplate = "{" + accessPointPrefixLabel + "}-{" + accountIDPrefixLabel + "}."
outpostPrefixLabel = "outpost"
outpostAccessPointPrefixTemplate = accessPointPrefixTemplate + "{" + outpostPrefixLabel + "}."
)
// hasCustomEndpoint returns true if endpoint is a custom endpoint
func hasCustomEndpoint(r *request.Request) bool {
return len(aws.StringValue(r.Config.Endpoint)) > 0
}
// accessPointEndpointBuilder represents the endpoint builder for access point arn
type accessPointEndpointBuilder arn.AccessPointARN
// build builds the endpoint for corresponding access point arn
//
// For building an endpoint from access point arn, format used is:
// - Access point endpoint format : {accesspointName}-{accountId}.s3-accesspoint.{region}.{dnsSuffix}
// - example : myaccesspoint-012345678901.s3-accesspoint.us-west-2.amazonaws.com
//
// Access Point Endpoint requests are signed using "s3" as signing name.
//
func (a accessPointEndpointBuilder) build(req *request.Request) error {
resolveService := arn.AccessPointARN(a).Service
resolveRegion := arn.AccessPointARN(a).Region
endpoint, err := resolveRegionalEndpoint(req, resolveRegion, "", resolveService)
if err != nil {
return s3shared.NewFailedToResolveEndpointError(arn.AccessPointARN(a),
req.ClientInfo.PartitionID, resolveRegion, err)
}
endpoint.URL = endpoints.AddScheme(endpoint.URL, aws.BoolValue(req.Config.DisableSSL))
if !hasCustomEndpoint(req) {
if err = updateRequestEndpoint(req, endpoint.URL); err != nil {
return err
}
// dual stack provided by endpoint resolver
updateS3HostForS3AccessPoint(req)
}
protocol.HostPrefixBuilder{
Prefix: accessPointPrefixTemplate,
LabelsFn: a.hostPrefixLabelValues,
}.Build(req)
// signer redirection
redirectSigner(req, endpoint.SigningName, endpoint.SigningRegion)
err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host)
if err != nil {
return s3shared.NewInvalidARNError(arn.AccessPointARN(a), err)
}
return nil
}
func (a accessPointEndpointBuilder) hostPrefixLabelValues() map[string]string {
return map[string]string{
accessPointPrefixLabel: arn.AccessPointARN(a).AccessPointName,
accountIDPrefixLabel: arn.AccessPointARN(a).AccountID,
}
}
// s3ObjectLambdaAccessPointEndpointBuilder represents the endpoint builder for an s3 object lambda access point arn
type s3ObjectLambdaAccessPointEndpointBuilder arn.S3ObjectLambdaAccessPointARN
// build builds the endpoint for corresponding access point arn
//
// For building an endpoint from access point arn, format used is:
// - Access point endpoint format : {accesspointName}-{accountId}.s3-object-lambda.{region}.{dnsSuffix}
// - example : myaccesspoint-012345678901.s3-object-lambda.us-west-2.amazonaws.com
//
// Access Point Endpoint requests are signed using "s3-object-lambda" as signing name.
//
func (a s3ObjectLambdaAccessPointEndpointBuilder) build(req *request.Request) error {
resolveRegion := arn.S3ObjectLambdaAccessPointARN(a).Region
endpoint, err := resolveRegionalEndpoint(req, resolveRegion, "", EndpointsID)
if err != nil {
return s3shared.NewFailedToResolveEndpointError(arn.S3ObjectLambdaAccessPointARN(a),
req.ClientInfo.PartitionID, resolveRegion, err)
}
endpoint.URL = endpoints.AddScheme(endpoint.URL, aws.BoolValue(req.Config.DisableSSL))
endpoint.SigningName = s3ObjectsLambdaNamespace
if !hasCustomEndpoint(req) {
if err = updateRequestEndpoint(req, endpoint.URL); err != nil {
return err
}
updateS3HostPrefixForS3ObjectLambda(req)
}
protocol.HostPrefixBuilder{
Prefix: accessPointPrefixTemplate,
LabelsFn: a.hostPrefixLabelValues,
}.Build(req)
// signer redirection
redirectSigner(req, endpoint.SigningName, endpoint.SigningRegion)
err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host)
if err != nil {
return s3shared.NewInvalidARNError(arn.S3ObjectLambdaAccessPointARN(a), err)
}
return nil
}
func (a s3ObjectLambdaAccessPointEndpointBuilder) hostPrefixLabelValues() map[string]string {
return map[string]string{
accessPointPrefixLabel: arn.S3ObjectLambdaAccessPointARN(a).AccessPointName,
accountIDPrefixLabel: arn.S3ObjectLambdaAccessPointARN(a).AccountID,
}
}
// outpostAccessPointEndpointBuilder represents the Endpoint builder for outpost access point arn.
type outpostAccessPointEndpointBuilder arn.OutpostAccessPointARN
// build builds an endpoint corresponding to the outpost access point arn.
//
// For building an endpoint from outpost access point arn, format used is:
// - Outpost access point endpoint format : {accesspointName}-{accountId}.{outpostId}.s3-outposts.{region}.{dnsSuffix}
// - example : myaccesspoint-012345678901.op-01234567890123456.s3-outposts.us-west-2.amazonaws.com
//
// Outpost AccessPoint Endpoint requests are signed using "s3-outposts" as signing name.
//
func (o outpostAccessPointEndpointBuilder) build(req *request.Request) error {
resolveRegion := o.Region
resolveService := o.Service
endpointsID := resolveService
if resolveService == s3OutpostsNamespace {
endpointsID = "s3"
}
endpoint, err := resolveRegionalEndpoint(req, resolveRegion, "", endpointsID)
if err != nil {
return s3shared.NewFailedToResolveEndpointError(o,
req.ClientInfo.PartitionID, resolveRegion, err)
}
endpoint.URL = endpoints.AddScheme(endpoint.URL, aws.BoolValue(req.Config.DisableSSL))
if !hasCustomEndpoint(req) {
if err = updateRequestEndpoint(req, endpoint.URL); err != nil {
return err
}
updateHostPrefix(req, endpointsID, resolveService)
}
protocol.HostPrefixBuilder{
Prefix: outpostAccessPointPrefixTemplate,
LabelsFn: o.hostPrefixLabelValues,
}.Build(req)
// set the signing region, name to resolved names from ARN
redirectSigner(req, resolveService, resolveRegion)
err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host)
if err != nil {
return s3shared.NewInvalidARNError(o, err)
}
return nil
}
func (o outpostAccessPointEndpointBuilder) hostPrefixLabelValues() map[string]string {
return map[string]string{
accessPointPrefixLabel: o.AccessPointName,
accountIDPrefixLabel: o.AccountID,
outpostPrefixLabel: o.OutpostID,
}
}
func resolveRegionalEndpoint(r *request.Request, region, resolvedRegion, endpointsID string) (endpoints.ResolvedEndpoint, error) {
return r.Config.EndpointResolver.EndpointFor(endpointsID, region, func(opts *endpoints.Options) {
opts.DisableSSL = aws.BoolValue(r.Config.DisableSSL)
opts.UseDualStack = aws.BoolValue(r.Config.UseDualStack)
opts.UseDualStackEndpoint = r.Config.UseDualStackEndpoint
opts.UseFIPSEndpoint = r.Config.UseFIPSEndpoint
opts.S3UsEast1RegionalEndpoint = endpoints.RegionalS3UsEast1Endpoint
opts.ResolvedRegion = resolvedRegion
opts.Logger = r.Config.Logger
opts.LogDeprecated = r.Config.LogLevel.Matches(aws.LogDebugWithDeprecated)
})
}
func updateRequestEndpoint(r *request.Request, endpoint string) (err error) {
r.HTTPRequest.URL, err = url.Parse(endpoint + r.Operation.HTTPPath)
if err != nil {
return awserr.New(request.ErrCodeSerialization,
"failed to parse endpoint URL", err)
}
return nil
}
// redirectSigner sets signing name, signing region for a request
func redirectSigner(req *request.Request, signingName string, signingRegion string) {
req.ClientInfo.SigningName = signingName
req.ClientInfo.SigningRegion = signingRegion
}
func updateS3HostForS3AccessPoint(req *request.Request) {
updateHostPrefix(req, "s3", s3AccessPointNamespace)
}
func updateS3HostPrefixForS3ObjectLambda(req *request.Request) {
updateHostPrefix(req, "s3", s3ObjectsLambdaNamespace)
}
func updateHostPrefix(req *request.Request, oldEndpointPrefix, newEndpointPrefix string) {
host := req.HTTPRequest.URL.Host
if strings.HasPrefix(host, oldEndpointPrefix) {
// replace the service host label oldEndpointPrefix with newEndpointPrefix
req.HTTPRequest.URL.Host = newEndpointPrefix + host[len(oldEndpointPrefix):]
}
}
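updateHostPrefix only swaps the leading service label; a worked example (host value hypothetical):

	// before: req.HTTPRequest.URL.Host == "s3.us-west-2.amazonaws.com"
	updateHostPrefix(req, "s3", "s3-object-lambda")
	// after:  req.HTTPRequest.URL.Host == "s3-object-lambda.us-west-2.amazonaws.com"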

vendor/github.com/aws/aws-sdk-go/service/s3/errors.go generated vendored Normal file (60 lines)

@@ -0,0 +1,60 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package s3
const (
// ErrCodeBucketAlreadyExists for service response error code
// "BucketAlreadyExists".
//
// The requested bucket name is not available. The bucket namespace is shared
// by all users of the system. Select a different name and try again.
ErrCodeBucketAlreadyExists = "BucketAlreadyExists"
// ErrCodeBucketAlreadyOwnedByYou for service response error code
// "BucketAlreadyOwnedByYou".
//
// The bucket you tried to create already exists, and you own it. Amazon S3
// returns this error in all Amazon Web Services Regions except in the North
// Virginia Region. For legacy compatibility, if you re-create an existing bucket
// that you already own in the North Virginia Region, Amazon S3 returns 200
// OK and resets the bucket access control lists (ACLs).
ErrCodeBucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou"
// ErrCodeInvalidObjectState for service response error code
// "InvalidObjectState".
//
// Object is archived and inaccessible until restored.
ErrCodeInvalidObjectState = "InvalidObjectState"
// ErrCodeNoSuchBucket for service response error code
// "NoSuchBucket".
//
// The specified bucket does not exist.
ErrCodeNoSuchBucket = "NoSuchBucket"
// ErrCodeNoSuchKey for service response error code
// "NoSuchKey".
//
// The specified key does not exist.
ErrCodeNoSuchKey = "NoSuchKey"
// ErrCodeNoSuchUpload for service response error code
// "NoSuchUpload".
//
// The specified multipart upload does not exist.
ErrCodeNoSuchUpload = "NoSuchUpload"
// ErrCodeObjectAlreadyInActiveTierError for service response error code
// "ObjectAlreadyInActiveTierError".
//
// This action is not allowed against this storage tier.
ErrCodeObjectAlreadyInActiveTierError = "ObjectAlreadyInActiveTierError"
// ErrCodeObjectNotInActiveTierError for service response error code
// "ObjectNotInActiveTierError".
//
// The source object of the COPY action is not in the active tier and is only
// stored in Amazon S3 Glacier.
ErrCodeObjectNotInActiveTierError = "ObjectNotInActiveTierError"
)
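These constants are matched against awserr.Error codes; a typical check (sketch, assumes err came from an S3 call):

	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeNoSuchKey {
		// the requested object does not exist
	}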


@@ -0,0 +1,136 @@
package s3
import (
"fmt"
"net/url"
"regexp"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
)
// an operationBlacklist is a list of operation names that a request handler
// should not be executed with.
type operationBlacklist []string
// Continue will return true if the Request's operation name is not in the
// blacklist. False otherwise.
func (b operationBlacklist) Continue(r *request.Request) bool {
for i := 0; i < len(b); i++ {
if b[i] == r.Operation.Name {
return false
}
}
return true
}
var accelerateOpBlacklist = operationBlacklist{
opListBuckets, opCreateBucket, opDeleteBucket,
}
// Automatically add the bucket name to the endpoint domain
// if possible. This style of addressing is valid for all bucket names that are
// DNS compatible and do not contain "."
func updateEndpointForS3Config(r *request.Request, bucketName string) {
forceHostStyle := aws.BoolValue(r.Config.S3ForcePathStyle)
accelerate := aws.BoolValue(r.Config.S3UseAccelerate)
if accelerate && accelerateOpBlacklist.Continue(r) {
if forceHostStyle {
if r.Config.Logger != nil {
r.Config.Logger.Log("ERROR: aws.Config.S3UseAccelerate is not compatible with aws.Config.S3ForcePathStyle, ignoring S3ForcePathStyle.")
}
}
updateEndpointForAccelerate(r, bucketName)
} else if !forceHostStyle && r.Operation.Name != opGetBucketLocation {
updateEndpointForHostStyle(r, bucketName)
}
}
func updateEndpointForHostStyle(r *request.Request, bucketName string) {
if !hostCompatibleBucketName(r.HTTPRequest.URL, bucketName) {
// bucket name must be valid to put into the host
return
}
moveBucketToHost(r.HTTPRequest.URL, bucketName)
}
var (
accelElem = []byte("s3-accelerate.dualstack.")
)
func updateEndpointForAccelerate(r *request.Request, bucketName string) {
if !hostCompatibleBucketName(r.HTTPRequest.URL, bucketName) {
r.Error = awserr.New("InvalidParameterException",
fmt.Sprintf("bucket name %s is not compatible with S3 Accelerate", bucketName),
nil)
return
}
parts := strings.Split(r.HTTPRequest.URL.Host, ".")
if len(parts) < 3 {
r.Error = awserr.New("InvalidParameterExecption",
fmt.Sprintf("unable to update endpoint host for S3 accelerate, hostname invalid, %s",
r.HTTPRequest.URL.Host), nil)
return
}
if parts[0] == "s3" || strings.HasPrefix(parts[0], "s3-") {
parts[0] = "s3-accelerate"
}
for i := 1; i+1 < len(parts); i++ {
if parts[i] == aws.StringValue(r.Config.Region) {
parts = append(parts[:i], parts[i+1:]...)
break
}
}
r.HTTPRequest.URL.Host = strings.Join(parts, ".")
moveBucketToHost(r.HTTPRequest.URL, bucketName)
}
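// Worked example (hypothetical values): with region us-west-2 and bucket
// "mybucket", host "s3.us-west-2.amazonaws.com" becomes
// "mybucket.s3-accelerate.amazonaws.com".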
// Attempts to retrieve the bucket name from the request input parameters.
// If no bucket is found, or the field is empty "", false will be returned.
func bucketNameFromReqParams(params interface{}) (string, bool) {
if iface, ok := params.(bucketGetter); ok {
b := iface.getBucket()
return b, len(b) > 0
}
return "", false
}
// hostCompatibleBucketName returns true if the request should
// put the bucket in the host. This is false if S3ForcePathStyle is
// explicitly set or if the bucket is not DNS compatible.
func hostCompatibleBucketName(u *url.URL, bucket string) bool {
// Bucket might be DNS compatible but dots in the hostname will fail
// certificate validation, so do not use host-style.
if u.Scheme == "https" && strings.Contains(bucket, ".") {
return false
}
// if the bucket is DNS compatible
return dnsCompatibleBucketName(bucket)
}
var reDomain = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
var reIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`)
// dnsCompatibleBucketName returns true if the bucket name is DNS compatible.
// Buckets created outside of the classic region MUST be DNS compatible.
func dnsCompatibleBucketName(bucket string) bool {
return reDomain.MatchString(bucket) &&
!reIPAddress.MatchString(bucket) &&
!strings.Contains(bucket, "..")
}
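// For example:
//
//	dnsCompatibleBucketName("my-bucket")   // true
//	dnsCompatibleBucketName("My_Bucket")   // false: uppercase and underscore
//	dnsCompatibleBucketName("192.168.0.1") // false: IP address
//	dnsCompatibleBucketName("a..b")        // false: adjacent dots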
// moveBucketToHost moves the bucket name from the URI path to URL host.
func moveBucketToHost(u *url.URL, bucket string) {
u.Host = bucket + "." + u.Host
removeBucketFromPath(u)
}


@@ -0,0 +1,9 @@
//go:build !go1.6
// +build !go1.6
package s3
import "github.com/aws/aws-sdk-go/aws/request"
func platformRequestHandlers(r *request.Request) {
}


@@ -0,0 +1,29 @@
//go:build go1.6
// +build go1.6
package s3
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
)
func platformRequestHandlers(r *request.Request) {
if r.Operation.HTTPMethod == "PUT" {
// 100-Continue should only be used on put requests.
r.Handlers.Sign.PushBack(add100Continue)
}
}
func add100Continue(r *request.Request) {
if aws.BoolValue(r.Config.S3Disable100Continue) {
return
}
if r.HTTPRequest.ContentLength < 1024*1024*2 {
// Ignore requests smaller than 2MB. This helps prevent delaying
// requests unnecessarily.
return
}
r.HTTPRequest.Header.Set("Expect", "100-Continue")
}
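Both the Expect header and the 2 MB threshold can be sidestepped via config; a sketch (assumes a session sess is in scope):

	svc := s3.New(sess, aws.NewConfig().WithS3Disable100Continue(true))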


@@ -0,0 +1,479 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
// Package s3iface provides an interface to enable mocking the Amazon Simple Storage Service service client
// for testing your code.
//
// It is important to note that this interface will have breaking changes
// when the service model is updated and adds new API operations, paginators,
// and waiters.
package s3iface
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/s3"
)
// S3API provides an interface to enable mocking the
// s3.S3 service client's API operations,
// paginators, and waiters. This makes unit testing your code that calls out
// to the SDK's service client easier.
//
// The best way to use this interface is so the SDK's service client's calls
// can be stubbed out for unit testing your code with the SDK without needing
// to inject custom request handlers into the SDK's request pipeline.
//
// // myFunc uses an SDK service client to make a request to
// // Amazon Simple Storage Service.
// func myFunc(svc s3iface.S3API) bool {
// // Make svc.AbortMultipartUpload request
// }
//
// func main() {
// sess := session.New()
// svc := s3.New(sess)
//
// myFunc(svc)
// }
//
// In your _test.go file:
//
// // Define a mock struct to be used in your unit tests of myFunc.
// type mockS3Client struct {
// s3iface.S3API
// }
// func (m *mockS3Client) AbortMultipartUpload(input *s3.AbortMultipartUploadInput) (*s3.AbortMultipartUploadOutput, error) {
// // mock response/functionality
// }
//
// func TestMyFunc(t *testing.T) {
// // Setup Test
// mockSvc := &mockS3Client{}
//
// myfunc(mockSvc)
//
// // Verify myFunc's functionality
// }
//
// It is important to note that this interface will have breaking changes
// when the service model is updated and adds new API operations, paginators,
// and waiters. It's suggested to use the pattern above for testing, or to use
// tooling to generate mocks to satisfy the interfaces.
type S3API interface {
AbortMultipartUpload(*s3.AbortMultipartUploadInput) (*s3.AbortMultipartUploadOutput, error)
AbortMultipartUploadWithContext(aws.Context, *s3.AbortMultipartUploadInput, ...request.Option) (*s3.AbortMultipartUploadOutput, error)
AbortMultipartUploadRequest(*s3.AbortMultipartUploadInput) (*request.Request, *s3.AbortMultipartUploadOutput)
CompleteMultipartUpload(*s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error)
CompleteMultipartUploadWithContext(aws.Context, *s3.CompleteMultipartUploadInput, ...request.Option) (*s3.CompleteMultipartUploadOutput, error)
CompleteMultipartUploadRequest(*s3.CompleteMultipartUploadInput) (*request.Request, *s3.CompleteMultipartUploadOutput)
CopyObject(*s3.CopyObjectInput) (*s3.CopyObjectOutput, error)
CopyObjectWithContext(aws.Context, *s3.CopyObjectInput, ...request.Option) (*s3.CopyObjectOutput, error)
CopyObjectRequest(*s3.CopyObjectInput) (*request.Request, *s3.CopyObjectOutput)
CreateBucket(*s3.CreateBucketInput) (*s3.CreateBucketOutput, error)
CreateBucketWithContext(aws.Context, *s3.CreateBucketInput, ...request.Option) (*s3.CreateBucketOutput, error)
CreateBucketRequest(*s3.CreateBucketInput) (*request.Request, *s3.CreateBucketOutput)
CreateMultipartUpload(*s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error)
CreateMultipartUploadWithContext(aws.Context, *s3.CreateMultipartUploadInput, ...request.Option) (*s3.CreateMultipartUploadOutput, error)
CreateMultipartUploadRequest(*s3.CreateMultipartUploadInput) (*request.Request, *s3.CreateMultipartUploadOutput)
DeleteBucket(*s3.DeleteBucketInput) (*s3.DeleteBucketOutput, error)
DeleteBucketWithContext(aws.Context, *s3.DeleteBucketInput, ...request.Option) (*s3.DeleteBucketOutput, error)
DeleteBucketRequest(*s3.DeleteBucketInput) (*request.Request, *s3.DeleteBucketOutput)
DeleteBucketAnalyticsConfiguration(*s3.DeleteBucketAnalyticsConfigurationInput) (*s3.DeleteBucketAnalyticsConfigurationOutput, error)
DeleteBucketAnalyticsConfigurationWithContext(aws.Context, *s3.DeleteBucketAnalyticsConfigurationInput, ...request.Option) (*s3.DeleteBucketAnalyticsConfigurationOutput, error)
DeleteBucketAnalyticsConfigurationRequest(*s3.DeleteBucketAnalyticsConfigurationInput) (*request.Request, *s3.DeleteBucketAnalyticsConfigurationOutput)
DeleteBucketCors(*s3.DeleteBucketCorsInput) (*s3.DeleteBucketCorsOutput, error)
DeleteBucketCorsWithContext(aws.Context, *s3.DeleteBucketCorsInput, ...request.Option) (*s3.DeleteBucketCorsOutput, error)
DeleteBucketCorsRequest(*s3.DeleteBucketCorsInput) (*request.Request, *s3.DeleteBucketCorsOutput)
DeleteBucketEncryption(*s3.DeleteBucketEncryptionInput) (*s3.DeleteBucketEncryptionOutput, error)
DeleteBucketEncryptionWithContext(aws.Context, *s3.DeleteBucketEncryptionInput, ...request.Option) (*s3.DeleteBucketEncryptionOutput, error)
DeleteBucketEncryptionRequest(*s3.DeleteBucketEncryptionInput) (*request.Request, *s3.DeleteBucketEncryptionOutput)
DeleteBucketIntelligentTieringConfiguration(*s3.DeleteBucketIntelligentTieringConfigurationInput) (*s3.DeleteBucketIntelligentTieringConfigurationOutput, error)
DeleteBucketIntelligentTieringConfigurationWithContext(aws.Context, *s3.DeleteBucketIntelligentTieringConfigurationInput, ...request.Option) (*s3.DeleteBucketIntelligentTieringConfigurationOutput, error)
DeleteBucketIntelligentTieringConfigurationRequest(*s3.DeleteBucketIntelligentTieringConfigurationInput) (*request.Request, *s3.DeleteBucketIntelligentTieringConfigurationOutput)
DeleteBucketInventoryConfiguration(*s3.DeleteBucketInventoryConfigurationInput) (*s3.DeleteBucketInventoryConfigurationOutput, error)
DeleteBucketInventoryConfigurationWithContext(aws.Context, *s3.DeleteBucketInventoryConfigurationInput, ...request.Option) (*s3.DeleteBucketInventoryConfigurationOutput, error)
DeleteBucketInventoryConfigurationRequest(*s3.DeleteBucketInventoryConfigurationInput) (*request.Request, *s3.DeleteBucketInventoryConfigurationOutput)
DeleteBucketLifecycle(*s3.DeleteBucketLifecycleInput) (*s3.DeleteBucketLifecycleOutput, error)
DeleteBucketLifecycleWithContext(aws.Context, *s3.DeleteBucketLifecycleInput, ...request.Option) (*s3.DeleteBucketLifecycleOutput, error)
DeleteBucketLifecycleRequest(*s3.DeleteBucketLifecycleInput) (*request.Request, *s3.DeleteBucketLifecycleOutput)
DeleteBucketMetricsConfiguration(*s3.DeleteBucketMetricsConfigurationInput) (*s3.DeleteBucketMetricsConfigurationOutput, error)
DeleteBucketMetricsConfigurationWithContext(aws.Context, *s3.DeleteBucketMetricsConfigurationInput, ...request.Option) (*s3.DeleteBucketMetricsConfigurationOutput, error)
DeleteBucketMetricsConfigurationRequest(*s3.DeleteBucketMetricsConfigurationInput) (*request.Request, *s3.DeleteBucketMetricsConfigurationOutput)
DeleteBucketOwnershipControls(*s3.DeleteBucketOwnershipControlsInput) (*s3.DeleteBucketOwnershipControlsOutput, error)
DeleteBucketOwnershipControlsWithContext(aws.Context, *s3.DeleteBucketOwnershipControlsInput, ...request.Option) (*s3.DeleteBucketOwnershipControlsOutput, error)
DeleteBucketOwnershipControlsRequest(*s3.DeleteBucketOwnershipControlsInput) (*request.Request, *s3.DeleteBucketOwnershipControlsOutput)
DeleteBucketPolicy(*s3.DeleteBucketPolicyInput) (*s3.DeleteBucketPolicyOutput, error)
DeleteBucketPolicyWithContext(aws.Context, *s3.DeleteBucketPolicyInput, ...request.Option) (*s3.DeleteBucketPolicyOutput, error)
DeleteBucketPolicyRequest(*s3.DeleteBucketPolicyInput) (*request.Request, *s3.DeleteBucketPolicyOutput)
DeleteBucketReplication(*s3.DeleteBucketReplicationInput) (*s3.DeleteBucketReplicationOutput, error)
DeleteBucketReplicationWithContext(aws.Context, *s3.DeleteBucketReplicationInput, ...request.Option) (*s3.DeleteBucketReplicationOutput, error)
DeleteBucketReplicationRequest(*s3.DeleteBucketReplicationInput) (*request.Request, *s3.DeleteBucketReplicationOutput)
DeleteBucketTagging(*s3.DeleteBucketTaggingInput) (*s3.DeleteBucketTaggingOutput, error)
DeleteBucketTaggingWithContext(aws.Context, *s3.DeleteBucketTaggingInput, ...request.Option) (*s3.DeleteBucketTaggingOutput, error)
DeleteBucketTaggingRequest(*s3.DeleteBucketTaggingInput) (*request.Request, *s3.DeleteBucketTaggingOutput)
DeleteBucketWebsite(*s3.DeleteBucketWebsiteInput) (*s3.DeleteBucketWebsiteOutput, error)
DeleteBucketWebsiteWithContext(aws.Context, *s3.DeleteBucketWebsiteInput, ...request.Option) (*s3.DeleteBucketWebsiteOutput, error)
DeleteBucketWebsiteRequest(*s3.DeleteBucketWebsiteInput) (*request.Request, *s3.DeleteBucketWebsiteOutput)
DeleteObject(*s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error)
DeleteObjectWithContext(aws.Context, *s3.DeleteObjectInput, ...request.Option) (*s3.DeleteObjectOutput, error)
DeleteObjectRequest(*s3.DeleteObjectInput) (*request.Request, *s3.DeleteObjectOutput)
DeleteObjectTagging(*s3.DeleteObjectTaggingInput) (*s3.DeleteObjectTaggingOutput, error)
DeleteObjectTaggingWithContext(aws.Context, *s3.DeleteObjectTaggingInput, ...request.Option) (*s3.DeleteObjectTaggingOutput, error)
DeleteObjectTaggingRequest(*s3.DeleteObjectTaggingInput) (*request.Request, *s3.DeleteObjectTaggingOutput)
DeleteObjects(*s3.DeleteObjectsInput) (*s3.DeleteObjectsOutput, error)
DeleteObjectsWithContext(aws.Context, *s3.DeleteObjectsInput, ...request.Option) (*s3.DeleteObjectsOutput, error)
DeleteObjectsRequest(*s3.DeleteObjectsInput) (*request.Request, *s3.DeleteObjectsOutput)
DeletePublicAccessBlock(*s3.DeletePublicAccessBlockInput) (*s3.DeletePublicAccessBlockOutput, error)
DeletePublicAccessBlockWithContext(aws.Context, *s3.DeletePublicAccessBlockInput, ...request.Option) (*s3.DeletePublicAccessBlockOutput, error)
DeletePublicAccessBlockRequest(*s3.DeletePublicAccessBlockInput) (*request.Request, *s3.DeletePublicAccessBlockOutput)
GetBucketAccelerateConfiguration(*s3.GetBucketAccelerateConfigurationInput) (*s3.GetBucketAccelerateConfigurationOutput, error)
GetBucketAccelerateConfigurationWithContext(aws.Context, *s3.GetBucketAccelerateConfigurationInput, ...request.Option) (*s3.GetBucketAccelerateConfigurationOutput, error)
GetBucketAccelerateConfigurationRequest(*s3.GetBucketAccelerateConfigurationInput) (*request.Request, *s3.GetBucketAccelerateConfigurationOutput)
GetBucketAcl(*s3.GetBucketAclInput) (*s3.GetBucketAclOutput, error)
GetBucketAclWithContext(aws.Context, *s3.GetBucketAclInput, ...request.Option) (*s3.GetBucketAclOutput, error)
GetBucketAclRequest(*s3.GetBucketAclInput) (*request.Request, *s3.GetBucketAclOutput)
GetBucketAnalyticsConfiguration(*s3.GetBucketAnalyticsConfigurationInput) (*s3.GetBucketAnalyticsConfigurationOutput, error)
GetBucketAnalyticsConfigurationWithContext(aws.Context, *s3.GetBucketAnalyticsConfigurationInput, ...request.Option) (*s3.GetBucketAnalyticsConfigurationOutput, error)
GetBucketAnalyticsConfigurationRequest(*s3.GetBucketAnalyticsConfigurationInput) (*request.Request, *s3.GetBucketAnalyticsConfigurationOutput)
GetBucketCors(*s3.GetBucketCorsInput) (*s3.GetBucketCorsOutput, error)
GetBucketCorsWithContext(aws.Context, *s3.GetBucketCorsInput, ...request.Option) (*s3.GetBucketCorsOutput, error)
GetBucketCorsRequest(*s3.GetBucketCorsInput) (*request.Request, *s3.GetBucketCorsOutput)
GetBucketEncryption(*s3.GetBucketEncryptionInput) (*s3.GetBucketEncryptionOutput, error)
GetBucketEncryptionWithContext(aws.Context, *s3.GetBucketEncryptionInput, ...request.Option) (*s3.GetBucketEncryptionOutput, error)
GetBucketEncryptionRequest(*s3.GetBucketEncryptionInput) (*request.Request, *s3.GetBucketEncryptionOutput)
GetBucketIntelligentTieringConfiguration(*s3.GetBucketIntelligentTieringConfigurationInput) (*s3.GetBucketIntelligentTieringConfigurationOutput, error)
GetBucketIntelligentTieringConfigurationWithContext(aws.Context, *s3.GetBucketIntelligentTieringConfigurationInput, ...request.Option) (*s3.GetBucketIntelligentTieringConfigurationOutput, error)
GetBucketIntelligentTieringConfigurationRequest(*s3.GetBucketIntelligentTieringConfigurationInput) (*request.Request, *s3.GetBucketIntelligentTieringConfigurationOutput)
GetBucketInventoryConfiguration(*s3.GetBucketInventoryConfigurationInput) (*s3.GetBucketInventoryConfigurationOutput, error)
GetBucketInventoryConfigurationWithContext(aws.Context, *s3.GetBucketInventoryConfigurationInput, ...request.Option) (*s3.GetBucketInventoryConfigurationOutput, error)
GetBucketInventoryConfigurationRequest(*s3.GetBucketInventoryConfigurationInput) (*request.Request, *s3.GetBucketInventoryConfigurationOutput)
GetBucketLifecycle(*s3.GetBucketLifecycleInput) (*s3.GetBucketLifecycleOutput, error)
GetBucketLifecycleWithContext(aws.Context, *s3.GetBucketLifecycleInput, ...request.Option) (*s3.GetBucketLifecycleOutput, error)
GetBucketLifecycleRequest(*s3.GetBucketLifecycleInput) (*request.Request, *s3.GetBucketLifecycleOutput)
GetBucketLifecycleConfiguration(*s3.GetBucketLifecycleConfigurationInput) (*s3.GetBucketLifecycleConfigurationOutput, error)
GetBucketLifecycleConfigurationWithContext(aws.Context, *s3.GetBucketLifecycleConfigurationInput, ...request.Option) (*s3.GetBucketLifecycleConfigurationOutput, error)
GetBucketLifecycleConfigurationRequest(*s3.GetBucketLifecycleConfigurationInput) (*request.Request, *s3.GetBucketLifecycleConfigurationOutput)
GetBucketLocation(*s3.GetBucketLocationInput) (*s3.GetBucketLocationOutput, error)
GetBucketLocationWithContext(aws.Context, *s3.GetBucketLocationInput, ...request.Option) (*s3.GetBucketLocationOutput, error)
GetBucketLocationRequest(*s3.GetBucketLocationInput) (*request.Request, *s3.GetBucketLocationOutput)
GetBucketLogging(*s3.GetBucketLoggingInput) (*s3.GetBucketLoggingOutput, error)
GetBucketLoggingWithContext(aws.Context, *s3.GetBucketLoggingInput, ...request.Option) (*s3.GetBucketLoggingOutput, error)
GetBucketLoggingRequest(*s3.GetBucketLoggingInput) (*request.Request, *s3.GetBucketLoggingOutput)
GetBucketMetricsConfiguration(*s3.GetBucketMetricsConfigurationInput) (*s3.GetBucketMetricsConfigurationOutput, error)
GetBucketMetricsConfigurationWithContext(aws.Context, *s3.GetBucketMetricsConfigurationInput, ...request.Option) (*s3.GetBucketMetricsConfigurationOutput, error)
GetBucketMetricsConfigurationRequest(*s3.GetBucketMetricsConfigurationInput) (*request.Request, *s3.GetBucketMetricsConfigurationOutput)
GetBucketNotification(*s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfigurationDeprecated, error)
GetBucketNotificationWithContext(aws.Context, *s3.GetBucketNotificationConfigurationRequest, ...request.Option) (*s3.NotificationConfigurationDeprecated, error)
GetBucketNotificationRequest(*s3.GetBucketNotificationConfigurationRequest) (*request.Request, *s3.NotificationConfigurationDeprecated)
GetBucketNotificationConfiguration(*s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfiguration, error)
GetBucketNotificationConfigurationWithContext(aws.Context, *s3.GetBucketNotificationConfigurationRequest, ...request.Option) (*s3.NotificationConfiguration, error)
GetBucketNotificationConfigurationRequest(*s3.GetBucketNotificationConfigurationRequest) (*request.Request, *s3.NotificationConfiguration)
GetBucketOwnershipControls(*s3.GetBucketOwnershipControlsInput) (*s3.GetBucketOwnershipControlsOutput, error)
GetBucketOwnershipControlsWithContext(aws.Context, *s3.GetBucketOwnershipControlsInput, ...request.Option) (*s3.GetBucketOwnershipControlsOutput, error)
GetBucketOwnershipControlsRequest(*s3.GetBucketOwnershipControlsInput) (*request.Request, *s3.GetBucketOwnershipControlsOutput)
GetBucketPolicy(*s3.GetBucketPolicyInput) (*s3.GetBucketPolicyOutput, error)
GetBucketPolicyWithContext(aws.Context, *s3.GetBucketPolicyInput, ...request.Option) (*s3.GetBucketPolicyOutput, error)
GetBucketPolicyRequest(*s3.GetBucketPolicyInput) (*request.Request, *s3.GetBucketPolicyOutput)
GetBucketPolicyStatus(*s3.GetBucketPolicyStatusInput) (*s3.GetBucketPolicyStatusOutput, error)
GetBucketPolicyStatusWithContext(aws.Context, *s3.GetBucketPolicyStatusInput, ...request.Option) (*s3.GetBucketPolicyStatusOutput, error)
GetBucketPolicyStatusRequest(*s3.GetBucketPolicyStatusInput) (*request.Request, *s3.GetBucketPolicyStatusOutput)
GetBucketReplication(*s3.GetBucketReplicationInput) (*s3.GetBucketReplicationOutput, error)
GetBucketReplicationWithContext(aws.Context, *s3.GetBucketReplicationInput, ...request.Option) (*s3.GetBucketReplicationOutput, error)
GetBucketReplicationRequest(*s3.GetBucketReplicationInput) (*request.Request, *s3.GetBucketReplicationOutput)
GetBucketRequestPayment(*s3.GetBucketRequestPaymentInput) (*s3.GetBucketRequestPaymentOutput, error)
GetBucketRequestPaymentWithContext(aws.Context, *s3.GetBucketRequestPaymentInput, ...request.Option) (*s3.GetBucketRequestPaymentOutput, error)
GetBucketRequestPaymentRequest(*s3.GetBucketRequestPaymentInput) (*request.Request, *s3.GetBucketRequestPaymentOutput)
GetBucketTagging(*s3.GetBucketTaggingInput) (*s3.GetBucketTaggingOutput, error)
GetBucketTaggingWithContext(aws.Context, *s3.GetBucketTaggingInput, ...request.Option) (*s3.GetBucketTaggingOutput, error)
GetBucketTaggingRequest(*s3.GetBucketTaggingInput) (*request.Request, *s3.GetBucketTaggingOutput)
GetBucketVersioning(*s3.GetBucketVersioningInput) (*s3.GetBucketVersioningOutput, error)
GetBucketVersioningWithContext(aws.Context, *s3.GetBucketVersioningInput, ...request.Option) (*s3.GetBucketVersioningOutput, error)
GetBucketVersioningRequest(*s3.GetBucketVersioningInput) (*request.Request, *s3.GetBucketVersioningOutput)
GetBucketWebsite(*s3.GetBucketWebsiteInput) (*s3.GetBucketWebsiteOutput, error)
GetBucketWebsiteWithContext(aws.Context, *s3.GetBucketWebsiteInput, ...request.Option) (*s3.GetBucketWebsiteOutput, error)
GetBucketWebsiteRequest(*s3.GetBucketWebsiteInput) (*request.Request, *s3.GetBucketWebsiteOutput)
GetObject(*s3.GetObjectInput) (*s3.GetObjectOutput, error)
GetObjectWithContext(aws.Context, *s3.GetObjectInput, ...request.Option) (*s3.GetObjectOutput, error)
GetObjectRequest(*s3.GetObjectInput) (*request.Request, *s3.GetObjectOutput)
GetObjectAcl(*s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error)
GetObjectAclWithContext(aws.Context, *s3.GetObjectAclInput, ...request.Option) (*s3.GetObjectAclOutput, error)
GetObjectAclRequest(*s3.GetObjectAclInput) (*request.Request, *s3.GetObjectAclOutput)
GetObjectAttributes(*s3.GetObjectAttributesInput) (*s3.GetObjectAttributesOutput, error)
GetObjectAttributesWithContext(aws.Context, *s3.GetObjectAttributesInput, ...request.Option) (*s3.GetObjectAttributesOutput, error)
GetObjectAttributesRequest(*s3.GetObjectAttributesInput) (*request.Request, *s3.GetObjectAttributesOutput)
GetObjectLegalHold(*s3.GetObjectLegalHoldInput) (*s3.GetObjectLegalHoldOutput, error)
GetObjectLegalHoldWithContext(aws.Context, *s3.GetObjectLegalHoldInput, ...request.Option) (*s3.GetObjectLegalHoldOutput, error)
GetObjectLegalHoldRequest(*s3.GetObjectLegalHoldInput) (*request.Request, *s3.GetObjectLegalHoldOutput)
GetObjectLockConfiguration(*s3.GetObjectLockConfigurationInput) (*s3.GetObjectLockConfigurationOutput, error)
GetObjectLockConfigurationWithContext(aws.Context, *s3.GetObjectLockConfigurationInput, ...request.Option) (*s3.GetObjectLockConfigurationOutput, error)
GetObjectLockConfigurationRequest(*s3.GetObjectLockConfigurationInput) (*request.Request, *s3.GetObjectLockConfigurationOutput)
GetObjectRetention(*s3.GetObjectRetentionInput) (*s3.GetObjectRetentionOutput, error)
GetObjectRetentionWithContext(aws.Context, *s3.GetObjectRetentionInput, ...request.Option) (*s3.GetObjectRetentionOutput, error)
GetObjectRetentionRequest(*s3.GetObjectRetentionInput) (*request.Request, *s3.GetObjectRetentionOutput)
GetObjectTagging(*s3.GetObjectTaggingInput) (*s3.GetObjectTaggingOutput, error)
GetObjectTaggingWithContext(aws.Context, *s3.GetObjectTaggingInput, ...request.Option) (*s3.GetObjectTaggingOutput, error)
GetObjectTaggingRequest(*s3.GetObjectTaggingInput) (*request.Request, *s3.GetObjectTaggingOutput)
GetObjectTorrent(*s3.GetObjectTorrentInput) (*s3.GetObjectTorrentOutput, error)
GetObjectTorrentWithContext(aws.Context, *s3.GetObjectTorrentInput, ...request.Option) (*s3.GetObjectTorrentOutput, error)
GetObjectTorrentRequest(*s3.GetObjectTorrentInput) (*request.Request, *s3.GetObjectTorrentOutput)
GetPublicAccessBlock(*s3.GetPublicAccessBlockInput) (*s3.GetPublicAccessBlockOutput, error)
GetPublicAccessBlockWithContext(aws.Context, *s3.GetPublicAccessBlockInput, ...request.Option) (*s3.GetPublicAccessBlockOutput, error)
GetPublicAccessBlockRequest(*s3.GetPublicAccessBlockInput) (*request.Request, *s3.GetPublicAccessBlockOutput)
HeadBucket(*s3.HeadBucketInput) (*s3.HeadBucketOutput, error)
HeadBucketWithContext(aws.Context, *s3.HeadBucketInput, ...request.Option) (*s3.HeadBucketOutput, error)
HeadBucketRequest(*s3.HeadBucketInput) (*request.Request, *s3.HeadBucketOutput)
HeadObject(*s3.HeadObjectInput) (*s3.HeadObjectOutput, error)
HeadObjectWithContext(aws.Context, *s3.HeadObjectInput, ...request.Option) (*s3.HeadObjectOutput, error)
HeadObjectRequest(*s3.HeadObjectInput) (*request.Request, *s3.HeadObjectOutput)
ListBucketAnalyticsConfigurations(*s3.ListBucketAnalyticsConfigurationsInput) (*s3.ListBucketAnalyticsConfigurationsOutput, error)
ListBucketAnalyticsConfigurationsWithContext(aws.Context, *s3.ListBucketAnalyticsConfigurationsInput, ...request.Option) (*s3.ListBucketAnalyticsConfigurationsOutput, error)
ListBucketAnalyticsConfigurationsRequest(*s3.ListBucketAnalyticsConfigurationsInput) (*request.Request, *s3.ListBucketAnalyticsConfigurationsOutput)
ListBucketIntelligentTieringConfigurations(*s3.ListBucketIntelligentTieringConfigurationsInput) (*s3.ListBucketIntelligentTieringConfigurationsOutput, error)
ListBucketIntelligentTieringConfigurationsWithContext(aws.Context, *s3.ListBucketIntelligentTieringConfigurationsInput, ...request.Option) (*s3.ListBucketIntelligentTieringConfigurationsOutput, error)
ListBucketIntelligentTieringConfigurationsRequest(*s3.ListBucketIntelligentTieringConfigurationsInput) (*request.Request, *s3.ListBucketIntelligentTieringConfigurationsOutput)
ListBucketInventoryConfigurations(*s3.ListBucketInventoryConfigurationsInput) (*s3.ListBucketInventoryConfigurationsOutput, error)
ListBucketInventoryConfigurationsWithContext(aws.Context, *s3.ListBucketInventoryConfigurationsInput, ...request.Option) (*s3.ListBucketInventoryConfigurationsOutput, error)
ListBucketInventoryConfigurationsRequest(*s3.ListBucketInventoryConfigurationsInput) (*request.Request, *s3.ListBucketInventoryConfigurationsOutput)
ListBucketMetricsConfigurations(*s3.ListBucketMetricsConfigurationsInput) (*s3.ListBucketMetricsConfigurationsOutput, error)
ListBucketMetricsConfigurationsWithContext(aws.Context, *s3.ListBucketMetricsConfigurationsInput, ...request.Option) (*s3.ListBucketMetricsConfigurationsOutput, error)
ListBucketMetricsConfigurationsRequest(*s3.ListBucketMetricsConfigurationsInput) (*request.Request, *s3.ListBucketMetricsConfigurationsOutput)
ListBuckets(*s3.ListBucketsInput) (*s3.ListBucketsOutput, error)
ListBucketsWithContext(aws.Context, *s3.ListBucketsInput, ...request.Option) (*s3.ListBucketsOutput, error)
ListBucketsRequest(*s3.ListBucketsInput) (*request.Request, *s3.ListBucketsOutput)
ListMultipartUploads(*s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error)
ListMultipartUploadsWithContext(aws.Context, *s3.ListMultipartUploadsInput, ...request.Option) (*s3.ListMultipartUploadsOutput, error)
ListMultipartUploadsRequest(*s3.ListMultipartUploadsInput) (*request.Request, *s3.ListMultipartUploadsOutput)
ListMultipartUploadsPages(*s3.ListMultipartUploadsInput, func(*s3.ListMultipartUploadsOutput, bool) bool) error
ListMultipartUploadsPagesWithContext(aws.Context, *s3.ListMultipartUploadsInput, func(*s3.ListMultipartUploadsOutput, bool) bool, ...request.Option) error
ListObjectVersions(*s3.ListObjectVersionsInput) (*s3.ListObjectVersionsOutput, error)
ListObjectVersionsWithContext(aws.Context, *s3.ListObjectVersionsInput, ...request.Option) (*s3.ListObjectVersionsOutput, error)
ListObjectVersionsRequest(*s3.ListObjectVersionsInput) (*request.Request, *s3.ListObjectVersionsOutput)
ListObjectVersionsPages(*s3.ListObjectVersionsInput, func(*s3.ListObjectVersionsOutput, bool) bool) error
ListObjectVersionsPagesWithContext(aws.Context, *s3.ListObjectVersionsInput, func(*s3.ListObjectVersionsOutput, bool) bool, ...request.Option) error
ListObjects(*s3.ListObjectsInput) (*s3.ListObjectsOutput, error)
ListObjectsWithContext(aws.Context, *s3.ListObjectsInput, ...request.Option) (*s3.ListObjectsOutput, error)
ListObjectsRequest(*s3.ListObjectsInput) (*request.Request, *s3.ListObjectsOutput)
ListObjectsPages(*s3.ListObjectsInput, func(*s3.ListObjectsOutput, bool) bool) error
ListObjectsPagesWithContext(aws.Context, *s3.ListObjectsInput, func(*s3.ListObjectsOutput, bool) bool, ...request.Option) error
ListObjectsV2(*s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error)
ListObjectsV2WithContext(aws.Context, *s3.ListObjectsV2Input, ...request.Option) (*s3.ListObjectsV2Output, error)
ListObjectsV2Request(*s3.ListObjectsV2Input) (*request.Request, *s3.ListObjectsV2Output)
ListObjectsV2Pages(*s3.ListObjectsV2Input, func(*s3.ListObjectsV2Output, bool) bool) error
ListObjectsV2PagesWithContext(aws.Context, *s3.ListObjectsV2Input, func(*s3.ListObjectsV2Output, bool) bool, ...request.Option) error
ListParts(*s3.ListPartsInput) (*s3.ListPartsOutput, error)
ListPartsWithContext(aws.Context, *s3.ListPartsInput, ...request.Option) (*s3.ListPartsOutput, error)
ListPartsRequest(*s3.ListPartsInput) (*request.Request, *s3.ListPartsOutput)
ListPartsPages(*s3.ListPartsInput, func(*s3.ListPartsOutput, bool) bool) error
ListPartsPagesWithContext(aws.Context, *s3.ListPartsInput, func(*s3.ListPartsOutput, bool) bool, ...request.Option) error
PutBucketAccelerateConfiguration(*s3.PutBucketAccelerateConfigurationInput) (*s3.PutBucketAccelerateConfigurationOutput, error)
PutBucketAccelerateConfigurationWithContext(aws.Context, *s3.PutBucketAccelerateConfigurationInput, ...request.Option) (*s3.PutBucketAccelerateConfigurationOutput, error)
PutBucketAccelerateConfigurationRequest(*s3.PutBucketAccelerateConfigurationInput) (*request.Request, *s3.PutBucketAccelerateConfigurationOutput)
PutBucketAcl(*s3.PutBucketAclInput) (*s3.PutBucketAclOutput, error)
PutBucketAclWithContext(aws.Context, *s3.PutBucketAclInput, ...request.Option) (*s3.PutBucketAclOutput, error)
PutBucketAclRequest(*s3.PutBucketAclInput) (*request.Request, *s3.PutBucketAclOutput)
PutBucketAnalyticsConfiguration(*s3.PutBucketAnalyticsConfigurationInput) (*s3.PutBucketAnalyticsConfigurationOutput, error)
PutBucketAnalyticsConfigurationWithContext(aws.Context, *s3.PutBucketAnalyticsConfigurationInput, ...request.Option) (*s3.PutBucketAnalyticsConfigurationOutput, error)
PutBucketAnalyticsConfigurationRequest(*s3.PutBucketAnalyticsConfigurationInput) (*request.Request, *s3.PutBucketAnalyticsConfigurationOutput)
PutBucketCors(*s3.PutBucketCorsInput) (*s3.PutBucketCorsOutput, error)
PutBucketCorsWithContext(aws.Context, *s3.PutBucketCorsInput, ...request.Option) (*s3.PutBucketCorsOutput, error)
PutBucketCorsRequest(*s3.PutBucketCorsInput) (*request.Request, *s3.PutBucketCorsOutput)
PutBucketEncryption(*s3.PutBucketEncryptionInput) (*s3.PutBucketEncryptionOutput, error)
PutBucketEncryptionWithContext(aws.Context, *s3.PutBucketEncryptionInput, ...request.Option) (*s3.PutBucketEncryptionOutput, error)
PutBucketEncryptionRequest(*s3.PutBucketEncryptionInput) (*request.Request, *s3.PutBucketEncryptionOutput)
PutBucketIntelligentTieringConfiguration(*s3.PutBucketIntelligentTieringConfigurationInput) (*s3.PutBucketIntelligentTieringConfigurationOutput, error)
PutBucketIntelligentTieringConfigurationWithContext(aws.Context, *s3.PutBucketIntelligentTieringConfigurationInput, ...request.Option) (*s3.PutBucketIntelligentTieringConfigurationOutput, error)
PutBucketIntelligentTieringConfigurationRequest(*s3.PutBucketIntelligentTieringConfigurationInput) (*request.Request, *s3.PutBucketIntelligentTieringConfigurationOutput)
PutBucketInventoryConfiguration(*s3.PutBucketInventoryConfigurationInput) (*s3.PutBucketInventoryConfigurationOutput, error)
PutBucketInventoryConfigurationWithContext(aws.Context, *s3.PutBucketInventoryConfigurationInput, ...request.Option) (*s3.PutBucketInventoryConfigurationOutput, error)
PutBucketInventoryConfigurationRequest(*s3.PutBucketInventoryConfigurationInput) (*request.Request, *s3.PutBucketInventoryConfigurationOutput)
PutBucketLifecycle(*s3.PutBucketLifecycleInput) (*s3.PutBucketLifecycleOutput, error)
PutBucketLifecycleWithContext(aws.Context, *s3.PutBucketLifecycleInput, ...request.Option) (*s3.PutBucketLifecycleOutput, error)
PutBucketLifecycleRequest(*s3.PutBucketLifecycleInput) (*request.Request, *s3.PutBucketLifecycleOutput)
PutBucketLifecycleConfiguration(*s3.PutBucketLifecycleConfigurationInput) (*s3.PutBucketLifecycleConfigurationOutput, error)
PutBucketLifecycleConfigurationWithContext(aws.Context, *s3.PutBucketLifecycleConfigurationInput, ...request.Option) (*s3.PutBucketLifecycleConfigurationOutput, error)
PutBucketLifecycleConfigurationRequest(*s3.PutBucketLifecycleConfigurationInput) (*request.Request, *s3.PutBucketLifecycleConfigurationOutput)
PutBucketLogging(*s3.PutBucketLoggingInput) (*s3.PutBucketLoggingOutput, error)
PutBucketLoggingWithContext(aws.Context, *s3.PutBucketLoggingInput, ...request.Option) (*s3.PutBucketLoggingOutput, error)
PutBucketLoggingRequest(*s3.PutBucketLoggingInput) (*request.Request, *s3.PutBucketLoggingOutput)
PutBucketMetricsConfiguration(*s3.PutBucketMetricsConfigurationInput) (*s3.PutBucketMetricsConfigurationOutput, error)
PutBucketMetricsConfigurationWithContext(aws.Context, *s3.PutBucketMetricsConfigurationInput, ...request.Option) (*s3.PutBucketMetricsConfigurationOutput, error)
PutBucketMetricsConfigurationRequest(*s3.PutBucketMetricsConfigurationInput) (*request.Request, *s3.PutBucketMetricsConfigurationOutput)
PutBucketNotification(*s3.PutBucketNotificationInput) (*s3.PutBucketNotificationOutput, error)
PutBucketNotificationWithContext(aws.Context, *s3.PutBucketNotificationInput, ...request.Option) (*s3.PutBucketNotificationOutput, error)
PutBucketNotificationRequest(*s3.PutBucketNotificationInput) (*request.Request, *s3.PutBucketNotificationOutput)
PutBucketNotificationConfiguration(*s3.PutBucketNotificationConfigurationInput) (*s3.PutBucketNotificationConfigurationOutput, error)
PutBucketNotificationConfigurationWithContext(aws.Context, *s3.PutBucketNotificationConfigurationInput, ...request.Option) (*s3.PutBucketNotificationConfigurationOutput, error)
PutBucketNotificationConfigurationRequest(*s3.PutBucketNotificationConfigurationInput) (*request.Request, *s3.PutBucketNotificationConfigurationOutput)
PutBucketOwnershipControls(*s3.PutBucketOwnershipControlsInput) (*s3.PutBucketOwnershipControlsOutput, error)
PutBucketOwnershipControlsWithContext(aws.Context, *s3.PutBucketOwnershipControlsInput, ...request.Option) (*s3.PutBucketOwnershipControlsOutput, error)
PutBucketOwnershipControlsRequest(*s3.PutBucketOwnershipControlsInput) (*request.Request, *s3.PutBucketOwnershipControlsOutput)
PutBucketPolicy(*s3.PutBucketPolicyInput) (*s3.PutBucketPolicyOutput, error)
PutBucketPolicyWithContext(aws.Context, *s3.PutBucketPolicyInput, ...request.Option) (*s3.PutBucketPolicyOutput, error)
PutBucketPolicyRequest(*s3.PutBucketPolicyInput) (*request.Request, *s3.PutBucketPolicyOutput)
PutBucketReplication(*s3.PutBucketReplicationInput) (*s3.PutBucketReplicationOutput, error)
PutBucketReplicationWithContext(aws.Context, *s3.PutBucketReplicationInput, ...request.Option) (*s3.PutBucketReplicationOutput, error)
PutBucketReplicationRequest(*s3.PutBucketReplicationInput) (*request.Request, *s3.PutBucketReplicationOutput)
PutBucketRequestPayment(*s3.PutBucketRequestPaymentInput) (*s3.PutBucketRequestPaymentOutput, error)
PutBucketRequestPaymentWithContext(aws.Context, *s3.PutBucketRequestPaymentInput, ...request.Option) (*s3.PutBucketRequestPaymentOutput, error)
PutBucketRequestPaymentRequest(*s3.PutBucketRequestPaymentInput) (*request.Request, *s3.PutBucketRequestPaymentOutput)
PutBucketTagging(*s3.PutBucketTaggingInput) (*s3.PutBucketTaggingOutput, error)
PutBucketTaggingWithContext(aws.Context, *s3.PutBucketTaggingInput, ...request.Option) (*s3.PutBucketTaggingOutput, error)
PutBucketTaggingRequest(*s3.PutBucketTaggingInput) (*request.Request, *s3.PutBucketTaggingOutput)
PutBucketVersioning(*s3.PutBucketVersioningInput) (*s3.PutBucketVersioningOutput, error)
PutBucketVersioningWithContext(aws.Context, *s3.PutBucketVersioningInput, ...request.Option) (*s3.PutBucketVersioningOutput, error)
PutBucketVersioningRequest(*s3.PutBucketVersioningInput) (*request.Request, *s3.PutBucketVersioningOutput)
PutBucketWebsite(*s3.PutBucketWebsiteInput) (*s3.PutBucketWebsiteOutput, error)
PutBucketWebsiteWithContext(aws.Context, *s3.PutBucketWebsiteInput, ...request.Option) (*s3.PutBucketWebsiteOutput, error)
PutBucketWebsiteRequest(*s3.PutBucketWebsiteInput) (*request.Request, *s3.PutBucketWebsiteOutput)
PutObject(*s3.PutObjectInput) (*s3.PutObjectOutput, error)
PutObjectWithContext(aws.Context, *s3.PutObjectInput, ...request.Option) (*s3.PutObjectOutput, error)
PutObjectRequest(*s3.PutObjectInput) (*request.Request, *s3.PutObjectOutput)
PutObjectAcl(*s3.PutObjectAclInput) (*s3.PutObjectAclOutput, error)
PutObjectAclWithContext(aws.Context, *s3.PutObjectAclInput, ...request.Option) (*s3.PutObjectAclOutput, error)
PutObjectAclRequest(*s3.PutObjectAclInput) (*request.Request, *s3.PutObjectAclOutput)
PutObjectLegalHold(*s3.PutObjectLegalHoldInput) (*s3.PutObjectLegalHoldOutput, error)
PutObjectLegalHoldWithContext(aws.Context, *s3.PutObjectLegalHoldInput, ...request.Option) (*s3.PutObjectLegalHoldOutput, error)
PutObjectLegalHoldRequest(*s3.PutObjectLegalHoldInput) (*request.Request, *s3.PutObjectLegalHoldOutput)
PutObjectLockConfiguration(*s3.PutObjectLockConfigurationInput) (*s3.PutObjectLockConfigurationOutput, error)
PutObjectLockConfigurationWithContext(aws.Context, *s3.PutObjectLockConfigurationInput, ...request.Option) (*s3.PutObjectLockConfigurationOutput, error)
PutObjectLockConfigurationRequest(*s3.PutObjectLockConfigurationInput) (*request.Request, *s3.PutObjectLockConfigurationOutput)
PutObjectRetention(*s3.PutObjectRetentionInput) (*s3.PutObjectRetentionOutput, error)
PutObjectRetentionWithContext(aws.Context, *s3.PutObjectRetentionInput, ...request.Option) (*s3.PutObjectRetentionOutput, error)
PutObjectRetentionRequest(*s3.PutObjectRetentionInput) (*request.Request, *s3.PutObjectRetentionOutput)
PutObjectTagging(*s3.PutObjectTaggingInput) (*s3.PutObjectTaggingOutput, error)
PutObjectTaggingWithContext(aws.Context, *s3.PutObjectTaggingInput, ...request.Option) (*s3.PutObjectTaggingOutput, error)
PutObjectTaggingRequest(*s3.PutObjectTaggingInput) (*request.Request, *s3.PutObjectTaggingOutput)
PutPublicAccessBlock(*s3.PutPublicAccessBlockInput) (*s3.PutPublicAccessBlockOutput, error)
PutPublicAccessBlockWithContext(aws.Context, *s3.PutPublicAccessBlockInput, ...request.Option) (*s3.PutPublicAccessBlockOutput, error)
PutPublicAccessBlockRequest(*s3.PutPublicAccessBlockInput) (*request.Request, *s3.PutPublicAccessBlockOutput)
RestoreObject(*s3.RestoreObjectInput) (*s3.RestoreObjectOutput, error)
RestoreObjectWithContext(aws.Context, *s3.RestoreObjectInput, ...request.Option) (*s3.RestoreObjectOutput, error)
RestoreObjectRequest(*s3.RestoreObjectInput) (*request.Request, *s3.RestoreObjectOutput)
SelectObjectContent(*s3.SelectObjectContentInput) (*s3.SelectObjectContentOutput, error)
SelectObjectContentWithContext(aws.Context, *s3.SelectObjectContentInput, ...request.Option) (*s3.SelectObjectContentOutput, error)
SelectObjectContentRequest(*s3.SelectObjectContentInput) (*request.Request, *s3.SelectObjectContentOutput)
UploadPart(*s3.UploadPartInput) (*s3.UploadPartOutput, error)
UploadPartWithContext(aws.Context, *s3.UploadPartInput, ...request.Option) (*s3.UploadPartOutput, error)
UploadPartRequest(*s3.UploadPartInput) (*request.Request, *s3.UploadPartOutput)
UploadPartCopy(*s3.UploadPartCopyInput) (*s3.UploadPartCopyOutput, error)
UploadPartCopyWithContext(aws.Context, *s3.UploadPartCopyInput, ...request.Option) (*s3.UploadPartCopyOutput, error)
UploadPartCopyRequest(*s3.UploadPartCopyInput) (*request.Request, *s3.UploadPartCopyOutput)
WriteGetObjectResponse(*s3.WriteGetObjectResponseInput) (*s3.WriteGetObjectResponseOutput, error)
WriteGetObjectResponseWithContext(aws.Context, *s3.WriteGetObjectResponseInput, ...request.Option) (*s3.WriteGetObjectResponseOutput, error)
WriteGetObjectResponseRequest(*s3.WriteGetObjectResponseInput) (*request.Request, *s3.WriteGetObjectResponseOutput)
WaitUntilBucketExists(*s3.HeadBucketInput) error
WaitUntilBucketExistsWithContext(aws.Context, *s3.HeadBucketInput, ...request.WaiterOption) error
WaitUntilBucketNotExists(*s3.HeadBucketInput) error
WaitUntilBucketNotExistsWithContext(aws.Context, *s3.HeadBucketInput, ...request.WaiterOption) error
WaitUntilObjectExists(*s3.HeadObjectInput) error
WaitUntilObjectExistsWithContext(aws.Context, *s3.HeadObjectInput, ...request.WaiterOption) error
WaitUntilObjectNotExists(*s3.HeadObjectInput) error
WaitUntilObjectNotExistsWithContext(aws.Context, *s3.HeadObjectInput, ...request.WaiterOption) error
}
var _ S3API = (*s3.S3)(nil)
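The interface exists so callers can depend on s3iface.S3API rather than the concrete *s3.S3 client, which makes S3-backed code straightforward to unit test. A minimal sketch (the mockS3 type and bucketExists helper below are illustrative, not part of the SDK):

    // mockS3 embeds s3iface.S3API so only the methods under test need to be
    // overridden; calling any other method panics on the nil embedded interface.
    type mockS3 struct {
        s3iface.S3API
    }

    func (m *mockS3) HeadBucket(in *s3.HeadBucketInput) (*s3.HeadBucketOutput, error) {
        return &s3.HeadBucketOutput{}, nil // pretend every bucket exists
    }

    func bucketExists(svc s3iface.S3API, name string) bool {
        _, err := svc.HeadBucket(&s3.HeadBucketInput{Bucket: aws.String(name)})
        return err == nil
    }

    // bucketExists(&mockS3{}, "example-bucket") reports true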

View File

@@ -0,0 +1,23 @@
package s3manager
import (
"fmt"
"github.com/aws/aws-sdk-go/aws/arn"
)
func validateSupportedARNType(bucket string) error {
if !arn.IsARN(bucket) {
return nil
}
parsedARN, err := arn.Parse(bucket)
if err != nil {
return err
}
if parsedARN.Service == "s3-object-lambda" {
return fmt.Errorf("manager does not support s3-object-lambda service ARNs")
}
return nil
}
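A quick sketch of how validateSupportedARNType treats different bucket strings (the ARNs below are illustrative):

    _ = validateSupportedARNType("my-bucket") // nil: plain bucket names are not ARNs
    _ = validateSupportedARNType("arn:aws:s3:us-west-2:123456789012:accesspoint/myap") // nil: supported service
    err := validateSupportedARNType("arn:aws:s3-object-lambda:us-west-2:123456789012:accesspoint/myap")
    // err != nil: s3-object-lambda ARNs are rejected by the manager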

View File

@@ -0,0 +1,529 @@
package s3manager
import (
"bytes"
"fmt"
"io"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3iface"
)
const (
// DefaultBatchSize is the batch size we initialize when constructing a batch delete client.
// This value is used when calling DeleteObjects. This represents how many objects to delete
// per DeleteObjects call.
DefaultBatchSize = 100
)
// BatchError will contain the key and bucket of the object that failed to
// either upload or download.
type BatchError struct {
Errors Errors
code string
message string
}
// Errors is a typed alias for a slice of errors to satisfy the error
// interface.
type Errors []Error
func (errs Errors) Error() string {
buf := bytes.NewBuffer(nil)
for i, err := range errs {
buf.WriteString(err.Error())
if i+1 < len(errs) {
buf.WriteString("\n")
}
}
return buf.String()
}
// Error will contain the original error, bucket, and key of the operation that failed
// during batch operations.
type Error struct {
OrigErr error
Bucket *string
Key *string
}
func newError(err error, bucket, key *string) Error {
return Error{
err,
bucket,
key,
}
}
func (err *Error) Error() string {
origErr := ""
if err.OrigErr != nil {
origErr = ":\n" + err.OrigErr.Error()
}
return fmt.Sprintf("failed to perform batch operation on %q to %q%s",
aws.StringValue(err.Key),
aws.StringValue(err.Bucket),
origErr,
)
}
// NewBatchError will return a BatchError that satisfies the awserr.Error interface.
func NewBatchError(code, message string, err []Error) awserr.Error {
return &BatchError{
Errors: err,
code: code,
message: message,
}
}
// Code will return the code associated with the batch error.
func (err *BatchError) Code() string {
return err.code
}
// Message will return the message associated with the batch error.
func (err *BatchError) Message() string {
return err.message
}
func (err *BatchError) Error() string {
return awserr.SprintError(err.Code(), err.Message(), "", err.Errors)
}
// OrigErr will return the original error. For batch operations this is the
// collected Errors slice rather than a single underlying error.
func (err *BatchError) OrigErr() error {
return err.Errors
}
// BatchDeleteIterator is an interface that uses the scanner pattern to
// iterate through what needs to be deleted.
type BatchDeleteIterator interface {
Next() bool
Err() error
DeleteObject() BatchDeleteObject
}
// DeleteListIterator is an alternative iterator for the BatchDelete client. This will
// iterate through a list of objects and delete the objects.
//
// Example:
//	iter := s3manager.NewDeleteListIterator(svc, &s3.ListObjectsInput{
//		Bucket:  aws.String("bucket"),
//		MaxKeys: aws.Int64(5),
//	})
//
// batcher := s3manager.NewBatchDeleteWithClient(svc)
// if err := batcher.Delete(aws.BackgroundContext(), iter); err != nil {
// return err
// }
type DeleteListIterator struct {
Bucket *string
Paginator request.Pagination
objects []*s3.Object
}
// NewDeleteListIterator will return a new DeleteListIterator.
func NewDeleteListIterator(svc s3iface.S3API, input *s3.ListObjectsInput, opts ...func(*DeleteListIterator)) BatchDeleteIterator {
iter := &DeleteListIterator{
Bucket: input.Bucket,
Paginator: request.Pagination{
NewRequest: func() (*request.Request, error) {
var inCpy *s3.ListObjectsInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := svc.ListObjectsRequest(inCpy)
return req, nil
},
},
}
for _, opt := range opts {
opt(iter)
}
return iter
}
// Next will use the S3API client to iterate through a list of objects.
func (iter *DeleteListIterator) Next() bool {
if len(iter.objects) > 0 {
iter.objects = iter.objects[1:]
}
if len(iter.objects) == 0 && iter.Paginator.Next() {
iter.objects = iter.Paginator.Page().(*s3.ListObjectsOutput).Contents
}
return len(iter.objects) > 0
}
// Err will return the last known error from Next.
func (iter *DeleteListIterator) Err() error {
return iter.Paginator.Err()
}
// DeleteObject will return the current object to be deleted.
func (iter *DeleteListIterator) DeleteObject() BatchDeleteObject {
return BatchDeleteObject{
Object: &s3.DeleteObjectInput{
Bucket: iter.Bucket,
Key: iter.objects[0].Key,
},
}
}
// BatchDelete will use the s3 package's service client to perform a batch
// delete.
type BatchDelete struct {
Client s3iface.S3API
BatchSize int
}
// NewBatchDeleteWithClient will return a new delete client that can delete a batched amount of
// objects.
//
// Example:
// batcher := s3manager.NewBatchDeleteWithClient(client)
//
// objects := []BatchDeleteObject{
// {
// Object: &s3.DeleteObjectInput {
// Key: aws.String("key"),
// Bucket: aws.String("bucket"),
// },
// },
// }
//
// if err := batcher.Delete(aws.BackgroundContext(), &s3manager.DeleteObjectsIterator{
// Objects: objects,
// }); err != nil {
// return err
// }
func NewBatchDeleteWithClient(client s3iface.S3API, options ...func(*BatchDelete)) *BatchDelete {
svc := &BatchDelete{
Client: client,
BatchSize: DefaultBatchSize,
}
for _, opt := range options {
opt(svc)
}
return svc
}
// NewBatchDelete will return a new delete client that can delete a batched amount of
// objects.
//
// Example:
// batcher := s3manager.NewBatchDelete(sess)
//
// objects := []BatchDeleteObject{
// {
// Object: &s3.DeleteObjectInput {
// Key: aws.String("key"),
// Bucket: aws.String("bucket"),
// },
// },
// }
//
// if err := batcher.Delete(aws.BackgroundContext(), &s3manager.DeleteObjectsIterator{
// Objects: objects,
// }); err != nil {
// return err
// }
func NewBatchDelete(c client.ConfigProvider, options ...func(*BatchDelete)) *BatchDelete {
client := s3.New(c)
return NewBatchDeleteWithClient(client, options...)
}
// BatchDeleteObject is a wrapper object for calling the batch delete operation.
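//
// A sketch of attaching an After hook for per-object bookkeeping (the log
// call is illustrative):
//
//	obj := BatchDeleteObject{
//		Object: &s3.DeleteObjectInput{
//			Bucket: aws.String("bucket"),
//			Key:    aws.String("key"),
//		},
//		After: func() error {
//			log.Println("delete attempted")
//			return nil
//		},
//	}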
type BatchDeleteObject struct {
Object *s3.DeleteObjectInput
// After will run after each iteration during the batch process. This function will
// be executed whether or not the request was successful.
After func() error
}
// DeleteObjectsIterator implements the BatchDeleteIterator interface and iterates
// through a series of objects to be deleted.
type DeleteObjectsIterator struct {
Objects []BatchDeleteObject
index int
inc bool
}
// Next will increment the default iterator's index and ensure that there
// is another object to iterate to.
func (iter *DeleteObjectsIterator) Next() bool {
if iter.inc {
iter.index++
} else {
iter.inc = true
}
return iter.index < len(iter.Objects)
}
// Err will return an error. Since this is just used to satisfy the
// BatchDeleteIterator interface, this will only return nil.
func (iter *DeleteObjectsIterator) Err() error {
return nil
}
// DeleteObject will return the BatchDeleteObject at the current batched index.
func (iter *DeleteObjectsIterator) DeleteObject() BatchDeleteObject {
object := iter.Objects[iter.index]
return object
}
// Delete will use the iterator to queue up objects that need to be deleted.
// Once the batch size is met, this will call the deleteBatch function.
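//
// A sketch of tuning the batch size via a functional option; note the
// underlying DeleteObjects API accepts at most 1000 keys per call, so
// BatchSize should not exceed that:
//
//	batcher := NewBatchDeleteWithClient(svc, func(d *BatchDelete) {
//		d.BatchSize = 500
//	})
//	err := batcher.Delete(aws.BackgroundContext(), iter)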
func (d *BatchDelete) Delete(ctx aws.Context, iter BatchDeleteIterator) error {
var errs []Error
objects := []BatchDeleteObject{}
var input *s3.DeleteObjectsInput
for iter.Next() {
o := iter.DeleteObject()
if input == nil {
input = initDeleteObjectsInput(o.Object)
}
parity := hasParity(input, o)
if parity {
input.Delete.Objects = append(input.Delete.Objects, &s3.ObjectIdentifier{
Key: o.Object.Key,
VersionId: o.Object.VersionId,
})
objects = append(objects, o)
}
if len(input.Delete.Objects) == d.BatchSize || !parity {
if err := deleteBatch(ctx, d, input, objects); err != nil {
errs = append(errs, err...)
}
objects = objects[:0]
input = nil
if !parity {
objects = append(objects, o)
input = initDeleteObjectsInput(o.Object)
input.Delete.Objects = append(input.Delete.Objects, &s3.ObjectIdentifier{
Key: o.Object.Key,
VersionId: o.Object.VersionId,
})
}
}
}
// iter.Next() could return false (above) while also populating iter.Err()
if iter.Err() != nil {
errs = append(errs, newError(iter.Err(), nil, nil))
}
if input != nil && len(input.Delete.Objects) > 0 {
if err := deleteBatch(ctx, d, input, objects); err != nil {
errs = append(errs, err...)
}
}
if len(errs) > 0 {
return NewBatchError("BatchedDeleteIncomplete", "some objects have failed to be deleted.", errs)
}
return nil
}
func initDeleteObjectsInput(o *s3.DeleteObjectInput) *s3.DeleteObjectsInput {
return &s3.DeleteObjectsInput{
Bucket: o.Bucket,
MFA: o.MFA,
RequestPayer: o.RequestPayer,
Delete: &s3.Delete{},
}
}
const (
// ErrDeleteBatchFailCode represents an error code which will be returned
// only when DeleteObjects.Errors has an error that does not contain a code.
ErrDeleteBatchFailCode = "DeleteBatchError"
errDefaultDeleteBatchMessage = "failed to delete"
)
// deleteBatch will delete a batch of items in the objects parameters.
func deleteBatch(ctx aws.Context, d *BatchDelete, input *s3.DeleteObjectsInput, objects []BatchDeleteObject) []Error {
errs := []Error{}
if result, err := d.Client.DeleteObjectsWithContext(ctx, input); err != nil {
for i := 0; i < len(input.Delete.Objects); i++ {
errs = append(errs, newError(err, input.Bucket, input.Delete.Objects[i].Key))
}
} else if len(result.Errors) > 0 {
for i := 0; i < len(result.Errors); i++ {
code := ErrDeleteBatchFailCode
msg := errDefaultDeleteBatchMessage
if result.Errors[i].Message != nil {
msg = *result.Errors[i].Message
}
if result.Errors[i].Code != nil {
code = *result.Errors[i].Code
}
errs = append(errs, newError(awserr.New(code, msg, err), input.Bucket, result.Errors[i].Key))
}
}
for _, object := range objects {
if object.After == nil {
continue
}
if err := object.After(); err != nil {
errs = append(errs, newError(err, object.Object.Bucket, object.Object.Key))
}
}
return errs
}
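// hasParity reports whether o2 can share a DeleteObjects request with the
// objects already queued in o1: the Bucket, MFA, and RequestPayer values must
// all match, including whether each is nil.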
func hasParity(o1 *s3.DeleteObjectsInput, o2 BatchDeleteObject) bool {
if o1.Bucket != nil && o2.Object.Bucket != nil {
if *o1.Bucket != *o2.Object.Bucket {
return false
}
} else if o1.Bucket != o2.Object.Bucket {
return false
}
if o1.MFA != nil && o2.Object.MFA != nil {
if *o1.MFA != *o2.Object.MFA {
return false
}
} else if o1.MFA != o2.Object.MFA {
return false
}
if o1.RequestPayer != nil && o2.Object.RequestPayer != nil {
if *o1.RequestPayer != *o2.Object.RequestPayer {
return false
}
} else if o1.RequestPayer != o2.Object.RequestPayer {
return false
}
return true
}
// BatchDownloadIterator is an interface that uses the scanner pattern to iterate
// through a series of objects to be downloaded.
type BatchDownloadIterator interface {
Next() bool
Err() error
DownloadObject() BatchDownloadObject
}
// BatchDownloadObject contains all necessary information to run a batch operation once.
type BatchDownloadObject struct {
Object *s3.GetObjectInput
Writer io.WriterAt
// After will run after each iteration during the batch process. This function will
// be executed whether or not the request was successful.
After func() error
}
// DownloadObjectsIterator implements the BatchDownloadIterator interface and allows for batched
// download of objects.
type DownloadObjectsIterator struct {
Objects []BatchDownloadObject
index int
inc bool
}
// Next will increment the default iterator's index and ensure that there
// is another object to iterate to.
func (batcher *DownloadObjectsIterator) Next() bool {
if batcher.inc {
batcher.index++
} else {
batcher.inc = true
}
return batcher.index < len(batcher.Objects)
}
// DownloadObject will return the BatchDownloadObject at the current batched index.
func (batcher *DownloadObjectsIterator) DownloadObject() BatchDownloadObject {
object := batcher.Objects[batcher.index]
return object
}
// Err will return an error. Since this is just used to satisfy the
// BatchDownloadIterator interface, this will only return nil.
func (batcher *DownloadObjectsIterator) Err() error {
return nil
}
// BatchUploadIterator is an interface that uses the scanner pattern to
// iterate through what needs to be uploaded.
type BatchUploadIterator interface {
Next() bool
Err() error
UploadObject() BatchUploadObject
}
// UploadObjectsIterator implements the BatchUploadIterator interface and allows for batched
// upload of objects.
type UploadObjectsIterator struct {
Objects []BatchUploadObject
index int
inc bool
}
// Next will increment the default iterator's index and ensure that there
// is another object to iterate to.
func (batcher *UploadObjectsIterator) Next() bool {
if batcher.inc {
batcher.index++
} else {
batcher.inc = true
}
return batcher.index < len(batcher.Objects)
}
// Err will return an error. Since this is just used to satisfy the
// BatchUploadIterator interface, this will only return nil.
func (batcher *UploadObjectsIterator) Err() error {
return nil
}
// UploadObject will return the BatchUploadObject at the current batched index.
func (batcher *UploadObjectsIterator) UploadObject() BatchUploadObject {
object := batcher.Objects[batcher.index]
return object
}
// BatchUploadObject contains all necessary information to run a batch operation once.
type BatchUploadObject struct {
Object *UploadInput
// After will run after each iteration during the batch process. This function will
// be executed whether or not the request was successful.
After func() error
}

View File

@@ -0,0 +1,159 @@
package s3manager
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/corehandlers"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3iface"
)
// GetBucketRegion will attempt to get the region for a bucket using the
// regionHint to determine which AWS partition to perform the query on.
//
// The request will not be signed, and will not use your AWS credentials.
//
// A "NotFound" error code will be returned if the bucket does not exist in the
// AWS partition the regionHint belongs to. If the regionHint parameter is an
// empty string GetBucketRegion will fall back to the ConfigProvider's region
// config. If the regionHint is empty and the ConfigProvider does not have a
// region value, an error will be returned.
//
// For example, to get the region of a bucket which exists in "eu-central-1"
// you could provide a region hint of "us-west-2".
//
// sess := session.Must(session.NewSession())
//
// bucket := "my-bucket"
// region, err := s3manager.GetBucketRegion(ctx, sess, bucket, "us-west-2")
// if err != nil {
// if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" {
// fmt.Fprintf(os.Stderr, "unable to find bucket %s's region\n", bucket)
// }
// return err
// }
// fmt.Printf("Bucket %s is in %s region\n", bucket, region)
//
// By default the request will be made to the Amazon S3 endpoint using the Path
// style addressing.
//
// s3.us-west-2.amazonaws.com/bucketname
//
// This is not compatible with Amazon S3's FIPS endpoints. To override this
// behavior to use Virtual Host style addressing, provide a functional option
// that will set the Request's Config.S3ForcePathStyle to aws.Bool(false).
//
// region, err := s3manager.GetBucketRegion(ctx, sess, "bucketname", "us-west-2", func(r *request.Request) {
// r.S3ForcePathStyle = aws.Bool(false)
// })
//
// To configure GetBucketRegion to make a request via the Amazon
// S3 FIPS endpoints directly when a FIPS region name is not available (e.g.
// fips-us-gov-west-1), set the Config.Endpoint on the Session, or client, the
// utility is called with. The hint region will be ignored if an endpoint URL
// is configured on the session or client.
//
// sess, err := session.NewSession(&aws.Config{
// Endpoint: aws.String("https://s3-fips.us-west-2.amazonaws.com"),
// })
//
// region, err := s3manager.GetBucketRegion(context.Background(), sess, "bucketname", "")
func GetBucketRegion(ctx aws.Context, c client.ConfigProvider, bucket, regionHint string, opts ...request.Option) (string, error) {
var cfg aws.Config
if len(regionHint) != 0 {
cfg.Region = aws.String(regionHint)
}
svc := s3.New(c, &cfg)
return GetBucketRegionWithClient(ctx, svc, bucket, opts...)
}
const bucketRegionHeader = "X-Amz-Bucket-Region"
// GetBucketRegionWithClient is the same as GetBucketRegion with the exception
// that it takes a S3 service client instead of a Session. The regionHint is
// derived from the region the S3 service client was created in.
//
// By default the request will be made to the Amazon S3 endpoint using the Path
// style addressing.
//
// s3.us-west-2.amazonaws.com/bucketname
//
// This is not compatible with Amazon S3's FIPS endpoints. To override this
// behavior to use Virtual Host style addressing, provide a functional option
// that will set the Request's Config.S3ForcePathStyle to aws.Bool(false).
//
// region, err := s3manager.GetBucketRegionWithClient(ctx, client, "bucketname", func(r *request.Request) {
// r.S3ForcePathStyle = aws.Bool(false)
// })
//
// To configure GetBucketRegion to make a request via the Amazon
// S3 FIPS endpoints directly when a FIPS region name is not available (e.g.
// fips-us-gov-west-1), set the Config.Endpoint on the Session, or client, the
// utility is called with. The hint region will be ignored if an endpoint URL
// is configured on the session or client.
//
// region, err := s3manager.GetBucketRegionWithClient(context.Background(),
// s3.New(sess, &aws.Config{
// Endpoint: aws.String("https://s3-fips.us-west-2.amazonaws.com"),
// }),
// "bucketname")
//
// See GetBucketRegion for more information.
func GetBucketRegionWithClient(ctx aws.Context, svc s3iface.S3API, bucket string, opts ...request.Option) (string, error) {
req, _ := svc.HeadBucketRequest(&s3.HeadBucketInput{
Bucket: aws.String(bucket),
})
req.Config.S3ForcePathStyle = aws.Bool(true)
req.Config.Credentials = credentials.AnonymousCredentials
req.SetContext(ctx)
// Disable HTTP redirects to prevent an invalid 301 from eating the response
// because Go's HTTP client will fail, and drop the response, if a 301 is
// received without a Location header. S3 will return a 301 without the
// Location header for HeadObject API calls.
req.DisableFollowRedirects = true
var bucketRegion string
req.Handlers.Send.PushBack(func(r *request.Request) {
bucketRegion = r.HTTPResponse.Header.Get(bucketRegionHeader)
if len(bucketRegion) == 0 {
return
}
r.HTTPResponse.StatusCode = 200
r.HTTPResponse.Status = "OK"
r.Error = nil
})
// Replace the endpoint validation handler to not require a region if an
// endpoint URL was specified. Since these requests are not authenticated,
// requiring a region is not needed when an endpoint URL is provided.
req.Handlers.Validate.Swap(
corehandlers.ValidateEndpointHandler.Name,
request.NamedHandler{
Name: "validateEndpointWithoutRegion",
Fn: validateEndpointWithoutRegion,
},
)
req.ApplyOptions(opts...)
if err := req.Send(); err != nil {
return "", err
}
bucketRegion = s3.NormalizeBucketLocation(bucketRegion)
return bucketRegion, nil
}
func validateEndpointWithoutRegion(r *request.Request) {
// Check if the caller provided an explicit URL instead of one derived by
// the SDK's endpoint resolver. For GetBucketRegion, with an explicit
// endpoint URL, a region is not needed. If no endpoint URL is provided,
// fall back to the SDK's standard endpoint validation handler.
if len(aws.StringValue(r.Config.Endpoint)) == 0 {
corehandlers.ValidateEndpointHandler.Fn(r)
}
}
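Because the hint region is ignored once an endpoint URL is set, the same utility can resolve bucket locations on S3-compatible storage. A sketch, assuming a hypothetical endpoint at https://minio.example.com:

    sess := session.Must(session.NewSession(&aws.Config{
        Endpoint: aws.String("https://minio.example.com"), // hypothetical S3-compatible endpoint
        Region:   aws.String("us-east-1"),                 // placeholder; the endpoint takes precedence
    }))
    region, err := s3manager.GetBucketRegion(context.Background(), sess, "my-bucket", "")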

View File

@@ -0,0 +1,81 @@
package s3manager
import (
"io"
"github.com/aws/aws-sdk-go/internal/sdkio"
)
// BufferedReadSeeker is buffered io.ReadSeeker
type BufferedReadSeeker struct {
r io.ReadSeeker
buffer []byte
readIdx, writeIdx int
}
// NewBufferedReadSeeker returns a new BufferedReadSeeker
// if len(b) == 0 then the buffer will be initialized to 64 KiB.
func NewBufferedReadSeeker(r io.ReadSeeker, b []byte) *BufferedReadSeeker {
if len(b) == 0 {
b = make([]byte, 64*1024)
}
return &BufferedReadSeeker{r: r, buffer: b}
}
func (b *BufferedReadSeeker) reset(r io.ReadSeeker) {
b.r = r
b.readIdx, b.writeIdx = 0, 0
}
// Read will read up to len(p) bytes into p and will return
// the number of bytes read and any error that occurred.
// If len(p) is greater than the buffer size, a single read request
// will be issued to the underlying io.ReadSeeker for len(p) bytes.
// A Read request will at most perform a single Read to the underlying
// io.ReadSeeker, and may return < len(p) if serviced from the buffer.
func (b *BufferedReadSeeker) Read(p []byte) (n int, err error) {
if len(p) == 0 {
return n, err
}
if b.readIdx == b.writeIdx {
if len(p) >= len(b.buffer) {
n, err = b.r.Read(p)
return n, err
}
b.readIdx, b.writeIdx = 0, 0
n, err = b.r.Read(b.buffer)
if n == 0 {
return n, err
}
b.writeIdx += n
}
n = copy(p, b.buffer[b.readIdx:b.writeIdx])
b.readIdx += n
return n, err
}
// Seek will position the underlying io.ReadSeeker to the given offset
// and will clear the buffer.
func (b *BufferedReadSeeker) Seek(offset int64, whence int) (int64, error) {
n, err := b.r.Seek(offset, whence)
b.reset(b.r)
return n, err
}
// ReadAt will read up to len(p) bytes at the given file offset.
// This will result in the buffer being cleared.
func (b *BufferedReadSeeker) ReadAt(p []byte, off int64) (int, error) {
_, err := b.Seek(off, sdkio.SeekStart)
if err != nil {
return 0, err
}
return b.Read(p)
}
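A brief usage sketch (payload is an assumed []byte); passing nil for the buffer selects the 64 KiB default:

    br := NewBufferedReadSeeker(bytes.NewReader(payload), nil)
    p := make([]byte, 512)
    n, err := br.Read(p)                 // served from the internal buffer after one underlying Read
    _, err = br.Seek(0, sdkio.SeekStart) // repositions the reader and clears the buffer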

View File

@@ -0,0 +1,8 @@
//go:build !windows
// +build !windows

package s3manager
func defaultUploadBufferProvider() ReadSeekerWriteToProvider {
return nil
}

View File

@@ -0,0 +1,5 @@
package s3manager
func defaultUploadBufferProvider() ReadSeekerWriteToProvider {
return NewBufferedReadSeekerWriteToPool(1024 * 1024)
}

View File

@@ -0,0 +1,8 @@
//go:build !windows
// +build !windows

package s3manager
func defaultDownloadBufferProvider() WriterReadFromProvider {
return nil
}

View File

@@ -0,0 +1,5 @@
package s3manager
func defaultDownloadBufferProvider() WriterReadFromProvider {
return NewPooledBufferedWriterReadFromProvider(1024 * 1024)
}

View File

@@ -0,0 +1,3 @@
// Package s3manager provides utilities to upload and download objects from
// S3 concurrently. Helpful when working with large objects.
package s3manager

View File

@@ -0,0 +1,601 @@
package s3manager
import (
"fmt"
"io"
"net/http"
"strconv"
"strings"
"sync"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3iface"
)
// DefaultDownloadPartSize is the default range of bytes to get at a time when
// using Download().
const DefaultDownloadPartSize = 1024 * 1024 * 5
// DefaultDownloadConcurrency is the default number of goroutines to spin up
// when using Download().
const DefaultDownloadConcurrency = 5
type errReadingBody struct {
err error
}
func (e *errReadingBody) Error() string {
return fmt.Sprintf("failed to read part body: %v", e.err)
}
func (e *errReadingBody) Unwrap() error {
return e.err
}
// Downloader is the structure that calls Download(). It is safe to call
// Download() on this structure for multiple objects and across concurrent
// goroutines. Mutating the Downloader's properties is not safe to do
// concurrently.
type Downloader struct {
// The size (in bytes) to request from S3 for each part.
// The minimum allowed part size is 5MB, and if this value is set to zero,
// the DefaultDownloadPartSize value will be used.
//
// PartSize is ignored if the Range input parameter is provided.
PartSize int64
// The number of goroutines to spin up in parallel when sending parts.
// If this is set to zero, the DefaultDownloadConcurrency value will be used.
//
// Concurrency of 1 will download the parts sequentially.
//
// Concurrency is ignored if the Range input parameter is provided.
Concurrency int
// An S3 client to use when performing downloads.
S3 s3iface.S3API
// List of request options that will be passed down to individual API
// operation requests made by the downloader.
RequestOptions []request.Option
// Defines the buffer strategy used when downloading a part.
//
// If a WriterReadFromProvider is given the Download manager
// will pass the io.WriterAt of the Download request to the provider
// and will use the returned WriterReadFrom from the provider as the
// destination writer when copying from the HTTP response body.
BufferProvider WriterReadFromProvider
}
// WithDownloaderRequestOptions appends to the Downloader's API request options.
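//
// For example (a sketch; the user agent string is illustrative):
//
//	downloader := NewDownloader(sess,
//		WithDownloaderRequestOptions(request.WithAppendUserAgent("my-app")),
//	)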
func WithDownloaderRequestOptions(opts ...request.Option) func(*Downloader) {
return func(d *Downloader) {
d.RequestOptions = append(d.RequestOptions, opts...)
}
}
// NewDownloader creates a new Downloader instance to download objects from
// S3 in concurrent chunks. Pass in additional functional options to customize
// the downloader behavior. Requires a client.ConfigProvider in order to create
// a S3 service client. The session.Session satisfies the client.ConfigProvider
// interface.
//
// Example:
// // The session the S3 Downloader will use
// sess := session.Must(session.NewSession())
//
// // Create a downloader with the session and default options
// downloader := s3manager.NewDownloader(sess)
//
// // Create a downloader with the session and custom options
// downloader := s3manager.NewDownloader(sess, func(d *s3manager.Downloader) {
// d.PartSize = 64 * 1024 * 1024 // 64MB per part
// })
func NewDownloader(c client.ConfigProvider, options ...func(*Downloader)) *Downloader {
return newDownloader(s3.New(c), options...)
}
func newDownloader(client s3iface.S3API, options ...func(*Downloader)) *Downloader {
d := &Downloader{
S3: client,
PartSize: DefaultDownloadPartSize,
Concurrency: DefaultDownloadConcurrency,
BufferProvider: defaultDownloadBufferProvider(),
}
for _, option := range options {
option(d)
}
return d
}
// NewDownloaderWithClient creates a new Downloader instance to download
// objects from S3 in concurrent chunks. Pass in additional functional
// options to customize the downloader behavior. Requires a S3 service client
// to make S3 API calls.
//
// Example:
// // The session the S3 Downloader will use
// sess := session.Must(session.NewSession())
//
// // The S3 client the S3 Downloader will use
// s3Svc := s3.New(sess)
//
// // Create a downloader with the s3 client and default options
// downloader := s3manager.NewDownloaderWithClient(s3Svc)
//
// // Create a downloader with the s3 client and custom options
// downloader := s3manager.NewDownloaderWithClient(s3Svc, func(d *s3manager.Downloader) {
// d.PartSize = 64 * 1024 * 1024 // 64MB per part
// })
func NewDownloaderWithClient(svc s3iface.S3API, options ...func(*Downloader)) *Downloader {
return newDownloader(svc, options...)
}
type maxRetrier interface {
MaxRetries() int
}
// Download downloads an object in S3 and writes the payload into w using
// concurrent GET requests. The n int64 returned is the size of the object downloaded
// in bytes.
//
// Additional functional options can be provided to configure the individual
// download. These options are copies of the Downloader instance Download is called from.
// Modifying the options will not impact the original Downloader instance.
//
// It is safe to call this method concurrently across goroutines.
//
// The w io.WriterAt can be satisfied by an os.File to do multipart concurrent
// downloads, or in memory []byte wrapper using aws.WriteAtBuffer.
//
// Specifying a Downloader.Concurrency of 1 will cause the Downloader to
// download the parts from S3 sequentially.
//
// If the GetObjectInput's Range value is provided, the downloader will
// perform a single GetObject request for that object's range. This causes
// the part size and concurrency configurations to be ignored.
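//
// A sketch (assuming a downloader from NewDownloader; error handling elided)
// of downloading an object into memory via aws.WriteAtBuffer:
//
//	buf := aws.NewWriteAtBuffer([]byte{})
//	n, err := downloader.Download(buf, &s3.GetObjectInput{
//		Bucket: aws.String("bucket"),
//		Key:    aws.String("key"),
//	})
//	// buf.Bytes() now holds the n downloaded bytes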
func (d Downloader) Download(w io.WriterAt, input *s3.GetObjectInput, options ...func(*Downloader)) (n int64, err error) {
return d.DownloadWithContext(aws.BackgroundContext(), w, input, options...)
}
// DownloadWithContext downloads an object in S3 and writes the payload into w
// using concurrent GET requests. The n int64 returned is the size of the object downloaded
// in bytes.
//
// DownloadWithContext is the same as Download with the additional support for
// Context input parameters. The Context must not be nil. A nil Context will
// cause a panic. Use the Context to add deadlining, timeouts, etc. The
// DownloadWithContext may create sub-contexts for individual underlying
// requests.
//
// Additional functional options can be provided to configure the individual
// download. These options are copies of the Downloader instance Download is
// called from. Modifying the options will not impact the original Downloader
// instance. Use the WithDownloaderRequestOptions helper function to pass in request
// options that will be applied to all API operations made with this downloader.
//
// The w io.WriterAt can be satisfied by an os.File to do multipart concurrent
// downloads, or in memory []byte wrapper using aws.WriteAtBuffer.
//
// Specifying a Downloader.Concurrency of 1 will cause the Downloader to
// download the parts from S3 sequentially.
//
// It is safe to call this method concurrently across goroutines.
//
// If the GetObjectInput's Range value is provided, the downloader will
// perform a single GetObject request for that object's range. This causes
// the part size and concurrency configurations to be ignored.
func (d Downloader) DownloadWithContext(ctx aws.Context, w io.WriterAt, input *s3.GetObjectInput, options ...func(*Downloader)) (n int64, err error) {
if err := validateSupportedARNType(aws.StringValue(input.Bucket)); err != nil {
return 0, err
}
impl := downloader{w: w, in: input, cfg: d, ctx: ctx}
for _, option := range options {
option(&impl.cfg)
}
impl.cfg.RequestOptions = append(impl.cfg.RequestOptions, request.WithAppendUserAgent("S3Manager"))
if s, ok := d.S3.(maxRetrier); ok {
impl.partBodyMaxRetries = s.MaxRetries()
}
impl.totalBytes = -1
if impl.cfg.Concurrency == 0 {
impl.cfg.Concurrency = DefaultDownloadConcurrency
}
if impl.cfg.PartSize == 0 {
impl.cfg.PartSize = DefaultDownloadPartSize
}
return impl.download()
}
// DownloadWithIterator will download a batched amount of objects from S3 and write them
// to the io.WriterAt specified in the iterator.
//
// Example:
// svc := s3manager.NewDownloader(sess)
//
// fooFile, err := os.Create("/tmp/foo.file")
// if err != nil {
// return err
// }
//
// barFile, err := os.Create("/tmp/bar.file")
// if err != nil {
// return err
// }
//
// objects := []s3manager.BatchDownloadObject {
// {
// Object: &s3.GetObjectInput {
// Bucket: aws.String("bucket"),
// Key: aws.String("foo"),
// },
// Writer: fooFile,
// },
// {
// Object: &s3.GetObjectInput {
// Bucket: aws.String("bucket"),
// Key: aws.String("bar"),
// },
// Writer: barFile,
// },
// }
//
// iter := &s3manager.DownloadObjectsIterator{Objects: objects}
// if err := svc.DownloadWithIterator(aws.BackgroundContext(), iter); err != nil {
// return err
// }
func (d Downloader) DownloadWithIterator(ctx aws.Context, iter BatchDownloadIterator, opts ...func(*Downloader)) error {
var errs []Error
for iter.Next() {
object := iter.DownloadObject()
if _, err := d.DownloadWithContext(ctx, object.Writer, object.Object, opts...); err != nil {
errs = append(errs, newError(err, object.Object.Bucket, object.Object.Key))
}
if object.After == nil {
continue
}
if err := object.After(); err != nil {
errs = append(errs, newError(err, object.Object.Bucket, object.Object.Key))
}
}
if len(errs) > 0 {
return NewBatchError("BatchedDownloadIncomplete", "some objects have failed to download.", errs)
}
return nil
}
// downloader is the implementation structure used internally by Downloader.
type downloader struct {
ctx aws.Context
cfg Downloader
in *s3.GetObjectInput
w io.WriterAt
wg sync.WaitGroup
m sync.Mutex
pos int64
totalBytes int64
written int64
err error
partBodyMaxRetries int
}
// download performs the implementation of the object download across ranged
// GETs.
func (d *downloader) download() (n int64, err error) {
// If a range is specified, fall back to a single download of that range.
// This enables the functionality of ranged gets with the downloader, but
// at the cost of no multipart downloads.
if rng := aws.StringValue(d.in.Range); len(rng) > 0 {
d.downloadRange(rng)
return d.written, d.err
}
// Spin off first worker to check additional header information
d.getChunk()
if total := d.getTotalBytes(); total >= 0 {
// Spin up workers
ch := make(chan dlchunk, d.cfg.Concurrency)
for i := 0; i < d.cfg.Concurrency; i++ {
d.wg.Add(1)
go d.downloadPart(ch)
}
// Assign work
for d.getErr() == nil {
if d.pos >= total {
break // We're finished queuing chunks
}
// Queue the next range of bytes to read.
ch <- dlchunk{w: d.w, start: d.pos, size: d.cfg.PartSize}
d.pos += d.cfg.PartSize
}
// Wait for completion
close(ch)
d.wg.Wait()
} else {
// Checking if we read anything new
for d.err == nil {
d.getChunk()
}
// We expect a 416 error letting us know we are done downloading the
// total bytes. Since we do not know the content's length, this will
// keep grabbing chunks of data until the range of bytes specified in
// the request is out of range of the content. Once this happens, a
// 416 should occur.
e, ok := d.err.(awserr.RequestFailure)
if ok && e.StatusCode() == http.StatusRequestedRangeNotSatisfiable {
d.err = nil
}
}
// Return error
return d.written, d.err
}
// downloadPart is an individual goroutine worker reading from the ch channel
// and performing a GetObject request on the data with a given byte range.
//
// If this is the first worker, this operation also resolves the total number
// of bytes to be read so that the worker manager knows when it is finished.
func (d *downloader) downloadPart(ch chan dlchunk) {
defer d.wg.Done()
for {
chunk, ok := <-ch
if !ok {
break
}
if d.getErr() != nil {
// Drain the channel if there is an error, to prevent deadlocking
// of download producer.
continue
}
if err := d.downloadChunk(chunk); err != nil {
d.setErr(err)
}
}
}
// getChunk grabs a chunk of data from the body.
// Not thread safe. Should only be used when grabbing data on a single thread.
func (d *downloader) getChunk() {
if d.getErr() != nil {
return
}
chunk := dlchunk{w: d.w, start: d.pos, size: d.cfg.PartSize}
d.pos += d.cfg.PartSize
if err := d.downloadChunk(chunk); err != nil {
d.setErr(err)
}
}
// downloadRange downloads an object given the passed-in Byte-Range value.
// The chunk used to download the range will be configured for that range.
func (d *downloader) downloadRange(rng string) {
if d.getErr() != nil {
return
}
chunk := dlchunk{w: d.w, start: d.pos}
// Ranges specified will short circuit the multipart download
chunk.withRange = rng
if err := d.downloadChunk(chunk); err != nil {
d.setErr(err)
}
// Update the position based on the amount of data received.
d.pos = d.written
}
// downloadChunk downloads the chunk from S3.
func (d *downloader) downloadChunk(chunk dlchunk) error {
in := &s3.GetObjectInput{}
awsutil.Copy(in, d.in)
// Get the next byte range of data
in.Range = aws.String(chunk.ByteRange())
var n int64
var err error
for retry := 0; retry <= d.partBodyMaxRetries; retry++ {
n, err = d.tryDownloadChunk(in, &chunk)
if err == nil {
break
}
// Check if the returned error is an errReadingBody.
// If err is errReadingBody this indicates that an error
// occurred while copying the http response body.
// If this occurs we unwrap the err to set the underlying error
// and attempt any remaining retries.
if bodyErr, ok := err.(*errReadingBody); ok {
err = bodyErr.Unwrap()
} else {
return err
}
chunk.cur = 0
logMessage(d.cfg.S3, aws.LogDebugWithRequestRetries,
fmt.Sprintf("DEBUG: object part body download interrupted %s, err, %v, retrying attempt %d",
aws.StringValue(in.Key), err, retry))
}
d.incrWritten(n)
return err
}
func (d *downloader) tryDownloadChunk(in *s3.GetObjectInput, w io.Writer) (int64, error) {
cleanup := func() {}
if d.cfg.BufferProvider != nil {
w, cleanup = d.cfg.BufferProvider.GetReadFrom(w)
}
defer cleanup()
resp, err := d.cfg.S3.GetObjectWithContext(d.ctx, in, d.cfg.RequestOptions...)
if err != nil {
return 0, err
}
d.setTotalBytes(resp) // Set total if not yet set.
n, err := io.Copy(w, resp.Body)
resp.Body.Close()
if err != nil {
return n, &errReadingBody{err: err}
}
return n, nil
}
func logMessage(svc s3iface.S3API, level aws.LogLevelType, msg string) {
s, ok := svc.(*s3.S3)
if !ok {
return
}
if s.Config.Logger == nil {
return
}
if s.Config.LogLevel.Matches(level) {
s.Config.Logger.Log(msg)
}
}
// getTotalBytes is a thread-safe getter for retrieving the total byte status.
func (d *downloader) getTotalBytes() int64 {
d.m.Lock()
defer d.m.Unlock()
return d.totalBytes
}
// setTotalBytes is a thread-safe setter for setting the total byte status.
// Will extract the object's total bytes from the Content-Range if the file
// will be chunked, or from Content-Length when the response does not include
// a Content-Range (meaning the object was not chunked, because the full file
// fits within the PartSize directive).
func (d *downloader) setTotalBytes(resp *s3.GetObjectOutput) {
d.m.Lock()
defer d.m.Unlock()
if d.totalBytes >= 0 {
return
}
if resp.ContentRange == nil {
// ContentRange is nil when the full file contents are provided and the
// response is not chunked. Use ContentLength instead.
if resp.ContentLength != nil {
d.totalBytes = *resp.ContentLength
return
}
} else {
parts := strings.Split(*resp.ContentRange, "/")
total := int64(-1)
var err error
// Check whether a numbered total exists. If one does not, assume the
// total is -1 (undefined) and sequentially download each chunk until
// hitting a 416 error.
totalStr := parts[len(parts)-1]
if totalStr != "*" {
total, err = strconv.ParseInt(totalStr, 10, 64)
if err != nil {
d.err = err
return
}
}
d.totalBytes = total
}
}
func (d *downloader) incrWritten(n int64) {
d.m.Lock()
defer d.m.Unlock()
d.written += n
}
// getErr is a thread-safe getter for the error object
func (d *downloader) getErr() error {
d.m.Lock()
defer d.m.Unlock()
return d.err
}
// setErr is a thread-safe setter for the error object
func (d *downloader) setErr(e error) {
d.m.Lock()
defer d.m.Unlock()
d.err = e
}
// dlchunk represents a single chunk of data to write by the worker routine.
// This structure also implements an io.SectionReader style interface for
// io.WriterAt, effectively making it an io.SectionWriter (which does not
// exist).
type dlchunk struct {
w io.WriterAt
start int64
size int64
cur int64
// specifies the byte range the chunk should be downloaded with.
withRange string
}
// Write wraps io.WriterAt for the dlchunk, writing from the dlchunk's start
// position to its end (or EOF).
//
// If a range is specified on the dlchunk the size will be ignored when
// writing, as the total size may not be known ahead of time.
func (c *dlchunk) Write(p []byte) (n int, err error) {
if c.cur >= c.size && len(c.withRange) == 0 {
return 0, io.EOF
}
n, err = c.w.WriteAt(p, c.start+c.cur)
c.cur += int64(n)
return
}
// ByteRange returns a HTTP Byte-Range header value that should be used by the
// client to request the chunk's range.
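//
// For example, a chunk with start 0 and the default 5MB part size yields
// "bytes=0-5242879"; if withRange is set, that exact value is returned instead.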
func (c *dlchunk) ByteRange() string {
if len(c.withRange) != 0 {
return c.withRange
}
return fmt.Sprintf("bytes=%d-%d", c.start, c.start+c.size-1)
}


@@ -0,0 +1,252 @@
package s3manager
import (
"fmt"
"sync"
"github.com/aws/aws-sdk-go/aws"
)
type byteSlicePool interface {
Get(aws.Context) (*[]byte, error)
Put(*[]byte)
ModifyCapacity(int)
SliceSize() int64
Close()
}
type maxSlicePool struct {
// allocator is defined as a function pointer to allow
// for test cases to instrument custom tracers when allocations
// occur.
allocator sliceAllocator
slices chan *[]byte
allocations chan struct{}
capacityChange chan struct{}
max int
sliceSize int64
mtx sync.RWMutex
}
func newMaxSlicePool(sliceSize int64) *maxSlicePool {
p := &maxSlicePool{sliceSize: sliceSize}
p.allocator = p.newSlice
return p
}
var errZeroCapacity = fmt.Errorf("get called on zero capacity pool")
func (p *maxSlicePool) Get(ctx aws.Context) (*[]byte, error) {
// Check if the context is canceled before attempting to get a slice.
// This ensures priority is given to the cancel case first.
select {
case <-ctx.Done():
return nil, ctx.Err()
default:
}
p.mtx.RLock()
for {
select {
case bs, ok := <-p.slices:
p.mtx.RUnlock()
if !ok {
// attempt to get on a zero capacity pool
return nil, errZeroCapacity
}
return bs, nil
case <-ctx.Done():
p.mtx.RUnlock()
return nil, ctx.Err()
default:
// pass
}
select {
case _, ok := <-p.allocations:
p.mtx.RUnlock()
if !ok {
// attempt to get on a zero capacity pool
return nil, errZeroCapacity
}
return p.allocator(), nil
case <-ctx.Done():
p.mtx.RUnlock()
return nil, ctx.Err()
default:
// In the event that there are no slices or allocations available, wait
// for a capacity change notification. This prevents deadlock situations
// that can occur around sync.RWMutex: when a lock request occurs on
// ModifyCapacity, no new readers are allowed to acquire a read lock. By
// releasing the read lock here and waiting for a notification, we avoid
// a deadlock where Get holds the read lock indefinitely waiting for
// capacity, ModifyCapacity waits for a write lock, and a Put is blocked
// trying to get a read lock which is blocked by ModifyCapacity.
// Short-circuit if the pool capacity is zero.
if p.max == 0 {
p.mtx.RUnlock()
return nil, errZeroCapacity
}
// Since we will be releasing the read lock we need to take a reference to
// the channel. Because channels are references we will still get notified
// if slices are added, or if the channel is closed due to a capacity
// modification. This specifically avoids a race where ModifyCapacity both
// closes a channel and initializes a new one while we don't hold a read lock.
c := p.capacityChange
p.mtx.RUnlock()
select {
case <-c:
p.mtx.RLock()
case <-ctx.Done():
return nil, ctx.Err()
}
}
}
}
func (p *maxSlicePool) Put(bs *[]byte) {
p.mtx.RLock()
defer p.mtx.RUnlock()
if p.max == 0 {
return
}
select {
case p.slices <- bs:
p.notifyCapacity()
default:
// If the slice channel is already full when attempting to add the slice
// then we drop the slice. The logic here is to prevent a deadlock situation
// if the channel is already at max capacity, and allows us to reap
// allocations that are returned and no longer needed.
}
}
func (p *maxSlicePool) ModifyCapacity(delta int) {
if delta == 0 {
return
}
p.mtx.Lock()
defer p.mtx.Unlock()
p.max += delta
if p.max == 0 {
p.empty()
return
}
if p.capacityChange != nil {
close(p.capacityChange)
}
p.capacityChange = make(chan struct{}, p.max)
origAllocations := p.allocations
p.allocations = make(chan struct{}, p.max)
newAllocs := len(origAllocations) + delta
for i := 0; i < newAllocs; i++ {
p.allocations <- struct{}{}
}
if origAllocations != nil {
close(origAllocations)
}
origSlices := p.slices
p.slices = make(chan *[]byte, p.max)
if origSlices == nil {
return
}
close(origSlices)
for bs := range origSlices {
select {
case p.slices <- bs:
default:
// If the new channel blocks while adding slices from the old channel
// then we drop the slice. The logic here is to prevent a deadlock situation
// if the new channel has a smaller capacity than the old.
}
}
}
func (p *maxSlicePool) notifyCapacity() {
select {
case p.capacityChange <- struct{}{}:
default:
// This *shouldn't* happen as the channel is both buffered to the max pool
// capacity size and is resized on capacity modifications. This is just a
// safeguard to ensure that a blocking situation can't occur.
}
}
func (p *maxSlicePool) SliceSize() int64 {
return p.sliceSize
}
func (p *maxSlicePool) Close() {
p.mtx.Lock()
defer p.mtx.Unlock()
p.empty()
}
func (p *maxSlicePool) empty() {
p.max = 0
if p.capacityChange != nil {
close(p.capacityChange)
p.capacityChange = nil
}
if p.allocations != nil {
close(p.allocations)
for range p.allocations {
// drain channel
}
p.allocations = nil
}
if p.slices != nil {
close(p.slices)
for range p.slices {
// drain channel
}
p.slices = nil
}
}
func (p *maxSlicePool) newSlice() *[]byte {
bs := make([]byte, p.sliceSize)
return &bs
}
type returnCapacityPoolCloser struct {
byteSlicePool
returnCapacity int
}
func (n *returnCapacityPoolCloser) ModifyCapacity(delta int) {
if delta > 0 {
n.returnCapacity = -1 * delta
}
n.byteSlicePool.ModifyCapacity(delta)
}
func (n *returnCapacityPoolCloser) Close() {
if n.returnCapacity < 0 {
n.byteSlicePool.ModifyCapacity(n.returnCapacity)
}
}
type sliceAllocator func() *[]byte
var newByteSlicePool = func(sliceSize int64) byteSlicePool {
return newMaxSlicePool(sliceSize)
}
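// A minimal lifecycle sketch of the pool as the uploader uses it (capacity
// and sizes are illustrative):
//
//    pool := newByteSlicePool(1024 * 1024 * 5)
//    pool.ModifyCapacity(6) // e.g. Concurrency + 1
//    bs, err := pool.Get(aws.BackgroundContext())
//    if err == nil {
//        // ... fill *bs and use it ...
//        pool.Put(bs)
//    }
//    pool.Close()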


@@ -0,0 +1,65 @@
package s3manager
import (
"io"
"sync"
)
// ReadSeekerWriteTo defines an interface implementing io.WriterTo and io.ReadSeeker.
type ReadSeekerWriteTo interface {
io.ReadSeeker
io.WriterTo
}
// BufferedReadSeekerWriteTo wraps a BufferedReadSeeker with an io.WriterTo
// implementation.
type BufferedReadSeekerWriteTo struct {
*BufferedReadSeeker
}
// WriteTo writes to the given io.Writer from BufferedReadSeeker until there's no more data to write or
// an error occurs. Returns the number of bytes written and any error encountered during the write.
func (b *BufferedReadSeekerWriteTo) WriteTo(writer io.Writer) (int64, error) {
return io.Copy(writer, b.BufferedReadSeeker)
}
// ReadSeekerWriteToProvider provides an implementation of io.WriteTo for an io.ReadSeeker
type ReadSeekerWriteToProvider interface {
GetWriteTo(seeker io.ReadSeeker) (r ReadSeekerWriteTo, cleanup func())
}
// BufferedReadSeekerWriteToPool uses a sync.Pool to create and reuse
// []byte slices for buffering parts in memory
type BufferedReadSeekerWriteToPool struct {
pool sync.Pool
}
// NewBufferedReadSeekerWriteToPool will return a new BufferedReadSeekerWriteToPool that will create
// a pool of reusable buffers. If size is less than 64 KiB then the buffer
// will default to 64 KiB. Reason: io.Copy from writers or readers that don't
// support io.WriterTo or io.ReaderFrom respectively will default to copying
// 32 KiB.
func NewBufferedReadSeekerWriteToPool(size int) *BufferedReadSeekerWriteToPool {
if size < 65536 {
size = 65536
}
return &BufferedReadSeekerWriteToPool{
pool: sync.Pool{New: func() interface{} {
return make([]byte, size)
}},
}
}
// GetWriteTo will wrap the provided io.ReadSeeker with a BufferedReadSeekerWriteTo.
// The provided cleanup function must be called after operations have been
// completed on the returned ReadSeekerWriteTo in order to signal the return
// of resources to the pool.
func (p *BufferedReadSeekerWriteToPool) GetWriteTo(seeker io.ReadSeeker) (r ReadSeekerWriteTo, cleanup func()) {
buffer := p.pool.Get().([]byte)
r = &BufferedReadSeekerWriteTo{BufferedReadSeeker: NewBufferedReadSeeker(seeker, buffer)}
cleanup = func() {
p.pool.Put(buffer)
}
return r, cleanup
}
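// A hedged wiring sketch: using the pool as an Uploader's part-buffer
// strategy (sess is assumed to be an existing session.Session):
//
//    uploader := s3manager.NewUploader(sess, func(u *s3manager.Uploader) {
//        u.BufferProvider = s3manager.NewBufferedReadSeekerWriteToPool(1024 * 1024)
//    })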


@@ -0,0 +1,794 @@
package s3manager
import (
"bytes"
"fmt"
"io"
"sort"
"sync"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3iface"
)
// MaxUploadParts is the maximum allowed number of parts in a multi-part upload
// on Amazon S3.
const MaxUploadParts = 10000
// MinUploadPartSize is the minimum allowed part size when uploading a part to
// Amazon S3.
const MinUploadPartSize int64 = 1024 * 1024 * 5
// DefaultUploadPartSize is the default part size to buffer chunks of a
// payload into.
const DefaultUploadPartSize = MinUploadPartSize
// DefaultUploadConcurrency is the default number of goroutines to spin up when
// using Upload().
const DefaultUploadConcurrency = 5
// A MultiUploadFailure wraps a failed S3 multipart upload. An error returned
// will satisfy this interface when a multipart upload failed to upload all
// chunks to S3. In the case of a failure the UploadID is needed to operate on
// the chunks, if any, which were uploaded.
//
// Example:
//
// u := s3manager.NewUploader(opts)
// output, err := u.upload(input)
// if err != nil {
// if multierr, ok := err.(s3manager.MultiUploadFailure); ok {
// // Process error and its associated uploadID
// fmt.Println("Error:", multierr.Code(), multierr.Message(), multierr.UploadID())
// } else {
// // Process error generically
// fmt.Println("Error:", err.Error())
// }
// }
//
type MultiUploadFailure interface {
awserr.Error
// Returns the upload id for the S3 multipart upload that failed.
UploadID() string
}
// So that the Error interface type can be included as an anonymous field
// in the multiUploadError struct and not conflict with the error.Error() method.
type awsError awserr.Error
// A multiUploadError wraps the upload ID of a failed S3 multipart upload.
// Composed of BaseError for code, message, and original error.
//
// Should be used for an error that occurred while failing an S3 multipart
// upload, when an upload ID is available. If an upload ID is not available,
// a more relevant error type should be used instead.
type multiUploadError struct {
awsError
// ID for multipart upload which failed.
uploadID string
}
// Error returns the string representation of the error.
//
// See apierr.BaseError ErrorWithExtra for output format
//
// Satisfies the error interface.
func (m multiUploadError) Error() string {
extra := fmt.Sprintf("upload id: %s", m.uploadID)
return awserr.SprintError(m.Code(), m.Message(), extra, m.OrigErr())
}
// String returns the string representation of the error.
// Alias for Error to satisfy the stringer interface.
func (m multiUploadError) String() string {
return m.Error()
}
// UploadID returns the id of the S3 upload which failed.
func (m multiUploadError) UploadID() string {
return m.uploadID
}
// UploadOutput represents a response from the Upload() call.
type UploadOutput struct {
// The URL where the object was uploaded to.
Location string
// The version of the object that was uploaded. Will only be populated if
// the S3 Bucket is versioned. If the bucket is not versioned this field
// will not be set.
VersionID *string
// The ID for a multipart upload to S3. In the case of an error the error
// can be cast to the MultiUploadFailure interface to extract the upload ID.
UploadID string
// Entity tag of the object.
ETag *string
}
// WithUploaderRequestOptions appends to the Uploader's API request options.
func WithUploaderRequestOptions(opts ...request.Option) func(*Uploader) {
return func(u *Uploader) {
u.RequestOptions = append(u.RequestOptions, opts...)
}
}
// The Uploader structure that calls Upload(). It is safe to call Upload()
// on this structure for multiple objects and across concurrent goroutines.
// Mutating the Uploader's properties is not safe to be done concurrently.
//
// The ContentMD5 member for pre-computed MD5 checksums will be ignored for
// multipart uploads. For objects that will be uploaded in a single part,
// the ContentMD5 will be used.
//
// The Checksum members for pre-computed checksums will be ignored for
// multipart uploads. For objects that will be uploaded in a single part,
// the checksum member will be included in the request.
type Uploader struct {
// The buffer size (in bytes) to use when buffering data into chunks and
// sending them as parts to S3. The minimum allowed part size is 5MB, and
// if this value is set to zero, the DefaultUploadPartSize value will be used.
PartSize int64
// The number of goroutines to spin up in parallel per call to Upload when
// sending parts. If this is set to zero, the DefaultUploadConcurrency value
// will be used.
//
// The concurrency pool is not shared between calls to Upload.
Concurrency int
// Setting this value to true will cause the SDK to avoid calling
// AbortMultipartUpload on a failure, leaving all successfully uploaded
// parts on S3 for manual recovery.
//
// Note that storing parts of an incomplete multipart upload counts towards
// space usage on S3 and will add additional costs if not cleaned up.
LeavePartsOnError bool
// MaxUploadParts is the max number of parts which will be uploaded to S3.
// Will be used to calculate the part size of the object to be uploaded.
// E.g., a 5GB file with MaxUploadParts set to 100 will be uploaded as
// 100 50MB parts, subject to the MaxUploadParts limit (10,000 parts).
//
// MaxUploadParts must not be used to limit the total number of bytes uploaded.
// Use a type like io.LimitReader (https://golang.org/pkg/io/#LimitedReader)
// instead. An io.LimitReader is helpful when uploading an unbounded reader
// to S3, and you know its maximum size. Otherwise the reader's io.EOF returned
// error must be used to signal end of stream.
//
// Defaults to package const's MaxUploadParts value.
MaxUploadParts int
// The client to use when uploading to S3.
S3 s3iface.S3API
// List of request options that will be passed down to individual API
// operation requests made by the uploader.
RequestOptions []request.Option
// Defines the buffer strategy used when uploading a part
BufferProvider ReadSeekerWriteToProvider
// partPool allows for the re-usage of streaming payload part buffers between upload calls
partPool byteSlicePool
}
// NewUploader creates a new Uploader instance to upload objects to S3. Pass in
// additional functional options to customize the uploader's behavior. Requires a
// client.ConfigProvider in order to create an S3 service client. The session.Session
// satisfies the client.ConfigProvider interface.
//
// Example:
// // The session the S3 Uploader will use
// sess := session.Must(session.NewSession())
//
// // Create an uploader with the session and default options
// uploader := s3manager.NewUploader(sess)
//
// // Create an uploader with the session and custom options
// uploader := s3manager.NewUploader(sess, func(u *s3manager.Uploader) {
// u.PartSize = 64 * 1024 * 1024 // 64MB per part
// })
func NewUploader(c client.ConfigProvider, options ...func(*Uploader)) *Uploader {
return newUploader(s3.New(c), options...)
}
func newUploader(client s3iface.S3API, options ...func(*Uploader)) *Uploader {
u := &Uploader{
S3: client,
PartSize: DefaultUploadPartSize,
Concurrency: DefaultUploadConcurrency,
LeavePartsOnError: false,
MaxUploadParts: MaxUploadParts,
BufferProvider: defaultUploadBufferProvider(),
}
for _, option := range options {
option(u)
}
u.partPool = newByteSlicePool(u.PartSize)
return u
}
// NewUploaderWithClient creates a new Uploader instance to upload objects to S3. Pass in
// additional functional options to customize the uploader's behavior. Requires
// a S3 service client to make S3 API calls.
//
// Example:
// // The session the S3 Uploader will use
// sess := session.Must(session.NewSession())
//
// // S3 service client the Upload manager will use.
// s3Svc := s3.New(sess)
//
// // Create an uploader with S3 client and default options
// uploader := s3manager.NewUploaderWithClient(s3Svc)
//
// // Create an uploader with S3 client and custom options
// uploader := s3manager.NewUploaderWithClient(s3Svc, func(u *s3manager.Uploader) {
// u.PartSize = 64 * 1024 * 1024 // 64MB per part
// })
func NewUploaderWithClient(svc s3iface.S3API, options ...func(*Uploader)) *Uploader {
return newUploader(svc, options...)
}
// Upload uploads an object to S3, intelligently buffering large files into
// smaller chunks and sending them in parallel across multiple goroutines. You
// can configure the buffer size and concurrency through the Uploader's parameters.
//
// Additional functional options can be provided to configure the individual
// upload. These options are copies of the Uploader instance Upload is called from.
// Modifying the options will not impact the original Uploader instance.
//
// Use the WithUploaderRequestOptions helper function to pass in request
// options that will be applied to all API operations made with this uploader.
//
// It is safe to call this method concurrently across goroutines.
//
// Example:
// // Upload input parameters
// upParams := &s3manager.UploadInput{
// Bucket: &bucketName,
// Key: &keyName,
// Body: file,
// }
//
// // Perform an upload.
// result, err := uploader.Upload(upParams)
//
// Perform upload with options different than those in the Uploader.
// result, err := uploader.Upload(upParams, func(u *s3manager.Uploader) {
// u.PartSize = 10 * 1024 * 1024 // 10MB part size
// u.LeavePartsOnError = true // Don't delete the parts if the upload fails.
// })
func (u Uploader) Upload(input *UploadInput, options ...func(*Uploader)) (*UploadOutput, error) {
return u.UploadWithContext(aws.BackgroundContext(), input, options...)
}
// UploadWithContext uploads an object to S3, intelligently buffering large
// files into smaller chunks and sending them in parallel across multiple
// goroutines. You can configure the buffer size and concurrency through the
// Uploader's parameters.
//
// UploadWithContext is the same as Upload with the additional support for
// Context input parameters. The Context must not be nil. A nil Context will
// cause a panic. Use the context to add deadlining, timeouts, etc. The
// UploadWithContext may create sub-contexts for individual underlying requests.
//
// Additional functional options can be provided to configure the individual
// upload. These options are copies of the Uploader instance Upload is called from.
// Modifying the options will not impact the original Uploader instance.
//
// Use the WithUploaderRequestOptions helper function to pass in request
// options that will be applied to all API operations made with this uploader.
//
// It is safe to call this method concurrently across goroutines.
func (u Uploader) UploadWithContext(ctx aws.Context, input *UploadInput, opts ...func(*Uploader)) (*UploadOutput, error) {
i := uploader{in: input, cfg: u, ctx: ctx}
for _, opt := range opts {
opt(&i.cfg)
}
i.cfg.RequestOptions = append(i.cfg.RequestOptions, request.WithAppendUserAgent("S3Manager"))
return i.upload()
}
// UploadWithIterator will upload a batch of objects to S3. This operation uses
// the iterator pattern to know which object to upload next. Since this is an
// interface, it allows for custom defined functionality.
//
// Example:
// svc := s3manager.NewUploader(sess)
//
// objects := []BatchUploadObject{
// {
// Object: &s3manager.UploadInput {
// Key: aws.String("key"),
// Bucket: aws.String("bucket"),
// },
// },
// }
//
// iter := &s3manager.UploadObjectsIterator{Objects: objects}
// if err := svc.UploadWithIterator(aws.BackgroundContext(), iter); err != nil {
// return err
// }
func (u Uploader) UploadWithIterator(ctx aws.Context, iter BatchUploadIterator, opts ...func(*Uploader)) error {
var errs []Error
for iter.Next() {
object := iter.UploadObject()
if _, err := u.UploadWithContext(ctx, object.Object, opts...); err != nil {
s3Err := Error{
OrigErr: err,
Bucket: object.Object.Bucket,
Key: object.Object.Key,
}
errs = append(errs, s3Err)
}
if object.After == nil {
continue
}
if err := object.After(); err != nil {
s3Err := Error{
OrigErr: err,
Bucket: object.Object.Bucket,
Key: object.Object.Key,
}
errs = append(errs, s3Err)
}
}
if len(errs) > 0 {
return NewBatchError("BatchedUploadIncomplete", "some objects have failed to upload.", errs)
}
return nil
}
// internal structure to manage an upload to S3.
type uploader struct {
ctx aws.Context
cfg Uploader
in *UploadInput
readerPos int64 // current reader position
totalSize int64 // set to -1 if the size is not known
}
// internal logic for deciding whether to upload a single part or use a
// multipart upload.
func (u *uploader) upload() (*UploadOutput, error) {
if err := u.init(); err != nil {
return nil, awserr.New("ReadRequestBody", "unable to initialize upload", err)
}
defer u.cfg.partPool.Close()
if u.cfg.PartSize < MinUploadPartSize {
msg := fmt.Sprintf("part size must be at least %d bytes", MinUploadPartSize)
return nil, awserr.New("ConfigError", msg, nil)
}
// Do one read to determine if we have more than one part
reader, _, cleanup, err := u.nextReader()
if err == io.EOF { // single part
return u.singlePart(reader, cleanup)
} else if err != nil {
cleanup()
return nil, awserr.New("ReadRequestBody", "read upload data failed", err)
}
mu := multiuploader{uploader: u}
return mu.upload(reader, cleanup)
}
// init will initialize all default options.
func (u *uploader) init() error {
if err := validateSupportedARNType(aws.StringValue(u.in.Bucket)); err != nil {
return err
}
if u.cfg.Concurrency == 0 {
u.cfg.Concurrency = DefaultUploadConcurrency
}
if u.cfg.PartSize == 0 {
u.cfg.PartSize = DefaultUploadPartSize
}
if u.cfg.MaxUploadParts == 0 {
u.cfg.MaxUploadParts = MaxUploadParts
}
// Try to get the total size for some optimizations
if err := u.initSize(); err != nil {
return err
}
// If PartSize was changed or partPool was never set up then we need to
// allocate a new pool so that we return []byte slices of the correct size.
poolCap := u.cfg.Concurrency + 1
if u.cfg.partPool == nil || u.cfg.partPool.SliceSize() != u.cfg.PartSize {
u.cfg.partPool = newByteSlicePool(u.cfg.PartSize)
u.cfg.partPool.ModifyCapacity(poolCap)
} else {
u.cfg.partPool = &returnCapacityPoolCloser{byteSlicePool: u.cfg.partPool}
u.cfg.partPool.ModifyCapacity(poolCap)
}
return nil
}
// initSize tries to detect the total stream size, setting u.totalSize. If
// the size is not known, totalSize is set to -1.
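//
// A worked example of the adjustment below: a 100 GiB (107,374,182,400 byte)
// seekable body at the default 5 MiB part size would need 20,480 parts,
// exceeding MaxUploadParts (10,000), so PartSize is raised to
// 107374182400/10000 + 1 = 10,737,419 bytes.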
func (u *uploader) initSize() error {
u.totalSize = -1
switch r := u.in.Body.(type) {
case io.Seeker:
n, err := aws.SeekerLen(r)
if err != nil {
return err
}
u.totalSize = n
// Try to adjust partSize if it is too small and account for
// integer division truncation.
if u.totalSize/u.cfg.PartSize >= int64(u.cfg.MaxUploadParts) {
// Add one to the part size to account for remainders
// during the size calculation, e.g. an odd number of bytes.
u.cfg.PartSize = (u.totalSize / int64(u.cfg.MaxUploadParts)) + 1
}
}
return nil
}
// nextReader returns a seekable reader representing the next packet of data.
// This operation increases the shared u.readerPos counter, but note that it
// does not need to be wrapped in a mutex because nextReader is only called
// from the main thread.
func (u *uploader) nextReader() (io.ReadSeeker, int, func(), error) {
switch r := u.in.Body.(type) {
case readerAtSeeker:
var err error
n := u.cfg.PartSize
if u.totalSize >= 0 {
bytesLeft := u.totalSize - u.readerPos
if bytesLeft <= u.cfg.PartSize {
err = io.EOF
n = bytesLeft
}
}
var (
reader io.ReadSeeker
cleanup func()
)
reader = io.NewSectionReader(r, u.readerPos, n)
if u.cfg.BufferProvider != nil {
reader, cleanup = u.cfg.BufferProvider.GetWriteTo(reader)
} else {
cleanup = func() {}
}
u.readerPos += n
return reader, int(n), cleanup, err
default:
part, err := u.cfg.partPool.Get(u.ctx)
if err != nil {
return nil, 0, func() {}, err
}
n, err := readFillBuf(r, *part)
u.readerPos += int64(n)
cleanup := func() {
u.cfg.partPool.Put(part)
}
return bytes.NewReader((*part)[0:n]), n, cleanup, err
}
}
func readFillBuf(r io.Reader, b []byte) (offset int, err error) {
for offset < len(b) && err == nil {
var n int
n, err = r.Read(b[offset:])
offset += n
}
return offset, err
}
// singlePart contains upload logic for uploading a single chunk via
// a regular PutObject request. Multipart requests require at least two
// parts, or at least 5MB of data.
func (u *uploader) singlePart(r io.ReadSeeker, cleanup func()) (*UploadOutput, error) {
defer cleanup()
params := &s3.PutObjectInput{}
awsutil.Copy(params, u.in)
params.Body = r
// Need to use the request form because the URL generated in the request
// is used in the return value.
req, out := u.cfg.S3.PutObjectRequest(params)
req.SetContext(u.ctx)
req.ApplyOptions(u.cfg.RequestOptions...)
if err := req.Send(); err != nil {
return nil, err
}
url := req.HTTPRequest.URL.String()
return &UploadOutput{
Location: url,
VersionID: out.VersionId,
ETag: out.ETag,
}, nil
}
// internal structure to manage a specific multipart upload to S3.
type multiuploader struct {
*uploader
wg sync.WaitGroup
m sync.Mutex
err error
uploadID string
parts completedParts
}
// keeps track of a single chunk of data being sent to S3.
type chunk struct {
buf io.ReadSeeker
num int64
cleanup func()
}
// completedParts is a wrapper to make parts sortable by their part number,
// since S3 requires this list to be sent in sorted order.
type completedParts []*s3.CompletedPart
func (a completedParts) Len() int { return len(a) }
func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a completedParts) Less(i, j int) bool { return *a[i].PartNumber < *a[j].PartNumber }
// upload will perform a multipart upload using the firstBuf buffer containing
// the first chunk of data.
func (u *multiuploader) upload(firstBuf io.ReadSeeker, cleanup func()) (*UploadOutput, error) {
params := &s3.CreateMultipartUploadInput{}
awsutil.Copy(params, u.in)
// Create the multipart
resp, err := u.cfg.S3.CreateMultipartUploadWithContext(u.ctx, params, u.cfg.RequestOptions...)
if err != nil {
cleanup()
return nil, err
}
u.uploadID = *resp.UploadId
// Create the workers
ch := make(chan chunk, u.cfg.Concurrency)
for i := 0; i < u.cfg.Concurrency; i++ {
u.wg.Add(1)
go u.readChunk(ch)
}
// Send part 1 to the workers
var num int64 = 1
ch <- chunk{buf: firstBuf, num: num, cleanup: cleanup}
// Read and queue the rest of the parts
for u.geterr() == nil && err == nil {
var (
reader io.ReadSeeker
nextChunkLen int
ok bool
)
reader, nextChunkLen, cleanup, err = u.nextReader()
ok, err = u.shouldContinue(num, nextChunkLen, err)
if !ok {
cleanup()
if err != nil {
u.seterr(err)
}
break
}
num++
ch <- chunk{buf: reader, num: num, cleanup: cleanup}
}
// Close the channel, wait for workers, and complete upload
close(ch)
u.wg.Wait()
complete := u.complete()
if err := u.geterr(); err != nil {
return nil, &multiUploadError{
awsError: awserr.New(
"MultipartUpload",
"upload multipart failed",
err),
uploadID: u.uploadID,
}
}
// Create a presigned URL of the S3 Get Object in order to have parity with
// single part upload.
getReq, _ := u.cfg.S3.GetObjectRequest(&s3.GetObjectInput{
Bucket: u.in.Bucket,
Key: u.in.Key,
})
getReq.Config.Credentials = credentials.AnonymousCredentials
getReq.SetContext(u.ctx)
uploadLocation, _, _ := getReq.PresignRequest(1)
return &UploadOutput{
Location: uploadLocation,
VersionID: complete.VersionId,
UploadID: u.uploadID,
ETag: complete.ETag,
}, nil
}
func (u *multiuploader) shouldContinue(part int64, nextChunkLen int, err error) (bool, error) {
if err != nil && err != io.EOF {
return false, awserr.New("ReadRequestBody", "read multipart upload data failed", err)
}
if nextChunkLen == 0 {
// No need to upload an empty part; if the file was empty to start
// with, an empty single part would have been created and a multipart
// upload never started.
return false, nil
}
part++
// This upload exceeded the maximum number of supported parts; error now.
if part > int64(u.cfg.MaxUploadParts) || part > int64(MaxUploadParts) {
var msg string
if part > int64(u.cfg.MaxUploadParts) {
msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit",
u.cfg.MaxUploadParts)
} else {
msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit",
MaxUploadParts)
}
return false, awserr.New("TotalPartsExceeded", msg, nil)
}
return true, err
}
// readChunk runs in worker goroutines to pull chunks off of the ch channel
// and send() them as UploadPart requests.
func (u *multiuploader) readChunk(ch chan chunk) {
defer u.wg.Done()
for {
data, ok := <-ch
if !ok {
break
}
if u.geterr() == nil {
if err := u.send(data); err != nil {
u.seterr(err)
}
}
data.cleanup()
}
}
// send performs an UploadPart request and keeps track of the completed
// part information.
func (u *multiuploader) send(c chunk) error {
params := &s3.UploadPartInput{
Bucket: u.in.Bucket,
Key: u.in.Key,
Body: c.buf,
UploadId: &u.uploadID,
SSECustomerAlgorithm: u.in.SSECustomerAlgorithm,
SSECustomerKey: u.in.SSECustomerKey,
PartNumber: &c.num,
}
resp, err := u.cfg.S3.UploadPartWithContext(u.ctx, params, u.cfg.RequestOptions...)
if err != nil {
return err
}
n := c.num
completed := &s3.CompletedPart{ETag: resp.ETag, PartNumber: &n}
u.m.Lock()
u.parts = append(u.parts, completed)
u.m.Unlock()
return nil
}
// geterr is a thread-safe getter for the error object
func (u *multiuploader) geterr() error {
u.m.Lock()
defer u.m.Unlock()
return u.err
}
// seterr is a thread-safe setter for the error object
func (u *multiuploader) seterr(e error) {
u.m.Lock()
defer u.m.Unlock()
u.err = e
}
// fail will abort the multipart upload unless LeavePartsOnError is set to true.
func (u *multiuploader) fail() {
if u.cfg.LeavePartsOnError {
return
}
params := &s3.AbortMultipartUploadInput{
Bucket: u.in.Bucket,
Key: u.in.Key,
UploadId: &u.uploadID,
}
_, err := u.cfg.S3.AbortMultipartUploadWithContext(u.ctx, params, u.cfg.RequestOptions...)
if err != nil {
logMessage(u.cfg.S3, aws.LogDebug, fmt.Sprintf("failed to abort multipart upload, %v", err))
}
}
// complete successfully completes a multipart upload and returns the response.
func (u *multiuploader) complete() *s3.CompleteMultipartUploadOutput {
if u.geterr() != nil {
u.fail()
return nil
}
// Parts must be sorted in PartNumber order.
sort.Sort(u.parts)
params := &s3.CompleteMultipartUploadInput{
Bucket: u.in.Bucket,
Key: u.in.Key,
UploadId: &u.uploadID,
MultipartUpload: &s3.CompletedMultipartUpload{Parts: u.parts},
}
resp, err := u.cfg.S3.CompleteMultipartUploadWithContext(u.ctx, params, u.cfg.RequestOptions...)
if err != nil {
u.seterr(err)
u.fail()
}
return resp
}
type readerAtSeeker interface {
io.ReaderAt
io.ReadSeeker
}


@@ -0,0 +1,262 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package s3manager
import (
"io"
"time"
)
// UploadInput provides the input parameters for uploading a stream or buffer
// to an object in an Amazon S3 bucket. This type is similar to the s3
// package's PutObjectInput with the exception that the Body member is an
// io.Reader instead of an io.ReadSeeker.
//
// The ContentMD5 member for pre-computed MD5 checksums will be ignored for
// multipart uploads. For objects that will be uploaded in a single part,
// the ContentMD5 will be used.
//
// The Checksum members for pre-computed checksums will be ignored for
// multipart uploads. For objects that will be uploaded in a single part,
// the checksum member will be included in the request.
type UploadInput struct {
_ struct{} `locationName:"PutObjectRequest" type:"structure" payload:"Body"`
// The canned ACL to apply to the object. For more information, see Canned ACL
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
//
// This action is not supported by Amazon S3 on Outposts.
ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
// The readable body payload to send to S3.
Body io.Reader
// The bucket name to which the PUT action was initiated.
//
// When using this action with an access point, you must direct requests to
// the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
// When using this action with an access point through the Amazon Web Services
// SDKs, you provide the access point ARN in place of the bucket name. For more
// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
// in the Amazon S3 User Guide.
//
// When using this action with Amazon S3 on Outposts, you must direct requests
// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
// using this action with S3 on Outposts through the Amazon Web Services SDKs,
// you provide the Outposts bucket ARN in place of the bucket name. For more
// information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
// in the Amazon S3 User Guide.
//
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
// Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption
// with server-side encryption using AWS KMS (SSE-KMS). Setting this header
// to true causes Amazon S3 to use an S3 Bucket Key for object encryption with
// SSE-KMS.
//
// Specifying this header with a PUT action doesn't affect bucket-level settings
// for S3 Bucket Key.
BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"`
// Can be used to specify caching behavior along the request/reply chain. For
// more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9
// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9).
CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
// Indicates the algorithm used to create the checksum for the object when using
// the SDK. This header will not provide any additional functionality if not
// using the SDK. When sending this header, there must be a corresponding x-amz-checksum
// or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
// the HTTP status code 400 Bad Request. For more information, see Checking
// object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
//
// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
// parameter.
//
// The AWS SDK for Go v1 does not support automatic computing request payload
// checksum. This feature is available in the AWS SDK for Go v2. If a value
// is specified for this parameter, the matching algorithm's checksum member
// must be populated with the algorithm's checksum of the request payload.
ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
// This header can be used as a data integrity check to verify that the data
// received is the same data that was originally sent. This header specifies
// the base64-encoded, 32-bit CRC32 checksum of the object. For more information,
// see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"`
// This header can be used as a data integrity check to verify that the data
// received is the same data that was originally sent. This header specifies
// the base64-encoded, 32-bit CRC32C checksum of the object. For more information,
// see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"`
// This header can be used as a data integrity check to verify that the data
// received is the same data that was originally sent. This header specifies
// the base64-encoded, 160-bit SHA-1 digest of the object. For more information,
// see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"`
// This header can be used as a data integrity check to verify that the data
// received is the same data that was originally sent. This header specifies
// the base64-encoded, 256-bit SHA-256 digest of the object. For more information,
// see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"`
// Specifies presentational information for the object. For more information,
// see http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1).
ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
// Specifies what content encodings have been applied to the object and thus
// what decoding mechanisms must be applied to obtain the media-type referenced
// by the Content-Type header field. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11
// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11).
ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
// The language the content is in.
ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
// The base64-encoded 128-bit MD5 digest of the message (without the headers)
// according to RFC 1864. This header can be used as a message integrity check
// to verify that the data is the same data that was originally sent. Although
// it is optional, we recommend using the Content-MD5 mechanism as an end-to-end
// integrity check. For more information about REST request authentication,
// see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html).
//
// If the ContentMD5 is provided for a multipart upload, it will be ignored.
// For objects that will be uploaded in a single part, the ContentMD5 will
// be used.
ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"`
// A standard MIME type describing the format of the contents. For more information,
// see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17).
ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// The date and time at which the object is no longer cacheable. For more information,
// see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21).
Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"`
// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
//
// This action is not supported by Amazon S3 on Outposts.
GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
// Allows grantee to read the object data and its metadata.
//
// This action is not supported by Amazon S3 on Outposts.
GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
// Allows grantee to read the object ACL.
//
// This action is not supported by Amazon S3 on Outposts.
GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
// Allows grantee to write the ACL for the applicable object.
//
// This action is not supported by Amazon S3 on Outposts.
GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
// Object key for which the PUT action was initiated.
//
// Key is a required field
Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
// A map of metadata to store with the object in S3.
Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
// Specifies whether a legal hold will be applied to this object. For more information
// about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html).
ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
// The Object Lock mode that you want to apply to this object.
ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
// The date and time when you want this object's Object Lock to expire. Must
// be formatted as a timestamp parameter.
ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
// about downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
// Specifies the algorithm to use when encrypting the object (for example,
// AES256).
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
// data. This value is used to store the object and then it is discarded; Amazon
// S3 does not store the encryption key. The key must be appropriate for use
// with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
// header.
SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
// Amazon S3 uses this header for a message integrity check to ensure that the
// encryption key was transmitted without error.
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
// Specifies the Amazon Web Services KMS Encryption Context to use for object
// encryption. The value of this header is a base64-encoded UTF-8 string holding
// JSON with the encryption context key-value pairs.
SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
// If x-amz-server-side-encryption is present and has the value of aws:kms,
// this header specifies the ID of the Amazon Web Services Key Management Service
// (Amazon Web Services KMS) symmetrical customer managed key that was used
// for the object. If you specify x-amz-server-side-encryption:aws:kms, but
// do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses
// the Amazon Web Services managed key to protect the data. If the KMS key does
// not exist in the same account issuing the command, you must use the full
// ARN and not just the ID.
SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
// The server-side encryption algorithm used when storing this object in Amazon
// S3 (for example, AES256, aws:kms).
ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
// By default, Amazon S3 uses the STANDARD Storage Class to store newly created
// objects. The STANDARD storage class provides high durability and high availability.
// Depending on performance needs, you can specify a different Storage Class.
// Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information,
// see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
// in the Amazon S3 User Guide.
StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
// The tag-set for the object. The tag-set must be encoded as URL Query parameters.
// (For example, "Key1=Value1")
Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
// If the bucket is configured as a website, redirects requests for this object
// to another object in the same bucket or to an external URL. Amazon S3 stores
// the value of this header in the object metadata. For information about object
// metadata, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html).
//
// In the following example, the request header sets the redirect to an object
// (anotherPage.html) in the same bucket:
//
// x-amz-website-redirect-location: /anotherPage.html
//
// In the following example, the request header sets the object redirect to
// another website:
//
// x-amz-website-redirect-location: http://www.example.com/
//
// For more information about website hosting in Amazon S3, see Hosting Websites
// on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html)
// and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html).
WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
}
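// A minimal UploadInput sketch (bucket, key, and body values are
// placeholders):
//
//    input := &s3manager.UploadInput{
//        Bucket:      aws.String("bucket"),
//        Key:         aws.String("key"),
//        Body:        file,
//        ContentType: aws.String("application/octet-stream"),
//    }
//    result, err := uploader.Upload(input)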


@@ -0,0 +1,75 @@
package s3manager
import (
"bufio"
"io"
"sync"
"github.com/aws/aws-sdk-go/internal/sdkio"
)
// WriterReadFrom defines an interface implementing io.Writer and io.ReaderFrom
type WriterReadFrom interface {
io.Writer
io.ReaderFrom
}
// WriterReadFromProvider provides an implementation of io.ReadFrom for the given io.Writer
type WriterReadFromProvider interface {
GetReadFrom(writer io.Writer) (w WriterReadFrom, cleanup func())
}
type bufferedWriter interface {
WriterReadFrom
Flush() error
Reset(io.Writer)
}
type bufferedReadFrom struct {
bufferedWriter
}
func (b *bufferedReadFrom) ReadFrom(r io.Reader) (int64, error) {
n, err := b.bufferedWriter.ReadFrom(r)
if flushErr := b.Flush(); flushErr != nil && err == nil {
err = flushErr
}
return n, err
}
// PooledBufferedReadFromProvider is a WriterReadFromProvider that uses a sync.Pool
// to manage allocation and reuse of *bufio.Writer structures.
type PooledBufferedReadFromProvider struct {
pool sync.Pool
}
// NewPooledBufferedWriterReadFromProvider returns a new PooledBufferedReadFromProvider.
// Size is used to control the size of the underlying *bufio.Writer created for
// calls to GetReadFrom; sizes smaller than 32 KiB default to 64 KiB.
func NewPooledBufferedWriterReadFromProvider(size int) *PooledBufferedReadFromProvider {
if size < int(32*sdkio.KibiByte) {
size = int(64 * sdkio.KibiByte)
}
return &PooledBufferedReadFromProvider{
pool: sync.Pool{
New: func() interface{} {
return &bufferedReadFrom{bufferedWriter: bufio.NewWriterSize(nil, size)}
},
},
}
}
// GetReadFrom takes an io.Writer and wraps it with a type which satisfies the WriterReadFrom
// interface. Additionally, a cleanup function is provided which must be called
// after usage of the WriterReadFrom has been completed in order to allow the
// reuse of the *bufio.Writer.
func (p *PooledBufferedReadFromProvider) GetReadFrom(writer io.Writer) (r WriterReadFrom, cleanup func()) {
buffer := p.pool.Get().(*bufferedReadFrom)
buffer.Reset(writer)
r = buffer
cleanup = func() {
buffer.Reset(nil) // Reset to nil writer to release reference
p.pool.Put(buffer)
}
return r, cleanup
}
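// A hedged wiring sketch: using the provider as a Downloader's write-buffer
// strategy (sess is assumed to be an existing session.Session):
//
//    downloader := s3manager.NewDownloader(sess, func(d *s3manager.Downloader) {
//        d.BufferProvider = s3manager.NewPooledBufferedWriterReadFromProvider(1024 * 1024)
//    })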

107
vendor/github.com/aws/aws-sdk-go/service/s3/service.go generated vendored Normal file

@@ -0,0 +1,107 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package s3
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/client/metadata"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/signer/v4"
"github.com/aws/aws-sdk-go/private/protocol/restxml"
)
// S3 provides the API operation methods for making requests to
// Amazon Simple Storage Service. See this package's package overview docs
// for details on the service.
//
// S3 methods are safe to use concurrently. It is not safe to
// mutate any of the struct's properties though.
type S3 struct {
*client.Client
}
// Used for custom client initialization logic
var initClient func(*client.Client)
// Used for custom request initialization logic
var initRequest func(*request.Request)
// Service information constants
const (
ServiceName = "s3" // Name of service.
EndpointsID = ServiceName // ID to lookup a service endpoint with.
ServiceID = "S3" // ServiceID is a unique identifier of a specific service.
)
// New creates a new instance of the S3 client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
// mySession := session.Must(session.NewSession())
//
// // Create a S3 client from just a session.
// svc := s3.New(mySession)
//
// // Create a S3 client with additional configuration
// svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 {
c := p.ClientConfig(EndpointsID, cfgs...)
if c.SigningNameDerived || len(c.SigningName) == 0 {
c.SigningName = "s3"
}
return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion)
}
// newClient creates, initializes and returns a new service client instance.
func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *S3 {
svc := &S3{
Client: client.New(
cfg,
metadata.ClientInfo{
ServiceName: ServiceName,
ServiceID: ServiceID,
SigningName: signingName,
SigningRegion: signingRegion,
PartitionID: partitionID,
Endpoint: endpoint,
APIVersion: "2006-03-01",
ResolvedRegion: resolvedRegion,
},
handlers,
),
}
// Handlers
svc.Handlers.Sign.PushBackNamed(v4.BuildNamedHandler(v4.SignRequestHandler.Name, func(s *v4.Signer) {
s.DisableURIPathEscaping = true
}))
svc.Handlers.Build.PushBackNamed(restxml.BuildHandler)
svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler)
svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler)
svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler)
svc.Handlers.BuildStream.PushBackNamed(restxml.BuildHandler)
svc.Handlers.UnmarshalStream.PushBackNamed(restxml.UnmarshalHandler)
// Run custom client initialization if present
if initClient != nil {
initClient(svc.Client)
}
return svc
}
// newRequest creates a new request for a S3 operation and runs any
// custom request initialization.
func (c *S3) newRequest(op *request.Operation, params, data interface{}) *request.Request {
req := c.NewRequest(op, params, data)
// Run custom request initialization if present
if initRequest != nil {
initRequest(req)
}
return req
}
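// For S3-compatible storage, the endpoint and addressing style can be
// overridden through the client config. A hedged sketch (the endpoint URL is
// a placeholder):
//
//    svc := s3.New(mySession, aws.NewConfig().
//        WithEndpoint("https://s3.example.com").
//        WithS3ForcePathStyle(true))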

84
vendor/github.com/aws/aws-sdk-go/service/s3/sse.go generated vendored Normal file

@@ -0,0 +1,84 @@
package s3
import (
"crypto/md5"
"encoding/base64"
"net/http"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
)
var errSSERequiresSSL = awserr.New("ConfigError", "cannot send SSE keys over HTTP.", nil)
func validateSSERequiresSSL(r *request.Request) {
if r.HTTPRequest.URL.Scheme == "https" {
return
}
if iface, ok := r.Params.(sseCustomerKeyGetter); ok {
if len(iface.getSSECustomerKey()) > 0 {
r.Error = errSSERequiresSSL
return
}
}
if iface, ok := r.Params.(copySourceSSECustomerKeyGetter); ok {
if len(iface.getCopySourceSSECustomerKey()) > 0 {
r.Error = errSSERequiresSSL
return
}
}
}
const (
sseKeyHeader = "x-amz-server-side-encryption-customer-key"
sseKeyMD5Header = sseKeyHeader + "-md5"
)
func computeSSEKeyMD5(r *request.Request) {
var key string
if g, ok := r.Params.(sseCustomerKeyGetter); ok {
key = g.getSSECustomerKey()
}
computeKeyMD5(sseKeyHeader, sseKeyMD5Header, key, r.HTTPRequest)
}
const (
copySrcSSEKeyHeader = "x-amz-copy-source-server-side-encryption-customer-key"
copySrcSSEKeyMD5Header = copySrcSSEKeyHeader + "-md5"
)
func computeCopySourceSSEKeyMD5(r *request.Request) {
var key string
if g, ok := r.Params.(copySourceSSECustomerKeyGetter); ok {
key = g.getCopySourceSSECustomerKey()
}
computeKeyMD5(copySrcSSEKeyHeader, copySrcSSEKeyMD5Header, key, r.HTTPRequest)
}
func computeKeyMD5(keyHeader, keyMD5Header, key string, r *http.Request) {
if len(key) == 0 {
// Backwards compatibility where the user just set the header value instead
// of using the API parameter, or setting the header value for an
// operation without the parameters modeled.
key = r.Header.Get(keyHeader)
if len(key) == 0 {
return
}
// For backwards compatibility, the header's value is not base64 encoded,
// and needs to be encoded and updated by the SDK's customizations.
b64Key := base64.StdEncoding.EncodeToString([]byte(key))
r.Header.Set(keyHeader, b64Key)
}
// Only update Key's MD5 if not already set.
if len(r.Header.Get(keyMD5Header)) == 0 {
sum := md5.Sum([]byte(key))
keyMD5 := base64.StdEncoding.EncodeToString(sum[:])
r.Header.Set(keyMD5Header, keyMD5)
}
}
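As a quick illustration of the header values computeKeyMD5 produces, a sketch using a hypothetical 32-byte SSE-C key (the key value is illustrative only):

package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

func main() {
	// Hypothetical 256-bit SSE-C key; S3 expects exactly 32 bytes.
	key := []byte("0123456789abcdef0123456789abcdef")
	sum := md5.Sum(key)
	// The key header carries the base64-encoded key; the -md5 header carries
	// the base64-encoded MD5 of the raw key bytes, as computeKeyMD5 does above.
	fmt.Println("x-amz-server-side-encryption-customer-key:",
		base64.StdEncoding.EncodeToString(key))
	fmt.Println("x-amz-server-side-encryption-customer-key-md5:",
		base64.StdEncoding.EncodeToString(sum[:]))
}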


@@ -0,0 +1,47 @@
package s3
import (
"bytes"
"io"
"io/ioutil"
"net/http"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/internal/sdkio"
)
func copyMultipartStatusOKUnmarshalError(r *request.Request) {
b, err := ioutil.ReadAll(r.HTTPResponse.Body)
r.HTTPResponse.Body.Close()
if err != nil {
r.Error = awserr.NewRequestFailure(
awserr.New(request.ErrCodeSerialization, "unable to read response body", err),
r.HTTPResponse.StatusCode,
r.RequestID,
)
// Note, some middleware later in the stack like restxml.Unmarshal expect a valid, non-closed Body
// even in case of an error, so we replace it with an empty Reader.
r.HTTPResponse.Body = ioutil.NopCloser(bytes.NewBuffer(nil))
return
}
body := bytes.NewReader(b)
r.HTTPResponse.Body = ioutil.NopCloser(body)
defer body.Seek(0, sdkio.SeekStart)
unmarshalError(r)
if err, ok := r.Error.(awserr.Error); ok && err != nil {
if err.Code() == request.ErrCodeSerialization &&
err.OrigErr() != io.EOF {
r.Error = nil
return
}
// if empty payload
if err.OrigErr() == io.EOF {
r.HTTPResponse.StatusCode = http.StatusInternalServerError
} else {
r.HTTPResponse.StatusCode = http.StatusServiceUnavailable
}
}
}
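For context, some S3 multipart operations can return HTTP 200 OK whose body is actually an error document rather than the expected result, which is why the handler above buffers the body and re-runs error unmarshaling. A sketch of decoding such a payload (contents illustrative):

package main

import (
	"encoding/xml"
	"fmt"
)

func main() {
	// Illustrative 200 OK body that carries an error document instead of
	// the expected operation result.
	payload := `<Error><Code>InternalError</Code><Message>We encountered an internal error.</Message></Error>`
	var e struct {
		Code    string `xml:"Code"`
		Message string `xml:"Message"`
	}
	if err := xml.Unmarshal([]byte(payload), &e); err != nil {
		panic(err)
	}
	fmt.Println(e.Code, e.Message)
}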


@@ -0,0 +1,114 @@
package s3
import (
"bytes"
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"net/http"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
)
type xmlErrorResponse struct {
XMLName xml.Name `xml:"Error"`
Code string `xml:"Code"`
Message string `xml:"Message"`
}
func unmarshalError(r *request.Request) {
defer r.HTTPResponse.Body.Close()
defer io.Copy(ioutil.Discard, r.HTTPResponse.Body)
// The bucket exists in a different region, and the request needs
// to be made to the correct region.
if r.HTTPResponse.StatusCode == http.StatusMovedPermanently {
msg := fmt.Sprintf(
"incorrect region, the bucket is not in '%s' region at endpoint '%s'",
aws.StringValue(r.Config.Region),
aws.StringValue(r.Config.Endpoint),
)
if v := r.HTTPResponse.Header.Get("x-amz-bucket-region"); len(v) != 0 {
msg += fmt.Sprintf(", bucket is in '%s' region", v)
}
r.Error = awserr.NewRequestFailure(
awserr.New("BucketRegionError", msg, nil),
r.HTTPResponse.StatusCode,
r.RequestID,
)
return
}
// Attempt to parse error from body if it is known
var errResp xmlErrorResponse
var err error
if r.HTTPResponse.StatusCode >= 200 && r.HTTPResponse.StatusCode < 300 {
err = s3unmarshalXMLError(&errResp, r.HTTPResponse.Body)
} else {
err = xmlutil.UnmarshalXMLError(&errResp, r.HTTPResponse.Body)
}
if err != nil {
var errorMsg string
if err == io.EOF {
errorMsg = "empty response payload"
} else {
errorMsg = "failed to unmarshal error message"
}
r.Error = awserr.NewRequestFailure(
awserr.New(request.ErrCodeSerialization,
errorMsg, err),
r.HTTPResponse.StatusCode,
r.RequestID,
)
return
}
// Fallback to status code converted to message if still no error code
if len(errResp.Code) == 0 {
statusText := http.StatusText(r.HTTPResponse.StatusCode)
errResp.Code = strings.Replace(statusText, " ", "", -1)
errResp.Message = statusText
}
r.Error = awserr.NewRequestFailure(
awserr.New(errResp.Code, errResp.Message, err),
r.HTTPResponse.StatusCode,
r.RequestID,
)
}
// A RequestFailure provides access to the S3 Request ID and Host ID values
// returned from API operation errors. Getting the error as a string will
// return the formatted error with the same information as awserr.RequestFailure,
// while also adding the HostID value from the response.
type RequestFailure interface {
awserr.RequestFailure
// Host ID is the S3 Host ID needed for debugging, and for contacting support
HostID() string
}
// s3unmarshalXMLError is an S3-specific XML error unmarshaler
// for 200 OK errors and response payloads.
// This function differs from xmlutil.UnmarshalXMLError in that it does not
// ignore the EOF error, and passes it up to the caller.
// Related to the bug fix for `s3 200 OK response with empty payload`.
func s3unmarshalXMLError(v interface{}, stream io.Reader) error {
var errBuf bytes.Buffer
body := io.TeeReader(stream, &errBuf)
err := xml.NewDecoder(body).Decode(v)
if err != nil && err != io.EOF {
return awserr.NewUnmarshalError(err,
"failed to unmarshal error message", errBuf.Bytes())
}
return err
}
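Callers can inspect the failures produced by unmarshalError with type assertions; a sketch with a hypothetical helper, where the constructed error stands in for one returned by an S3 call (whether a given error also carries a Host ID is an assumption, so that check is optional):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/s3"
)

// describeS3Error is a hypothetical helper that unpacks errors produced by
// the unmarshalError handler above.
func describeS3Error(err error) {
	if reqErr, ok := err.(awserr.RequestFailure); ok {
		fmt.Println(reqErr.StatusCode(), reqErr.Code(), reqErr.Message(), reqErr.RequestID())
	}
	// Not every error carries an S3 Host ID; treat it as optional.
	if s3Err, ok := err.(s3.RequestFailure); ok {
		fmt.Println("host id:", s3Err.HostID())
	}
}

func main() {
	err := awserr.NewRequestFailure(
		awserr.New("NoSuchBucket", "The specified bucket does not exist", nil),
		404, "EXAMPLE-REQUEST-ID")
	describeS3Error(err)
}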

214
vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go generated vendored Normal file

@@ -0,0 +1,214 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package s3
import (
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
)
// WaitUntilBucketExists uses the Amazon S3 API operation
// HeadBucket to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window, an error will
// be returned.
func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error {
return c.WaitUntilBucketExistsWithContext(aws.BackgroundContext(), input)
}
// WaitUntilBucketExistsWithContext is an extended version of WaitUntilBucketExists,
// with support for passing in a context and options to configure the
// Waiter and the underlying request options.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) WaitUntilBucketExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error {
w := request.Waiter{
Name: "WaitUntilBucketExists",
MaxAttempts: 20,
Delay: request.ConstantWaiterDelay(5 * time.Second),
Acceptors: []request.WaiterAcceptor{
{
State: request.SuccessWaiterState,
Matcher: request.StatusWaiterMatch,
Expected: 200,
},
{
State: request.SuccessWaiterState,
Matcher: request.StatusWaiterMatch,
Expected: 301,
},
{
State: request.SuccessWaiterState,
Matcher: request.StatusWaiterMatch,
Expected: 403,
},
{
State: request.RetryWaiterState,
Matcher: request.StatusWaiterMatch,
Expected: 404,
},
},
Logger: c.Config.Logger,
NewRequest: func(opts []request.Option) (*request.Request, error) {
var inCpy *HeadBucketInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.HeadBucketRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
w.ApplyOptions(opts...)
return w.WaitWithContext(ctx)
}
// WaitUntilBucketNotExists uses the Amazon S3 API operation
// HeadBucket to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window, an error will
// be returned.
func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error {
return c.WaitUntilBucketNotExistsWithContext(aws.BackgroundContext(), input)
}
// WaitUntilBucketNotExistsWithContext is an extended version of WaitUntilBucketNotExists,
// with support for passing in a context and options to configure the
// Waiter and the underlying request options.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) WaitUntilBucketNotExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error {
w := request.Waiter{
Name: "WaitUntilBucketNotExists",
MaxAttempts: 20,
Delay: request.ConstantWaiterDelay(5 * time.Second),
Acceptors: []request.WaiterAcceptor{
{
State: request.SuccessWaiterState,
Matcher: request.StatusWaiterMatch,
Expected: 404,
},
},
Logger: c.Config.Logger,
NewRequest: func(opts []request.Option) (*request.Request, error) {
var inCpy *HeadBucketInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.HeadBucketRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
w.ApplyOptions(opts...)
return w.WaitWithContext(ctx)
}
// WaitUntilObjectExists uses the Amazon S3 API operation
// HeadObject to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window, an error will
// be returned.
func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error {
return c.WaitUntilObjectExistsWithContext(aws.BackgroundContext(), input)
}
// WaitUntilObjectExistsWithContext is an extended version of WaitUntilObjectExists,
// with support for passing in a context and options to configure the
// Waiter and the underlying request options.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) WaitUntilObjectExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error {
w := request.Waiter{
Name: "WaitUntilObjectExists",
MaxAttempts: 20,
Delay: request.ConstantWaiterDelay(5 * time.Second),
Acceptors: []request.WaiterAcceptor{
{
State: request.SuccessWaiterState,
Matcher: request.StatusWaiterMatch,
Expected: 200,
},
{
State: request.RetryWaiterState,
Matcher: request.StatusWaiterMatch,
Expected: 404,
},
},
Logger: c.Config.Logger,
NewRequest: func(opts []request.Option) (*request.Request, error) {
var inCpy *HeadObjectInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.HeadObjectRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
w.ApplyOptions(opts...)
return w.WaitWithContext(ctx)
}
// WaitUntilObjectNotExists uses the Amazon S3 API operation
// HeadObject to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window, an error will
// be returned.
func (c *S3) WaitUntilObjectNotExists(input *HeadObjectInput) error {
return c.WaitUntilObjectNotExistsWithContext(aws.BackgroundContext(), input)
}
// WaitUntilObjectNotExistsWithContext is an extended version of WaitUntilObjectNotExists,
// with support for passing in a context and options to configure the
// Waiter and the underlying request options.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) WaitUntilObjectNotExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error {
w := request.Waiter{
Name: "WaitUntilObjectNotExists",
MaxAttempts: 20,
Delay: request.ConstantWaiterDelay(5 * time.Second),
Acceptors: []request.WaiterAcceptor{
{
State: request.SuccessWaiterState,
Matcher: request.StatusWaiterMatch,
Expected: 404,
},
},
Logger: c.Config.Logger,
NewRequest: func(opts []request.Option) (*request.Request, error) {
var inCpy *HeadObjectInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.HeadObjectRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
w.ApplyOptions(opts...)
return w.WaitWithContext(ctx)
}
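A hedged usage sketch for these waiters: waiting for a hypothetical object with a tighter polling schedule than the 5s/20-attempt defaults above (bucket and key names are placeholders):

package main

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)
	// Poll every 2s for up to 40 attempts instead of the defaults.
	err := svc.WaitUntilObjectExistsWithContext(aws.BackgroundContext(),
		&s3.HeadObjectInput{
			Bucket: aws.String("example-bucket"),
			Key:    aws.String("example-key"),
		},
		request.WithWaiterDelay(request.ConstantWaiterDelay(2*time.Second)),
		request.WithWaiterMaxAttempts(40),
	)
	if err != nil {
		panic(err)
	}
}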

1354
vendor/github.com/aws/aws-sdk-go/service/sso/api.go generated vendored Normal file

File diff suppressed because it is too large

44
vendor/github.com/aws/aws-sdk-go/service/sso/doc.go generated vendored Normal file

@@ -0,0 +1,44 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
// Package sso provides the client and types for making API
// requests to AWS Single Sign-On.
//
// AWS Single Sign-On Portal is a web service that makes it easy for you to
// assign user access to AWS SSO resources such as the user portal. Users can
// get AWS account applications and roles assigned to them and get federated
// into the application.
//
// For general information about AWS SSO, see What is AWS Single Sign-On? (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html)
// in the AWS SSO User Guide.
//
// This API reference guide describes the AWS SSO Portal operations that you
// can call programmatically and includes detailed information on data types
// and errors.
//
// AWS provides SDKs that consist of libraries and sample code for various programming
// languages and platforms, such as Java, Ruby, .Net, iOS, or Android. The SDKs
// provide a convenient way to create programmatic access to AWS SSO and other
// AWS services. For more information about the AWS SDKs, including how to download
// and install them, see Tools for Amazon Web Services (http://aws.amazon.com/tools/).
//
// See https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10 for more information on this service.
//
// See sso package documentation for more information.
// https://docs.aws.amazon.com/sdk-for-go/api/service/sso/
//
// Using the Client
//
// To contact AWS Single Sign-On with the SDK use the New function to create
// a new service client. With that client you can make API requests to the service.
// These clients are safe to use concurrently.
//
// See the SDK's documentation for more information on how to use the SDK.
// https://docs.aws.amazon.com/sdk-for-go/api/
//
// See aws.Config documentation for more information on configuring SDK clients.
// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
//
// See the AWS Single Sign-On client SSO for more
// information on creating client for this service.
// https://docs.aws.amazon.com/sdk-for-go/api/service/sso/#New
package sso
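A hedged usage sketch for this client; the access token is a placeholder and would come from an earlier sign-on step (e.g., the SSO OIDC flow), which is not part of this package:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sso"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := sso.New(sess)
	// Placeholder token; real tokens come from a prior authentication step.
	input := &sso.ListAccountsInput{AccessToken: aws.String("example-access-token")}
	err := svc.ListAccountsPages(input, func(page *sso.ListAccountsOutput, lastPage bool) bool {
		for _, a := range page.AccountList {
			fmt.Println(aws.StringValue(a.AccountId), aws.StringValue(a.AccountName))
		}
		return true // continue to the next page
	})
	if err != nil {
		panic(err)
	}
}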

44
vendor/github.com/aws/aws-sdk-go/service/sso/errors.go generated vendored Normal file

@@ -0,0 +1,44 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package sso
import (
"github.com/aws/aws-sdk-go/private/protocol"
)
const (
// ErrCodeInvalidRequestException for service response error code
// "InvalidRequestException".
//
// Indicates that a problem occurred with the input to the request. For example,
// a required parameter might be missing or out of range.
ErrCodeInvalidRequestException = "InvalidRequestException"
// ErrCodeResourceNotFoundException for service response error code
// "ResourceNotFoundException".
//
// The specified resource doesn't exist.
ErrCodeResourceNotFoundException = "ResourceNotFoundException"
// ErrCodeTooManyRequestsException for service response error code
// "TooManyRequestsException".
//
// Indicates that the request is being made too frequently and is more than
// what the server can handle.
ErrCodeTooManyRequestsException = "TooManyRequestsException"
// ErrCodeUnauthorizedException for service response error code
// "UnauthorizedException".
//
// Indicates that the request is not authorized. This can happen due to an invalid
// access token in the request.
ErrCodeUnauthorizedException = "UnauthorizedException"
)
var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{
"InvalidRequestException": newErrorInvalidRequestException,
"ResourceNotFoundException": newErrorResourceNotFoundException,
"TooManyRequestsException": newErrorTooManyRequestsException,
"UnauthorizedException": newErrorUnauthorizedException,
}
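A sketch of branching on these error codes, with a hypothetical helper standing in for real call-site handling:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/sso"
)

// handleSSOError is a hypothetical helper mapping service error codes to actions.
func handleSSOError(err error) {
	aerr, ok := err.(awserr.Error)
	if !ok {
		return
	}
	switch aerr.Code() {
	case sso.ErrCodeUnauthorizedException:
		fmt.Println("access token rejected; re-authenticate")
	case sso.ErrCodeTooManyRequestsException:
		fmt.Println("throttled; back off before retrying")
	case sso.ErrCodeResourceNotFoundException:
		fmt.Println("requested resource does not exist")
	default:
		fmt.Println("unhandled error:", aerr.Code())
	}
}

func main() {
	handleSSOError(awserr.New(sso.ErrCodeUnauthorizedException, "token expired", nil))
}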

105
vendor/github.com/aws/aws-sdk-go/service/sso/service.go generated vendored Normal file

@@ -0,0 +1,105 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package sso
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/client/metadata"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/signer/v4"
"github.com/aws/aws-sdk-go/private/protocol"
"github.com/aws/aws-sdk-go/private/protocol/restjson"
)
// SSO provides the API operation methods for making requests to
// AWS Single Sign-On. See this package's package overview docs
// for details on the service.
//
// SSO methods are safe to use concurrently. It is not safe to
// modify any of the struct's properties though.
type SSO struct {
*client.Client
}
// Used for custom client initialization logic
var initClient func(*client.Client)
// Used for custom request initialization logic
var initRequest func(*request.Request)
// Service information constants
const (
ServiceName = "SSO" // Name of service.
EndpointsID = "portal.sso" // ID to lookup a service endpoint with.
ServiceID = "SSO" // ServiceID is a unique identifier of a specific service.
)
// New creates a new instance of the SSO client with a session.
// If additional configuration is needed for the client instance, use the optional
// aws.Config parameter to add your extra config.
//
// Example:
// mySession := session.Must(session.NewSession())
//
// // Create a SSO client from just a session.
// svc := sso.New(mySession)
//
// // Create a SSO client with additional configuration
// svc := sso.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *SSO {
c := p.ClientConfig(EndpointsID, cfgs...)
if c.SigningNameDerived || len(c.SigningName) == 0 {
c.SigningName = "awsssoportal"
}
return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion)
}
// newClient creates, initializes and returns a new service client instance.
func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *SSO {
svc := &SSO{
Client: client.New(
cfg,
metadata.ClientInfo{
ServiceName: ServiceName,
ServiceID: ServiceID,
SigningName: signingName,
SigningRegion: signingRegion,
PartitionID: partitionID,
Endpoint: endpoint,
APIVersion: "2019-06-10",
ResolvedRegion: resolvedRegion,
},
handlers,
),
}
// Handlers
svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
svc.Handlers.Build.PushBackNamed(restjson.BuildHandler)
svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler)
svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler)
svc.Handlers.UnmarshalError.PushBackNamed(
protocol.NewUnmarshalErrorHandler(restjson.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(),
)
// Run custom client initialization if present
if initClient != nil {
initClient(svc.Client)
}
return svc
}
// newRequest creates a new request for a SSO operation and runs any
// custom request initialization.
func (c *SSO) newRequest(op *request.Operation, params, data interface{}) *request.Request {
req := c.NewRequest(op, params, data)
// Run custom request initialization if present
if initRequest != nil {
initRequest(req)
}
return req
}


@@ -0,0 +1,86 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
// Package ssoiface provides an interface to enable mocking the AWS Single Sign-On service client
// for testing your code.
//
// It is important to note that this interface will have breaking changes
// when the service model is updated and adds new API operations, paginators,
// and waiters.
package ssoiface
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/sso"
)
// SSOAPI provides an interface to enable mocking the
// sso.SSO service client's API operations,
// paginators, and waiters. This makes unit testing your code that calls out
// to the SDK's service client easier.
//
// The best way to use this interface is to stub out the SDK service client's
// calls for unit testing your code with the SDK, without needing
// to inject custom request handlers into the SDK's request pipeline.
//
// // myFunc uses an SDK service client to make a request to
// // AWS Single Sign-On.
// func myFunc(svc ssoiface.SSOAPI) bool {
// // Make svc.GetRoleCredentials request
// }
//
// func main() {
// sess := session.New()
// svc := sso.New(sess)
//
// myFunc(svc)
// }
//
// In your _test.go file:
//
// // Define a mock struct to be used in your unit tests of myFunc.
// type mockSSOClient struct {
// ssoiface.SSOAPI
// }
// func (m *mockSSOClient) GetRoleCredentials(input *sso.GetRoleCredentialsInput) (*sso.GetRoleCredentialsOutput, error) {
// // mock response/functionality
// }
//
// func TestMyFunc(t *testing.T) {
// // Setup Test
// mockSvc := &mockSSOClient{}
//
// myfunc(mockSvc)
//
// // Verify myFunc's functionality
// }
//
// It is important to note that this interface will have breaking changes
// when the service model is updated and adds new API operations, paginators,
// and waiters. It's suggested to use the pattern above for testing, or to use
// tooling to generate mocks to satisfy the interfaces.
type SSOAPI interface {
GetRoleCredentials(*sso.GetRoleCredentialsInput) (*sso.GetRoleCredentialsOutput, error)
GetRoleCredentialsWithContext(aws.Context, *sso.GetRoleCredentialsInput, ...request.Option) (*sso.GetRoleCredentialsOutput, error)
GetRoleCredentialsRequest(*sso.GetRoleCredentialsInput) (*request.Request, *sso.GetRoleCredentialsOutput)
ListAccountRoles(*sso.ListAccountRolesInput) (*sso.ListAccountRolesOutput, error)
ListAccountRolesWithContext(aws.Context, *sso.ListAccountRolesInput, ...request.Option) (*sso.ListAccountRolesOutput, error)
ListAccountRolesRequest(*sso.ListAccountRolesInput) (*request.Request, *sso.ListAccountRolesOutput)
ListAccountRolesPages(*sso.ListAccountRolesInput, func(*sso.ListAccountRolesOutput, bool) bool) error
ListAccountRolesPagesWithContext(aws.Context, *sso.ListAccountRolesInput, func(*sso.ListAccountRolesOutput, bool) bool, ...request.Option) error
ListAccounts(*sso.ListAccountsInput) (*sso.ListAccountsOutput, error)
ListAccountsWithContext(aws.Context, *sso.ListAccountsInput, ...request.Option) (*sso.ListAccountsOutput, error)
ListAccountsRequest(*sso.ListAccountsInput) (*request.Request, *sso.ListAccountsOutput)
ListAccountsPages(*sso.ListAccountsInput, func(*sso.ListAccountsOutput, bool) bool) error
ListAccountsPagesWithContext(aws.Context, *sso.ListAccountsInput, func(*sso.ListAccountsOutput, bool) bool, ...request.Option) error
Logout(*sso.LogoutInput) (*sso.LogoutOutput, error)
LogoutWithContext(aws.Context, *sso.LogoutInput, ...request.Option) (*sso.LogoutOutput, error)
LogoutRequest(*sso.LogoutInput) (*request.Request, *sso.LogoutOutput)
}
var _ SSOAPI = (*sso.SSO)(nil)
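Following the pattern described in the comment above, a compilable test sketch; hasCredentials is a hypothetical function under test, and the empty input is acceptable here only because the mock ignores it:

package example

import (
	"testing"

	"github.com/aws/aws-sdk-go/service/sso"
	"github.com/aws/aws-sdk-go/service/sso/ssoiface"
)

// hasCredentials is a hypothetical function that depends on the interface
// rather than the concrete client, so it can be tested with a mock.
func hasCredentials(svc ssoiface.SSOAPI) bool {
	out, err := svc.GetRoleCredentials(&sso.GetRoleCredentialsInput{})
	return err == nil && out.RoleCredentials != nil
}

type mockSSOClient struct {
	ssoiface.SSOAPI
}

func (m *mockSSOClient) GetRoleCredentials(in *sso.GetRoleCredentialsInput) (*sso.GetRoleCredentialsOutput, error) {
	// Mock response; a real client would return actual role credentials.
	return &sso.GetRoleCredentialsOutput{RoleCredentials: &sso.RoleCredentials{}}, nil
}

func TestHasCredentials(t *testing.T) {
	if !hasCredentials(&mockSSOClient{}) {
		t.Fatal("expected credentials from mock")
	}
}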

3439
vendor/github.com/aws/aws-sdk-go/service/sts/api.go generated vendored Normal file

File diff suppressed because it is too large


@@ -0,0 +1,11 @@
package sts
import "github.com/aws/aws-sdk-go/aws/request"
func init() {
initRequest = customizeRequest
}
func customizeRequest(r *request.Request) {
r.RetryErrorCodes = append(r.RetryErrorCodes, ErrCodeIDPCommunicationErrorException)
}
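To see the customization's effect, a sketch that builds (but does not send) a request; all input values are placeholders:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := sts.New(sess)
	req, _ := svc.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{
		RoleArn:          aws.String("arn:aws:iam::123456789012:role/example"),
		RoleSessionName:  aws.String("example-session"),
		WebIdentityToken: aws.String("example-token"),
	})
	// customizeRequest above appends IDPCommunicationError, so transient
	// identity-provider failures are retried by the SDK.
	fmt.Println(req.RetryErrorCodes)
}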

32
vendor/github.com/aws/aws-sdk-go/service/sts/doc.go generated vendored Normal file

@@ -0,0 +1,32 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
// Package sts provides the client and types for making API
// requests to AWS Security Token Service.
//
// Security Token Service (STS) enables you to request temporary, limited-privilege
// credentials for Identity and Access Management (IAM) users or for users that
// you authenticate (federated users). This guide provides descriptions of the
// STS API. For more information about using this service, see Temporary Security
// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
//
// See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service.
//
// See sts package documentation for more information.
// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/
//
// Using the Client
//
// To contact AWS Security Token Service with the SDK use the New function to create
// a new service client. With that client you can make API requests to the service.
// These clients are safe to use concurrently.
//
// See the SDK's documentation for more information on how to use the SDK.
// https://docs.aws.amazon.com/sdk-for-go/api/
//
// See aws.Config documentation for more information on configuring SDK clients.
// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
//
// See the AWS Security Token Service client STS for more
// information on creating client for this service.
// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#New
package sts

84
vendor/github.com/aws/aws-sdk-go/service/sts/errors.go generated vendored Normal file

@@ -0,0 +1,84 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package sts
const (
// ErrCodeExpiredTokenException for service response error code
// "ExpiredTokenException".
//
// The web identity token that was passed is expired or is not valid. Get a
// new identity token from the identity provider and then retry the request.
ErrCodeExpiredTokenException = "ExpiredTokenException"
// ErrCodeIDPCommunicationErrorException for service response error code
// "IDPCommunicationError".
//
// The request could not be fulfilled because the identity provider (IDP) that
// was asked to verify the incoming identity token could not be reached. This
// is often a transient error caused by network conditions. Retry the request
// a limited number of times so that you don't exceed the request rate. If the
// error persists, the identity provider might be down or not responding.
ErrCodeIDPCommunicationErrorException = "IDPCommunicationError"
// ErrCodeIDPRejectedClaimException for service response error code
// "IDPRejectedClaim".
//
// The identity provider (IdP) reported that authentication failed. This might
// be because the claim is invalid.
//
// If this error is returned for the AssumeRoleWithWebIdentity operation, it
// can also mean that the claim has expired or has been explicitly revoked.
ErrCodeIDPRejectedClaimException = "IDPRejectedClaim"
// ErrCodeInvalidAuthorizationMessageException for service response error code
// "InvalidAuthorizationMessageException".
//
// The error returned if the message passed to DecodeAuthorizationMessage was
// invalid. This can happen if the token contains invalid characters, such as
// linebreaks.
ErrCodeInvalidAuthorizationMessageException = "InvalidAuthorizationMessageException"
// ErrCodeInvalidIdentityTokenException for service response error code
// "InvalidIdentityToken".
//
// The web identity token that was passed could not be validated by Amazon Web
// Services. Get a new identity token from the identity provider and then retry
// the request.
ErrCodeInvalidIdentityTokenException = "InvalidIdentityToken"
// ErrCodeMalformedPolicyDocumentException for service response error code
// "MalformedPolicyDocument".
//
// The request was rejected because the policy document was malformed. The error
// message describes the specific error.
ErrCodeMalformedPolicyDocumentException = "MalformedPolicyDocument"
// ErrCodePackedPolicyTooLargeException for service response error code
// "PackedPolicyTooLarge".
//
// The request was rejected because the total packed size of the session policies
// and session tags combined was too large. An Amazon Web Services conversion
// compresses the session policy document, session policy ARNs, and session
// tags into a packed binary format that has a separate limit. The error message
// indicates by percentage how close the policies and tags are to the upper
// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
// in the IAM User Guide.
//
// You could receive this error even though you meet other defined session policy
// and session tag limits. For more information, see IAM and STS Entity Character
// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length)
// in the IAM User Guide.
ErrCodePackedPolicyTooLargeException = "PackedPolicyTooLarge"
// ErrCodeRegionDisabledException for service response error code
// "RegionDisabledException".
//
// STS is not activated in the requested region for the account that is being
// asked to generate credentials. The account administrator must use the IAM
// console to activate STS in that region. For more information, see Activating
// and Deactivating Amazon Web Services STS in an Amazon Web Services Region
// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
// in the IAM User Guide.
ErrCodeRegionDisabledException = "RegionDisabledException"
)
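A sketch of acting on these codes with a hypothetical helper; note that IDPCommunicationError is already marked retryable by the request customization shown earlier:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/sts"
)

// handleSTSError is a hypothetical helper mapping error codes to actions.
func handleSTSError(err error) {
	aerr, ok := err.(awserr.Error)
	if !ok {
		return
	}
	switch aerr.Code() {
	case sts.ErrCodeExpiredTokenException, sts.ErrCodeInvalidIdentityTokenException:
		fmt.Println("fetch a fresh identity token and retry")
	case sts.ErrCodeRegionDisabledException:
		fmt.Println("activate STS in this region, or use another region")
	default:
		fmt.Println("unhandled error:", aerr.Code())
	}
}

func main() {
	handleSTSError(awserr.New(sts.ErrCodeExpiredTokenException, "token expired", nil))
}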

103
vendor/github.com/aws/aws-sdk-go/service/sts/service.go generated vendored Normal file

@@ -0,0 +1,103 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package sts
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/client/metadata"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/signer/v4"
"github.com/aws/aws-sdk-go/private/protocol/query"
)
// STS provides the API operation methods for making requests to
// AWS Security Token Service. See this package's package overview docs
// for details on the service.
//
// STS methods are safe to use concurrently. It is not safe to
// modify any of the struct's properties though.
type STS struct {
*client.Client
}
// Used for custom client initialization logic
var initClient func(*client.Client)
// Used for custom request initialization logic
var initRequest func(*request.Request)
// Service information constants
const (
ServiceName = "sts" // Name of service.
EndpointsID = ServiceName // ID to lookup a service endpoint with.
ServiceID = "STS" // ServiceID is a unique identifier of a specific service.
)
// New creates a new instance of the STS client with a session.
// If additional configuration is needed for the client instance, use the optional
// aws.Config parameter to add your extra config.
//
// Example:
// mySession := session.Must(session.NewSession())
//
// // Create a STS client from just a session.
// svc := sts.New(mySession)
//
// // Create a STS client with additional configuration
// svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS {
c := p.ClientConfig(EndpointsID, cfgs...)
if c.SigningNameDerived || len(c.SigningName) == 0 {
c.SigningName = EndpointsID
// No Fallback
}
return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion)
}
// newClient creates, initializes and returns a new service client instance.
func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *STS {
svc := &STS{
Client: client.New(
cfg,
metadata.ClientInfo{
ServiceName: ServiceName,
ServiceID: ServiceID,
SigningName: signingName,
SigningRegion: signingRegion,
PartitionID: partitionID,
Endpoint: endpoint,
APIVersion: "2011-06-15",
ResolvedRegion: resolvedRegion,
},
handlers,
),
}
// Handlers
svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
svc.Handlers.Build.PushBackNamed(query.BuildHandler)
svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler)
svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler)
svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler)
// Run custom client initialization if present
if initClient != nil {
initClient(svc.Client)
}
return svc
}
// newRequest creates a new request for an STS operation and runs any
// custom request initialization.
func (c *STS) newRequest(op *request.Operation, params, data interface{}) *request.Request {
req := c.NewRequest(op, params, data)
// Run custom request initialization if present
if initRequest != nil {
initRequest(req)
}
return req
}
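A minimal usage sketch for this client; GetCallerIdentity takes no parameters and is a common smoke test for credentials:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := sts.New(sess)
	out, err := svc.GetCallerIdentity(&sts.GetCallerIdentityInput{})
	if err != nil {
		panic(err)
	}
	fmt.Println(aws.StringValue(out.Account), aws.StringValue(out.Arn))
}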


@@ -0,0 +1,96 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
// Package stsiface provides an interface to enable mocking the AWS Security Token Service service client
// for testing your code.
//
// It is important to note that this interface will have breaking changes
// when the service model is updated and adds new API operations, paginators,
// and waiters.
package stsiface
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/sts"
)
// STSAPI provides an interface to enable mocking the
// sts.STS service client's API operations,
// paginators, and waiters. This makes unit testing your code that calls out
// to the SDK's service client easier.
//
// The best way to use this interface is to stub out the SDK service client's
// calls for unit testing your code with the SDK, without needing
// to inject custom request handlers into the SDK's request pipeline.
//
// // myFunc uses an SDK service client to make a request to
// // AWS Security Token Service.
// func myFunc(svc stsiface.STSAPI) bool {
// // Make svc.AssumeRole request
// }
//
// func main() {
// sess := session.New()
// svc := sts.New(sess)
//
// myFunc(svc)
// }
//
// In your _test.go file:
//
// // Define a mock struct to be used in your unit tests of myFunc.
// type mockSTSClient struct {
// stsiface.STSAPI
// }
// func (m *mockSTSClient) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) {
// // mock response/functionality
// }
//
// func TestMyFunc(t *testing.T) {
// // Setup Test
// mockSvc := &mockSTSClient{}
//
// myfunc(mockSvc)
//
// // Verify myFunc's functionality
// }
//
// It is important to note that this interface will have breaking changes
// when the service model is updated and adds new API operations, paginators,
// and waiters. It's suggested to use the pattern above for testing, or to use
// tooling to generate mocks to satisfy the interfaces.
type STSAPI interface {
AssumeRole(*sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error)
AssumeRoleRequest(*sts.AssumeRoleInput) (*request.Request, *sts.AssumeRoleOutput)
AssumeRoleWithSAML(*sts.AssumeRoleWithSAMLInput) (*sts.AssumeRoleWithSAMLOutput, error)
AssumeRoleWithSAMLWithContext(aws.Context, *sts.AssumeRoleWithSAMLInput, ...request.Option) (*sts.AssumeRoleWithSAMLOutput, error)
AssumeRoleWithSAMLRequest(*sts.AssumeRoleWithSAMLInput) (*request.Request, *sts.AssumeRoleWithSAMLOutput)
AssumeRoleWithWebIdentity(*sts.AssumeRoleWithWebIdentityInput) (*sts.AssumeRoleWithWebIdentityOutput, error)
AssumeRoleWithWebIdentityWithContext(aws.Context, *sts.AssumeRoleWithWebIdentityInput, ...request.Option) (*sts.AssumeRoleWithWebIdentityOutput, error)
AssumeRoleWithWebIdentityRequest(*sts.AssumeRoleWithWebIdentityInput) (*request.Request, *sts.AssumeRoleWithWebIdentityOutput)
DecodeAuthorizationMessage(*sts.DecodeAuthorizationMessageInput) (*sts.DecodeAuthorizationMessageOutput, error)
DecodeAuthorizationMessageWithContext(aws.Context, *sts.DecodeAuthorizationMessageInput, ...request.Option) (*sts.DecodeAuthorizationMessageOutput, error)
DecodeAuthorizationMessageRequest(*sts.DecodeAuthorizationMessageInput) (*request.Request, *sts.DecodeAuthorizationMessageOutput)
GetAccessKeyInfo(*sts.GetAccessKeyInfoInput) (*sts.GetAccessKeyInfoOutput, error)
GetAccessKeyInfoWithContext(aws.Context, *sts.GetAccessKeyInfoInput, ...request.Option) (*sts.GetAccessKeyInfoOutput, error)
GetAccessKeyInfoRequest(*sts.GetAccessKeyInfoInput) (*request.Request, *sts.GetAccessKeyInfoOutput)
GetCallerIdentity(*sts.GetCallerIdentityInput) (*sts.GetCallerIdentityOutput, error)
GetCallerIdentityWithContext(aws.Context, *sts.GetCallerIdentityInput, ...request.Option) (*sts.GetCallerIdentityOutput, error)
GetCallerIdentityRequest(*sts.GetCallerIdentityInput) (*request.Request, *sts.GetCallerIdentityOutput)
GetFederationToken(*sts.GetFederationTokenInput) (*sts.GetFederationTokenOutput, error)
GetFederationTokenWithContext(aws.Context, *sts.GetFederationTokenInput, ...request.Option) (*sts.GetFederationTokenOutput, error)
GetFederationTokenRequest(*sts.GetFederationTokenInput) (*request.Request, *sts.GetFederationTokenOutput)
GetSessionToken(*sts.GetSessionTokenInput) (*sts.GetSessionTokenOutput, error)
GetSessionTokenWithContext(aws.Context, *sts.GetSessionTokenInput, ...request.Option) (*sts.GetSessionTokenOutput, error)
GetSessionTokenRequest(*sts.GetSessionTokenInput) (*request.Request, *sts.GetSessionTokenOutput)
}
var _ STSAPI = (*sts.STS)(nil)
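Beyond mocking in tests, the interface supports dependency injection in production code; a sketch with a hypothetical helper that accepts either the real client or a mock:

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sts"
	"github.com/aws/aws-sdk-go/service/sts/stsiface"
)

// accountID is a hypothetical helper that depends on the interface, so
// callers can pass either the concrete sts.STS client or a test mock.
func accountID(svc stsiface.STSAPI) (string, error) {
	out, err := svc.GetCallerIdentity(&sts.GetCallerIdentityInput{})
	if err != nil {
		return "", err
	}
	return aws.StringValue(out.Account), nil
}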