TUN-3375: Upgrade coredns and prometheus dependencies

Igor Postelnik
2020-09-09 13:09:42 -05:00
parent 7acea1ac99
commit 741cd66c9e
757 changed files with 86868 additions and 32428 deletions

File diff suppressed because it is too large


@@ -13,7 +13,6 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/internal/sdkio"
)
const (
@@ -25,30 +24,6 @@ const (
appendMD5TxEncoding = "append-md5"
)
// contentMD5 computes and sets the HTTP Content-MD5 header for requests that
// require it.
func contentMD5(r *request.Request) {
h := md5.New()
if !aws.IsReaderSeekable(r.Body) {
if r.Config.Logger != nil {
r.Config.Logger.Log(fmt.Sprintf(
"Unable to compute Content-MD5 for unseekable body, S3.%s",
r.Operation.Name))
}
return
}
if _, err := copySeekableBody(h, r.Body); err != nil {
r.Error = awserr.New("ContentMD5", "failed to compute body MD5", err)
return
}
// encode the md5 checksum in base64 and set the request header.
v := base64.StdEncoding.EncodeToString(h.Sum(nil))
r.HTTPRequest.Header.Set(contentMD5Header, v)
}
// computeBodyHashes will add Content MD5 and Content Sha256 hashes to the
// request. If the body is not seekable or S3DisableContentMD5Validation set
// this handler will be ignored.
@@ -90,7 +65,7 @@ func computeBodyHashes(r *request.Request) {
dst = io.MultiWriter(hashers...)
}
if _, err := copySeekableBody(dst, r.Body); err != nil {
if _, err := aws.CopySeekableBody(dst, r.Body); err != nil {
r.Error = awserr.New("BodyHashError", "failed to compute body hashes", err)
return
}
@@ -119,28 +94,6 @@ const (
sha256HexEncLen = sha256.Size * 2 // hex.EncodedLen
)
func copySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) {
curPos, err := src.Seek(0, sdkio.SeekCurrent)
if err != nil {
return 0, err
}
// hash the body. seek back to the first position after reading to reset
// the body for transmission. copy errors may be assumed to be from the
// body.
n, err := io.Copy(dst, src)
if err != nil {
return n, err
}
_, err = src.Seek(curPos, sdkio.SeekStart)
if err != nil {
return n, err
}
return n, nil
}
// Adds the x-amz-te: append_md5 header to the request. This requests the service
// responds with a trailing MD5 checksum.
//
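
The hunk above drops the package-local copySeekableBody helper and switches computeBodyHashes to an exported aws.CopySeekableBody. A minimal sketch, not part of this commit, of how such a helper can be used on its own to hash a seekable body without losing its read position; the (io.Writer, io.ReadSeeker) signature is inferred from the call site shown above.

package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	body := strings.NewReader("example payload")

	// Hash the body; the helper seeks back to the original position when done,
	// so the same reader can still be transmitted afterwards.
	h := md5.New()
	if _, err := aws.CopySeekableBody(h, body); err != nil {
		fmt.Println("hashing failed:", err)
		return
	}

	fmt.Println("Content-MD5:", base64.StdEncoding.EncodeToString(h.Sum(nil)))
}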


@@ -4,6 +4,7 @@ import (
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/internal/s3err"
"github.com/aws/aws-sdk-go/service/s3/internal/arn"
)
func init() {
@@ -13,7 +14,7 @@ func init() {
func defaultInitClientFn(c *client.Client) {
// Support building custom endpoints based on config
c.Handlers.Build.PushFront(updateEndpointForS3Config)
c.Handlers.Build.PushFront(endpointHandler)
// Require SSL when using SSE keys
c.Handlers.Validate.PushBack(validateSSERequiresSSL)
@@ -27,17 +28,11 @@ func defaultInitClientFn(c *client.Client) {
}
func defaultInitRequestFn(r *request.Request) {
// Add reuest handlers for specific platforms.
// Add request handlers for specific platforms.
// e.g. 100-continue support for PUT requests using Go 1.6
platformRequestHandlers(r)
switch r.Operation.Name {
case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy,
opPutBucketTagging, opDeleteObjects, opPutBucketLifecycleConfiguration,
opPutObjectLegalHold, opPutObjectRetention, opPutObjectLockConfiguration,
opPutBucketReplication:
// These S3 operations require Content-MD5 to be set
r.Handlers.Build.PushBack(contentMD5)
case opGetBucketLocation:
// GetBucketLocation has custom parsing logic
r.Handlers.Unmarshal.PushFront(buildGetBucketLocation)
@@ -73,3 +68,8 @@ type sseCustomerKeyGetter interface {
type copySourceSSECustomerKeyGetter interface {
getCopySourceSSECustomerKey() string
}
type endpointARNGetter interface {
getEndpointARN() (arn.Resource, error)
hasEndpointARN() bool
}


@@ -104,19 +104,6 @@
// content from S3. The Encryption and Decryption clients can be used concurrently
// once the client is created.
//
// sess := session.Must(session.NewSession())
//
// // Create the decryption client.
// svc := s3crypto.NewDecryptionClient(sess)
//
// // The object will be downloaded from S3 and decrypted locally. By metadata
// // about the object's encryption will instruct the decryption client how
// // decrypt the content of the object. By default KMS is used for keys.
// result, err := svc.GetObject(&s3.GetObjectInput {
// Bucket: aws.String(myBucket),
// Key: aws.String(myKey),
// })
//
// See the s3crypto package documentation for more information.
// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3crypto/
//

vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go (new file, generated, vendored, 233 additions)

@@ -0,0 +1,233 @@
package s3
import (
"net/url"
"strings"
"github.com/aws/aws-sdk-go/aws"
awsarn "github.com/aws/aws-sdk-go/aws/arn"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/private/protocol"
"github.com/aws/aws-sdk-go/service/s3/internal/arn"
)
// Used by shapes with members decorated as endpoint ARN.
func parseEndpointARN(v string) (arn.Resource, error) {
return arn.ParseResource(v, accessPointResourceParser)
}
func accessPointResourceParser(a awsarn.ARN) (arn.Resource, error) {
resParts := arn.SplitResource(a.Resource)
switch resParts[0] {
case "accesspoint":
return arn.ParseAccessPointResource(a, resParts[1:])
default:
return nil, arn.InvalidARNError{ARN: a, Reason: "unknown resource type"}
}
}
func endpointHandler(req *request.Request) {
endpoint, ok := req.Params.(endpointARNGetter)
if !ok || !endpoint.hasEndpointARN() {
updateBucketEndpointFromParams(req)
return
}
resource, err := endpoint.getEndpointARN()
if err != nil {
req.Error = newInvalidARNError(nil, err)
return
}
resReq := resourceRequest{
Resource: resource,
Request: req,
}
if resReq.IsCrossPartition() {
req.Error = newClientPartitionMismatchError(resource,
req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)
return
}
if !resReq.AllowCrossRegion() && resReq.IsCrossRegion() {
req.Error = newClientRegionMismatchError(resource,
req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)
return
}
if resReq.HasCustomEndpoint() {
req.Error = newInvalidARNWithCustomEndpointError(resource, nil)
return
}
switch tv := resource.(type) {
case arn.AccessPointARN:
err = updateRequestAccessPointEndpoint(req, tv)
if err != nil {
req.Error = err
}
default:
req.Error = newInvalidARNError(resource, nil)
}
}
type resourceRequest struct {
Resource arn.Resource
Request *request.Request
}
func (r resourceRequest) ARN() awsarn.ARN {
return r.Resource.GetARN()
}
func (r resourceRequest) AllowCrossRegion() bool {
return aws.BoolValue(r.Request.Config.S3UseARNRegion)
}
func (r resourceRequest) UseFIPS() bool {
return isFIPS(aws.StringValue(r.Request.Config.Region))
}
func (r resourceRequest) IsCrossPartition() bool {
return r.Request.ClientInfo.PartitionID != r.Resource.GetARN().Partition
}
func (r resourceRequest) IsCrossRegion() bool {
return isCrossRegion(r.Request, r.Resource.GetARN().Region)
}
func (r resourceRequest) HasCustomEndpoint() bool {
return len(aws.StringValue(r.Request.Config.Endpoint)) > 0
}
func isFIPS(clientRegion string) bool {
return strings.HasPrefix(clientRegion, "fips-") || strings.HasSuffix(clientRegion, "-fips")
}
func isCrossRegion(req *request.Request, otherRegion string) bool {
return req.ClientInfo.SigningRegion != otherRegion
}
func updateBucketEndpointFromParams(r *request.Request) {
bucket, ok := bucketNameFromReqParams(r.Params)
if !ok {
// Ignore operation requests if the bucket name was not provided
// if this is an input validation error the validation handler
// will report it.
return
}
updateEndpointForS3Config(r, bucket)
}
func updateRequestAccessPointEndpoint(req *request.Request, accessPoint arn.AccessPointARN) error {
// Accelerate not supported
if aws.BoolValue(req.Config.S3UseAccelerate) {
return newClientConfiguredForAccelerateError(accessPoint,
req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)
}
// Ignore the disable host prefix for access points since custom endpoints
// are not supported.
req.Config.DisableEndpointHostPrefix = aws.Bool(false)
if err := accessPointEndpointBuilder(accessPoint).Build(req); err != nil {
return err
}
removeBucketFromPath(req.HTTPRequest.URL)
return nil
}
func removeBucketFromPath(u *url.URL) {
u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1)
if u.Path == "" {
u.Path = "/"
}
}
type accessPointEndpointBuilder arn.AccessPointARN
const (
accessPointPrefixLabel = "accesspoint"
accountIDPrefixLabel = "accountID"
accesPointPrefixTemplate = "{" + accessPointPrefixLabel + "}-{" + accountIDPrefixLabel + "}."
)
func (a accessPointEndpointBuilder) Build(req *request.Request) error {
resolveRegion := arn.AccessPointARN(a).Region
cfgRegion := aws.StringValue(req.Config.Region)
if isFIPS(cfgRegion) {
if aws.BoolValue(req.Config.S3UseARNRegion) && isCrossRegion(req, resolveRegion) {
// FIPS with cross region is not supported, the SDK must fail
// because there is no well defined method for SDK to construct a
// correct FIPS endpoint.
return newClientConfiguredForCrossRegionFIPSError(arn.AccessPointARN(a),
req.ClientInfo.PartitionID, cfgRegion, nil)
}
resolveRegion = cfgRegion
}
endpoint, err := resolveRegionalEndpoint(req, resolveRegion)
if err != nil {
return newFailedToResolveEndpointError(arn.AccessPointARN(a),
req.ClientInfo.PartitionID, cfgRegion, err)
}
if err = updateRequestEndpoint(req, endpoint.URL); err != nil {
return err
}
const serviceEndpointLabel = "s3-accesspoint"
// dualstack provided by endpoint resolver
cfgHost := req.HTTPRequest.URL.Host
if strings.HasPrefix(cfgHost, "s3") {
req.HTTPRequest.URL.Host = serviceEndpointLabel + cfgHost[2:]
}
protocol.HostPrefixBuilder{
Prefix: accesPointPrefixTemplate,
LabelsFn: a.hostPrefixLabelValues,
}.Build(req)
req.ClientInfo.SigningName = endpoint.SigningName
req.ClientInfo.SigningRegion = endpoint.SigningRegion
err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host)
if err != nil {
return newInvalidARNError(arn.AccessPointARN(a), err)
}
return nil
}
func (a accessPointEndpointBuilder) hostPrefixLabelValues() map[string]string {
return map[string]string{
accessPointPrefixLabel: arn.AccessPointARN(a).AccessPointName,
accountIDPrefixLabel: arn.AccessPointARN(a).AccountID,
}
}
func resolveRegionalEndpoint(r *request.Request, region string) (endpoints.ResolvedEndpoint, error) {
return r.Config.EndpointResolver.EndpointFor(EndpointsID, region, func(opts *endpoints.Options) {
opts.DisableSSL = aws.BoolValue(r.Config.DisableSSL)
opts.UseDualStack = aws.BoolValue(r.Config.UseDualStack)
opts.S3UsEast1RegionalEndpoint = endpoints.RegionalS3UsEast1Endpoint
})
}
func updateRequestEndpoint(r *request.Request, endpoint string) (err error) {
endpoint = endpoints.AddScheme(endpoint, aws.BoolValue(r.Config.DisableSSL))
r.HTTPRequest.URL, err = url.Parse(endpoint + r.Operation.HTTPPath)
if err != nil {
return awserr.New(request.ErrCodeSerialization,
"failed to parse endpoint URL", err)
}
return nil
}
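
The new endpointHandler above is what routes requests whose Bucket member carries an S3 Access Point ARN. A hedged usage sketch, not part of this commit: the region, account ID, access point name, and object key below are placeholders, and S3UseARNRegion is only needed when the ARN's region differs from the client region.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{
		Region: aws.String("us-west-2"),
		// Opt in to using the ARN's region when it differs from the client region.
		S3UseARNRegion: aws.Bool(true),
	}))

	svc := s3.New(sess)

	// Bucket is decorated as an endpoint ARN, so endpointHandler rewrites the
	// request host into the <access-point>-<account-id>.s3-accesspoint... form.
	out, err := svc.GetObject(&s3.GetObjectInput{
		Bucket: aws.String("arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point"),
		Key:    aws.String("example-object"),
	})
	if err != nil {
		fmt.Println("GetObject failed:", err)
		return
	}
	defer out.Body.Close()
	fmt.Println("fetched object via access point ARN")
}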


@@ -0,0 +1,151 @@
package s3
import (
"fmt"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/s3/internal/arn"
)
const (
invalidARNErrorErrCode = "InvalidARNError"
configurationErrorErrCode = "ConfigurationError"
)
type invalidARNError struct {
message string
resource arn.Resource
origErr error
}
func (e invalidARNError) Error() string {
var extra string
if e.resource != nil {
extra = "ARN: " + e.resource.String()
}
return awserr.SprintError(e.Code(), e.Message(), extra, e.origErr)
}
func (e invalidARNError) Code() string {
return invalidARNErrorErrCode
}
func (e invalidARNError) Message() string {
return e.message
}
func (e invalidARNError) OrigErr() error {
return e.origErr
}
func newInvalidARNError(resource arn.Resource, err error) invalidARNError {
return invalidARNError{
message: "invalid ARN",
origErr: err,
resource: resource,
}
}
func newInvalidARNWithCustomEndpointError(resource arn.Resource, err error) invalidARNError {
return invalidARNError{
message: "resource ARN not supported with custom client endpoints",
origErr: err,
resource: resource,
}
}
// ARN not supported for the target partition
func newInvalidARNWithUnsupportedPartitionError(resource arn.Resource, err error) invalidARNError {
return invalidARNError{
message: "resource ARN not supported for the target ARN partition",
origErr: err,
resource: resource,
}
}
type configurationError struct {
message string
resource arn.Resource
clientPartitionID string
clientRegion string
origErr error
}
func (e configurationError) Error() string {
extra := fmt.Sprintf("ARN: %s, client partition: %s, client region: %s",
e.resource, e.clientPartitionID, e.clientRegion)
return awserr.SprintError(e.Code(), e.Message(), extra, e.origErr)
}
func (e configurationError) Code() string {
return configurationErrorErrCode
}
func (e configurationError) Message() string {
return e.message
}
func (e configurationError) OrigErr() error {
return e.origErr
}
func newClientPartitionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError {
return configurationError{
message: "client partition does not match provided ARN partition",
origErr: err,
resource: resource,
clientPartitionID: clientPartitionID,
clientRegion: clientRegion,
}
}
func newClientRegionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError {
return configurationError{
message: "client region does not match provided ARN region",
origErr: err,
resource: resource,
clientPartitionID: clientPartitionID,
clientRegion: clientRegion,
}
}
func newFailedToResolveEndpointError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError {
return configurationError{
message: "endpoint resolver failed to find an endpoint for the provided ARN region",
origErr: err,
resource: resource,
clientPartitionID: clientPartitionID,
clientRegion: clientRegion,
}
}
func newClientConfiguredForFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError {
return configurationError{
message: "client configured for fips but cross-region resource ARN provided",
origErr: err,
resource: resource,
clientPartitionID: clientPartitionID,
clientRegion: clientRegion,
}
}
func newClientConfiguredForAccelerateError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError {
return configurationError{
message: "client configured for S3 Accelerate but is supported with resource ARN",
origErr: err,
resource: resource,
clientPartitionID: clientPartitionID,
clientRegion: clientRegion,
}
}
func newClientConfiguredForCrossRegionFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError {
return configurationError{
message: "client configured for FIPS with cross-region enabled but is supported with cross-region resource ARN",
origErr: err,
resource: resource,
clientPartitionID: clientPartitionID,
clientRegion: clientRegion,
}
}
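
Both error types above satisfy awserr.Error, so callers can branch on the code strings "InvalidARNError" and "ConfigurationError". A small sketch, not part of the commit, of how application code might inspect a failure produced by these handlers; classifyARNFailure is a hypothetical helper.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

// classifyARNFailure shows how the new error codes surface through the
// standard awserr.Error interface.
func classifyARNFailure(err error) string {
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case "InvalidARNError":
			return "the supplied ARN could not be used: " + aerr.Message()
		case "ConfigurationError":
			return "client configuration conflicts with the ARN: " + aerr.Message()
		}
	}
	return "unrelated error"
}

func main() {
	// Simulate the shape of an error the handlers would return.
	err := awserr.New("InvalidARNError", "invalid ARN", nil)
	fmt.Println(classifyARNFailure(err))
}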


@@ -13,6 +13,12 @@ const (
// ErrCodeBucketAlreadyOwnedByYou for service response error code
// "BucketAlreadyOwnedByYou".
//
// The bucket you tried to create already exists, and you own it. Amazon S3
// returns this error in all AWS Regions except in the North Virginia Region.
// For legacy compatibility, if you re-create an existing bucket that you already
// own in the North Virginia Region, Amazon S3 returns 200 OK and resets the
// bucket access control lists (ACLs).
ErrCodeBucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou"
// ErrCodeNoSuchBucket for service response error code
@@ -36,13 +42,13 @@ const (
// ErrCodeObjectAlreadyInActiveTierError for service response error code
// "ObjectAlreadyInActiveTierError".
//
// This operation is not allowed against this storage tier
// This operation is not allowed against this storage tier.
ErrCodeObjectAlreadyInActiveTierError = "ObjectAlreadyInActiveTierError"
// ErrCodeObjectNotInActiveTierError for service response error code
// "ObjectNotInActiveTierError".
//
// The source object of the COPY operation is not in the active tier and is
// only stored in Amazon Glacier.
// only stored in Amazon S3 Glacier.
ErrCodeObjectNotInActiveTierError = "ObjectNotInActiveTierError"
)


@@ -30,10 +30,10 @@ var accelerateOpBlacklist = operationBlacklist{
opListBuckets, opCreateBucket, opDeleteBucket,
}
// Request handler to automatically add the bucket name to the endpoint domain
// Automatically add the bucket name to the endpoint domain
// if possible. This style of bucket is valid for all bucket names which are
// DNS compatible and do not contain "."
func updateEndpointForS3Config(r *request.Request) {
func updateEndpointForS3Config(r *request.Request, bucketName string) {
forceHostStyle := aws.BoolValue(r.Config.S3ForcePathStyle)
accelerate := aws.BoolValue(r.Config.S3UseAccelerate)
@@ -43,45 +43,29 @@ func updateEndpointForS3Config(r *request.Request) {
r.Config.Logger.Log("ERROR: aws.Config.S3UseAccelerate is not compatible with aws.Config.S3ForcePathStyle, ignoring S3ForcePathStyle.")
}
}
updateEndpointForAccelerate(r)
updateEndpointForAccelerate(r, bucketName)
} else if !forceHostStyle && r.Operation.Name != opGetBucketLocation {
updateEndpointForHostStyle(r)
updateEndpointForHostStyle(r, bucketName)
}
}
func updateEndpointForHostStyle(r *request.Request) {
bucket, ok := bucketNameFromReqParams(r.Params)
if !ok {
// Ignore operation requests if the bucketname was not provided
// if this is an input validation error the validation handler
// will report it.
return
}
if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) {
func updateEndpointForHostStyle(r *request.Request, bucketName string) {
if !hostCompatibleBucketName(r.HTTPRequest.URL, bucketName) {
// bucket name must be valid to put into the host
return
}
moveBucketToHost(r.HTTPRequest.URL, bucket)
moveBucketToHost(r.HTTPRequest.URL, bucketName)
}
var (
accelElem = []byte("s3-accelerate.dualstack.")
)
func updateEndpointForAccelerate(r *request.Request) {
bucket, ok := bucketNameFromReqParams(r.Params)
if !ok {
// Ignore operation requests if the bucketname was not provided
// if this is an input validation error the validation handler
// will report it.
return
}
if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) {
func updateEndpointForAccelerate(r *request.Request, bucketName string) {
if !hostCompatibleBucketName(r.HTTPRequest.URL, bucketName) {
r.Error = awserr.New("InvalidParameterException",
fmt.Sprintf("bucket name %s is not compatible with S3 Accelerate", bucket),
fmt.Sprintf("bucket name %s is not compatible with S3 Accelerate", bucketName),
nil)
return
}
@@ -106,7 +90,7 @@ func updateEndpointForAccelerate(r *request.Request) {
r.HTTPRequest.URL.Host = strings.Join(parts, ".")
moveBucketToHost(r.HTTPRequest.URL, bucket)
moveBucketToHost(r.HTTPRequest.URL, bucketName)
}
// Attempts to retrieve the bucket name from the request input parameters.
@@ -148,8 +132,5 @@ func dnsCompatibleBucketName(bucket string) bool {
// moveBucketToHost moves the bucket name from the URI path to URL host.
func moveBucketToHost(u *url.URL, bucket string) {
u.Host = bucket + "." + u.Host
u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1)
if u.Path == "" {
u.Path = "/"
}
removeBucketFromPath(u)
}
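
updateEndpointForS3Config now receives the bucket name from its caller instead of re-extracting it from the request parameters, but the addressing decision is unchanged: DNS-compatible bucket names are moved into the host unless path-style is forced. A configuration sketch, not part of the commit; the region is a placeholder.

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Default: virtual-hosted style, e.g. https://my-bucket.s3.us-east-1.amazonaws.com/key
	hostStyle := s3.New(session.Must(session.NewSession(&aws.Config{
		Region: aws.String("us-east-1"),
	})))

	// Forced path style, e.g. https://s3.us-east-1.amazonaws.com/my-bucket/key
	pathStyle := s3.New(session.Must(session.NewSession(&aws.Config{
		Region:           aws.String("us-east-1"),
		S3ForcePathStyle: aws.Bool(true),
	})))

	_, _ = hostStyle, pathStyle
}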


@@ -0,0 +1,45 @@
package arn
import (
"strings"
"github.com/aws/aws-sdk-go/aws/arn"
)
// AccessPointARN provides representation
type AccessPointARN struct {
arn.ARN
AccessPointName string
}
// GetARN returns the base ARN for the Access Point resource
func (a AccessPointARN) GetARN() arn.ARN {
return a.ARN
}
// ParseAccessPointResource attempts to parse the ARN's resource as an
// AccessPoint resource.
func ParseAccessPointResource(a arn.ARN, resParts []string) (AccessPointARN, error) {
if len(a.Region) == 0 {
return AccessPointARN{}, InvalidARNError{a, "region not set"}
}
if len(a.AccountID) == 0 {
return AccessPointARN{}, InvalidARNError{a, "account-id not set"}
}
if len(resParts) == 0 {
return AccessPointARN{}, InvalidARNError{a, "resource-id not set"}
}
if len(resParts) > 1 {
return AccessPointARN{}, InvalidARNError{a, "sub resource not supported"}
}
resID := resParts[0]
if len(strings.TrimSpace(resID)) == 0 {
return AccessPointARN{}, InvalidARNError{a, "resource-id not set"}
}
return AccessPointARN{
ARN: a,
AccessPointName: resID,
}, nil
}
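
ParseAccessPointResource rejects ARNs that are missing a region, account ID, or resource ID. A hypothetical in-package test sketch of the failure path, assuming it sits alongside this file in the internal arn package.

package arn

import (
	"testing"

	awsarn "github.com/aws/aws-sdk-go/aws/arn"
)

func TestAccessPointMissingRegionSketch(t *testing.T) {
	// An ARN with no region component should be rejected.
	a, err := awsarn.Parse("arn:aws:s3::123456789012:accesspoint/my-access-point")
	if err != nil {
		t.Fatal(err)
	}

	_, err = ParseAccessPointResource(a, []string{"my-access-point"})
	if _, ok := err.(InvalidARNError); !ok {
		t.Fatalf("expected InvalidARNError, got %v", err)
	}
}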


@@ -0,0 +1,71 @@
package arn
import (
"strings"
"github.com/aws/aws-sdk-go/aws/arn"
)
// Resource provides the interfaces abstracting ARNs of specific resource
// types.
type Resource interface {
GetARN() arn.ARN
String() string
}
// ResourceParser provides the function for parsing an ARN's resource
// component into a typed resource.
type ResourceParser func(arn.ARN) (Resource, error)
// ParseResource parses an AWS ARN into a typed resource for the S3 API.
func ParseResource(s string, resParser ResourceParser) (resARN Resource, err error) {
a, err := arn.Parse(s)
if err != nil {
return nil, err
}
if len(a.Partition) == 0 {
return nil, InvalidARNError{a, "partition not set"}
}
if a.Service != "s3" {
return nil, InvalidARNError{a, "service is not S3"}
}
if len(a.Resource) == 0 {
return nil, InvalidARNError{a, "resource not set"}
}
return resParser(a)
}
// SplitResource splits the resource components by the ARN resource delimiters.
func SplitResource(v string) []string {
var parts []string
var offset int
for offset <= len(v) {
idx := strings.IndexAny(v[offset:], "/:")
if idx < 0 {
parts = append(parts, v[offset:])
break
}
parts = append(parts, v[offset:idx+offset])
offset += idx + 1
}
return parts
}
// IsARN returns whether the given string is an ARN
func IsARN(s string) bool {
return arn.IsARN(s)
}
// InvalidARNError provides the error for an invalid ARN error.
type InvalidARNError struct {
ARN arn.ARN
Reason string
}
func (e InvalidARNError) Error() string {
return "invalid Amazon S3 ARN, " + e.Reason + ", " + e.ARN.String()
}
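
The two new internal files (the access point resource type and these generic ARN helpers) combine as follows. A hypothetical in-package test sketch mirroring the accessPointResourceParser wired up in endpoint.go, under the assumption that it lives next to these files in the internal arn package.

package arn

import (
	"testing"

	awsarn "github.com/aws/aws-sdk-go/aws/arn"
)

func TestParseAccessPointARNSketch(t *testing.T) {
	res, err := ParseResource(
		"arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point",
		func(a awsarn.ARN) (Resource, error) {
			parts := SplitResource(a.Resource) // ["accesspoint", "my-access-point"]
			return ParseAccessPointResource(a, parts[1:])
		})
	if err != nil {
		t.Fatal(err)
	}

	ap, ok := res.(AccessPointARN)
	if !ok || ap.AccessPointName != "my-access-point" || ap.AccountID != "123456789012" {
		t.Fatalf("unexpected resource: %#v", res)
	}
}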


@@ -31,7 +31,7 @@ var initRequest func(*request.Request)
const (
ServiceName = "s3" // Name of service.
EndpointsID = ServiceName // ID to lookup a service endpoint with.
ServiceID = "S3" // ServiceID is a unique identifer of a specific service.
ServiceID = "S3" // ServiceID is a unique identifier of a specific service.
)
// New creates a new instance of the S3 client with a session.
@@ -39,6 +39,8 @@ const (
// aws.Config parameter to add your extra config.
//
// Example:
// mySession := session.Must(session.NewSession())
//
// // Create a S3 client from just a session.
// svc := s3.New(mySession)
//
@@ -46,11 +48,11 @@ const (
// svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 {
c := p.ClientConfig(EndpointsID, cfgs...)
return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
}
// newClient creates, initializes and returns a new service client instance.
func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *S3 {
func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *S3 {
svc := &S3{
Client: client.New(
cfg,
@@ -59,6 +61,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
ServiceID: ServiceID,
SigningName: signingName,
SigningRegion: signingRegion,
PartitionID: partitionID,
Endpoint: endpoint,
APIVersion: "2006-03-01",
},
@@ -75,6 +78,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler)
svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler)
svc.Handlers.BuildStream.PushBackNamed(restxml.BuildHandler)
svc.Handlers.UnmarshalStream.PushBackNamed(restxml.UnmarshalHandler)
// Run custom client initialization if present


@@ -69,7 +69,7 @@ func computeKeyMD5(keyHeader, keyMD5Header, key string, r *http.Request) {
return
}
// In backwards compatiable, the header's value is not base64 encoded,
// In backwards compatible, the header's value is not base64 encoded,
// and needs to be encoded and updated by the SDK's customizations.
b64Key := base64.StdEncoding.EncodeToString([]byte(key))
r.Header.Set(keyHeader, b64Key)


@@ -2,6 +2,7 @@ package s3
import (
"bytes"
"io"
"io/ioutil"
"net/http"
@@ -24,17 +25,18 @@ func copyMultipartStatusOKUnmarhsalError(r *request.Request) {
r.HTTPResponse.Body = ioutil.NopCloser(body)
defer body.Seek(0, sdkio.SeekStart)
if body.Len() == 0 {
// If there is no body don't attempt to parse the body.
return
}
unmarshalError(r)
if err, ok := r.Error.(awserr.Error); ok && err != nil {
if err.Code() == request.ErrCodeSerialization {
if err.Code() == request.ErrCodeSerialization &&
err.OrigErr() != io.EOF {
r.Error = nil
return
}
r.HTTPResponse.StatusCode = http.StatusServiceUnavailable
// if empty payload
if err.OrigErr() == io.EOF {
r.HTTPResponse.StatusCode = http.StatusInternalServerError
} else {
r.HTTPResponse.StatusCode = http.StatusServiceUnavailable
}
}
}
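
The hunk above changes how a copy/multipart 200 OK response that actually carries an error is reclassified: an empty payload (io.EOF from the unmarshaler) now maps to 500 Internal Server Error, while a real error body still maps to 503 Service Unavailable. A tiny sketch of that mapping, not part of the commit.

package main

import (
	"fmt"
	"io"
	"net/http"
)

// reclassify mirrors the branch added above: EOF (empty payload) maps to 500,
// any other unmarshaled error to 503.
func reclassify(origErr error) int {
	if origErr == io.EOF {
		return http.StatusInternalServerError
	}
	return http.StatusServiceUnavailable
}

func main() {
	fmt.Println(reclassify(io.EOF)) // 500
	fmt.Println(reclassify(nil))    // 503
}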


@@ -1,6 +1,7 @@
package s3
import (
"bytes"
"encoding/xml"
"fmt"
"io"
@@ -45,17 +46,24 @@ func unmarshalError(r *request.Request) {
// Attempt to parse error from body if it is known
var errResp xmlErrorResponse
err := xmlutil.UnmarshalXMLError(&errResp, r.HTTPResponse.Body)
if err == io.EOF {
// Only capture the error if an unmarshal error occurs that is not EOF,
// because S3 might send an error without a error message which causes
// the XML unmarshal to fail with EOF.
err = nil
var err error
if r.HTTPResponse.StatusCode >= 200 && r.HTTPResponse.StatusCode < 300 {
err = s3unmarshalXMLError(&errResp, r.HTTPResponse.Body)
} else {
err = xmlutil.UnmarshalXMLError(&errResp, r.HTTPResponse.Body)
}
if err != nil {
var errorMsg string
if err == io.EOF {
errorMsg = "empty response payload"
} else {
errorMsg = "failed to unmarshal error message"
}
r.Error = awserr.NewRequestFailure(
awserr.New(request.ErrCodeSerialization,
"failed to unmarshal error message", err),
errorMsg, err),
r.HTTPResponse.StatusCode,
r.RequestID,
)
@@ -86,3 +94,21 @@ type RequestFailure interface {
// Host ID is the S3 Host ID needed for debug, and contacting support
HostID() string
}
// s3unmarshalXMLError is s3 specific xml error unmarshaler
// for 200 OK errors and response payloads.
// This function differs from the xmlUtil.UnmarshalXMLError
// func. It does not ignore the EOF error and passes it up.
// Related to bug fix for `s3 200 OK response with empty payload`
func s3unmarshalXMLError(v interface{}, stream io.Reader) error {
var errBuf bytes.Buffer
body := io.TeeReader(stream, &errBuf)
err := xml.NewDecoder(body).Decode(v)
if err != nil && err != io.EOF {
return awserr.NewUnmarshalError(err,
"failed to unmarshal error message", errBuf.Bytes())
}
return err
}
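
s3unmarshalXMLError decodes the same <Error> payload shape as the generic unmarshaler, but it passes io.EOF up for an empty body instead of swallowing it, which lets the 200 OK handling earlier in this file distinguish "error in body" from "empty body". A standalone sketch, not part of the commit; the errorResponse struct below is a stand-in for the SDK's unexported xmlErrorResponse.

package main

import (
	"encoding/xml"
	"fmt"
	"strings"
)

// errorResponse stands in for the SDK's unexported xmlErrorResponse type.
type errorResponse struct {
	XMLName xml.Name `xml:"Error"`
	Code    string   `xml:"Code"`
	Message string   `xml:"Message"`
}

func main() {
	// A 200 OK response (e.g. from CompleteMultipartUpload) can still carry an error body.
	body := `<Error><Code>InternalError</Code><Message>We encountered an internal error.</Message></Error>`

	var resp errorResponse
	if err := xml.NewDecoder(strings.NewReader(body)).Decode(&resp); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println(resp.Code, "-", resp.Message)

	// Decoding an empty body yields io.EOF, which the new helper surfaces
	// rather than silently dropping.
	var empty errorResponse
	err := xml.NewDecoder(strings.NewReader("")).Decode(&empty)
	fmt.Println(err) // EOF
}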