Browse Source

s3api: preserve lifecycle config responses for Terraform (#8805)

* s3api: preserve lifecycle configs for terraform

* s3api: bound lifecycle config request bodies

* s3api: make bucket config updates copy-on-write

* s3api: tighten string slice cloning
pull/8807/head
Chris Lu 4 days ago
committed by GitHub
parent
commit
e3f052cd84
No known key found for this signature in database. GPG Key ID: B5690EEEBB952194
  1. 5
      weed/s3api/policy_engine/types.go
  2. 17
      weed/s3api/policy_engine/types_test.go
  3. 187
      weed/s3api/s3api_bucket_config.go
  4. 47
      weed/s3api/s3api_bucket_config_update_test.go
  5. 37
      weed/s3api/s3api_bucket_handlers.go
  6. 75
      weed/s3api/s3api_bucket_lifecycle_config.go
  7. 126
      weed/s3api/s3api_bucket_lifecycle_response_test.go
  8. 5
      weed/s3api/s3api_handlers.go

5
weed/s3api/policy_engine/types.go

@ -100,6 +100,11 @@ func NewStringOrStringSlicePtr(values ...string) *StringOrStringSlice {
return &StringOrStringSlice{values: values}
}
// CloneStringOrStringSlice returns a copy with its own backing slice, so the
// clone can be mutated without affecting the original. A nil/empty source
// stays nil in the clone.
func CloneStringOrStringSlice(value StringOrStringSlice) StringOrStringSlice {
	var copied []string
	if len(value.values) > 0 {
		copied = make([]string, len(value.values))
		copy(copied, value.values)
	}
	return StringOrStringSlice{values: copied}
}
// PolicyConditions represents policy conditions with proper typing
type PolicyConditions map[string]map[string]StringOrStringSlice

17
weed/s3api/policy_engine/types_test.go

@ -0,0 +1,17 @@
package policy_engine
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestCloneStringOrStringSliceCopiesBackingSlice checks that the clone owns an
// independent backing slice: mutating it must not leak into the source.
func TestCloneStringOrStringSliceCopiesBackingSlice(t *testing.T) {
	source := NewStringOrStringSlice("s3:GetObject", "s3:PutObject")
	duplicate := CloneStringOrStringSlice(source)
	// Overwrite the first element of the clone's backing slice.
	duplicate.values[0] = "s3:DeleteObject"
	// The source must be untouched, while the clone reflects the mutation.
	assert.Equal(t, []string{"s3:GetObject", "s3:PutObject"}, source.Strings())
	assert.Equal(t, []string{"s3:DeleteObject", "s3:PutObject"}, duplicate.Strings())
}

187
weed/s3api/s3api_bucket_config.go

@ -425,44 +425,62 @@ func (s3a *S3ApiServer) updateBucketConfig(bucket string, updateFn func(*BucketC
return errCode
}
nextConfig := cloneBucketConfig(config)
if nextConfig == nil {
glog.Errorf("updateBucketConfig: failed to clone config for bucket %s", bucket)
return s3err.ErrInternalError
}
// Apply update function
if err := updateFn(config); err != nil {
if err := updateFn(nextConfig); err != nil {
glog.Errorf("updateBucketConfig: update function failed for bucket %s: %v", bucket, err)
return s3err.ErrInternalError
}
// Prepare extended attributes
if config.Entry.Extended == nil {
config.Entry.Extended = make(map[string][]byte)
if nextConfig.Entry == nil {
glog.Errorf("updateBucketConfig: missing bucket entry for %s", bucket)
return s3err.ErrInternalError
}
if nextConfig.Entry.Extended == nil {
nextConfig.Entry.Extended = make(map[string][]byte)
}
// Update extended attributes
if config.Versioning != "" {
config.Entry.Extended[s3_constants.ExtVersioningKey] = []byte(config.Versioning)
}
if config.Ownership != "" {
config.Entry.Extended[s3_constants.ExtOwnershipKey] = []byte(config.Ownership)
}
if config.ACL != nil {
config.Entry.Extended[s3_constants.ExtAmzAclKey] = config.ACL
}
if config.Owner != "" {
config.Entry.Extended[s3_constants.ExtAmzOwnerKey] = []byte(config.Owner)
if nextConfig.Versioning != "" {
nextConfig.Entry.Extended[s3_constants.ExtVersioningKey] = []byte(nextConfig.Versioning)
} else {
delete(nextConfig.Entry.Extended, s3_constants.ExtVersioningKey)
}
if nextConfig.Ownership != "" {
nextConfig.Entry.Extended[s3_constants.ExtOwnershipKey] = []byte(nextConfig.Ownership)
} else {
delete(nextConfig.Entry.Extended, s3_constants.ExtOwnershipKey)
}
if nextConfig.ACL != nil {
nextConfig.Entry.Extended[s3_constants.ExtAmzAclKey] = nextConfig.ACL
} else {
delete(nextConfig.Entry.Extended, s3_constants.ExtAmzAclKey)
}
if nextConfig.Owner != "" {
nextConfig.Entry.Extended[s3_constants.ExtAmzOwnerKey] = []byte(nextConfig.Owner)
} else {
delete(nextConfig.Entry.Extended, s3_constants.ExtAmzOwnerKey)
}
// Update Object Lock configuration
if config.ObjectLockConfig != nil {
glog.V(3).Infof("updateBucketConfig: storing Object Lock config for bucket %s: %+v", bucket, config.ObjectLockConfig)
if err := StoreObjectLockConfigurationInExtended(config.Entry, config.ObjectLockConfig); err != nil {
if nextConfig.ObjectLockConfig != nil {
glog.V(3).Infof("updateBucketConfig: storing Object Lock config for bucket %s: %+v", bucket, nextConfig.ObjectLockConfig)
if err := StoreObjectLockConfigurationInExtended(nextConfig.Entry, nextConfig.ObjectLockConfig); err != nil {
glog.Errorf("updateBucketConfig: failed to store Object Lock configuration for bucket %s: %v", bucket, err)
return s3err.ErrInternalError
}
glog.V(3).Infof("updateBucketConfig: stored Object Lock config in extended attributes for bucket %s, key=%s, value=%s",
bucket, s3_constants.ExtObjectLockEnabledKey, string(config.Entry.Extended[s3_constants.ExtObjectLockEnabledKey]))
bucket, s3_constants.ExtObjectLockEnabledKey, string(nextConfig.Entry.Extended[s3_constants.ExtObjectLockEnabledKey]))
}
// Save to filer
glog.V(3).Infof("updateBucketConfig: saving entry to filer for bucket %s", bucket)
err := s3a.updateEntry(s3a.bucketRoot(bucket), config.Entry)
err := s3a.updateEntry(s3a.bucketRoot(bucket), nextConfig.Entry)
if err != nil {
glog.Errorf("updateBucketConfig: failed to update bucket entry for %s: %v", bucket, err)
return s3err.ErrInternalError
@ -470,11 +488,140 @@ func (s3a *S3ApiServer) updateBucketConfig(bucket string, updateFn func(*BucketC
glog.V(3).Infof("updateBucketConfig: saved entry to filer for bucket %s", bucket)
// Update cache
s3a.bucketConfigCache.Set(bucket, config)
s3a.bucketConfigCache.Set(bucket, nextConfig)
return s3err.ErrNone
}
// cloneBucketConfig produces a deep copy of config so callers can mutate the
// result without touching the cached original (copy-on-write updates).
// A nil input yields nil.
func cloneBucketConfig(config *BucketConfig) *BucketConfig {
	if config == nil {
		return nil
	}
	// Start from a shallow copy so every value-typed field carries over, then
	// replace each reference-typed field with its own deep copy.
	copied := *config
	if config.Entry != nil {
		copied.Entry = proto.Clone(config.Entry).(*filer_pb.Entry)
	}
	if config.ACL != nil {
		copied.ACL = append([]byte(nil), config.ACL...)
	}
	if config.CORS != nil {
		copied.CORS = cloneCORSConfiguration(config.CORS)
	}
	if config.ObjectLockConfig != nil {
		copied.ObjectLockConfig = cloneObjectLockConfiguration(config.ObjectLockConfig)
	}
	if config.BucketPolicy != nil {
		copied.BucketPolicy = cloneBucketPolicy(config.BucketPolicy)
	}
	return &copied
}
// cloneCORSConfiguration deep-copies a CORS configuration, giving every rule
// its own backing slices and its own MaxAgeSeconds pointer. Nil in, nil out.
func cloneCORSConfiguration(config *cors.CORSConfiguration) *cors.CORSConfiguration {
	if config == nil {
		return nil
	}
	rules := make([]cors.CORSRule, len(config.CORSRules))
	for i := range config.CORSRules {
		src := &config.CORSRules[i]
		rules[i] = cors.CORSRule{
			ID:             src.ID,
			AllowedHeaders: append([]string(nil), src.AllowedHeaders...),
			AllowedMethods: append([]string(nil), src.AllowedMethods...),
			AllowedOrigins: append([]string(nil), src.AllowedOrigins...),
			ExposeHeaders:  append([]string(nil), src.ExposeHeaders...),
		}
		if src.MaxAgeSeconds != nil {
			// Copy the pointed-to value so the clone owns its own pointer.
			seconds := *src.MaxAgeSeconds
			rules[i].MaxAgeSeconds = &seconds
		}
	}
	return &cors.CORSConfiguration{CORSRules: rules}
}
// cloneObjectLockConfiguration deep-copies an Object Lock configuration,
// including the optional rule and its default retention. Nil in, nil out.
func cloneObjectLockConfiguration(config *ObjectLockConfiguration) *ObjectLockConfiguration {
	if config == nil {
		return nil
	}
	copied := &ObjectLockConfiguration{
		XMLNS:             config.XMLNS,
		XMLName:           config.XMLName,
		ObjectLockEnabled: config.ObjectLockEnabled,
	}
	rule := config.Rule
	if rule == nil {
		return copied
	}
	copied.Rule = &ObjectLockRule{XMLName: rule.XMLName}
	if retention := rule.DefaultRetention; retention != nil {
		copied.Rule.DefaultRetention = &DefaultRetention{
			XMLName:  retention.XMLName,
			Mode:     retention.Mode,
			Days:     retention.Days,
			Years:    retention.Years,
			DaysSet:  retention.DaysSet,
			YearsSet: retention.YearsSet,
		}
	}
	return copied
}
// cloneBucketPolicy deep-copies a bucket policy document, statement by
// statement. Nil in, nil out.
func cloneBucketPolicy(policyDoc *policy_engine.PolicyDocument) *policy_engine.PolicyDocument {
	if policyDoc == nil {
		return nil
	}
	statements := make([]policy_engine.PolicyStatement, 0, len(policyDoc.Statement))
	for _, statement := range policyDoc.Statement {
		statements = append(statements, clonePolicyStatement(statement))
	}
	return &policy_engine.PolicyDocument{
		Version:   policyDoc.Version,
		Statement: statements,
	}
}
// clonePolicyStatement deep-copies a single policy statement, including every
// string-slice field and the nested condition map.
func clonePolicyStatement(statement policy_engine.PolicyStatement) policy_engine.PolicyStatement {
	copied := policy_engine.PolicyStatement{
		Sid:         statement.Sid,
		Effect:      statement.Effect,
		Action:      cloneStringOrStringSlice(statement.Action),
		Principal:   cloneStringOrStringSlicePtr(statement.Principal),
		Resource:    cloneStringOrStringSlicePtr(statement.Resource),
		NotResource: cloneStringOrStringSlicePtr(statement.NotResource),
	}
	if statement.Condition == nil {
		return copied
	}
	// Conditions are a two-level map; both levels get fresh maps and every
	// operand value gets its own backing slice.
	conditions := make(policy_engine.PolicyConditions, len(statement.Condition))
	for operator, operands := range statement.Condition {
		operandCopies := make(map[string]policy_engine.StringOrStringSlice, len(operands))
		for key, value := range operands {
			operandCopies[key] = cloneStringOrStringSlice(value)
		}
		conditions[operator] = operandCopies
	}
	copied.Condition = conditions
	return copied
}
// cloneStringOrStringSlice copies a StringOrStringSlice so the clone owns its
// own backing slice; it delegates to the policy_engine helper.
func cloneStringOrStringSlice(value policy_engine.StringOrStringSlice) policy_engine.StringOrStringSlice {
	cloned := policy_engine.CloneStringOrStringSlice(value)
	return cloned
}
// cloneStringOrStringSlicePtr copies an optional StringOrStringSlice,
// preserving nil so "field absent" survives the clone.
func cloneStringOrStringSlicePtr(value *policy_engine.StringOrStringSlice) *policy_engine.StringOrStringSlice {
	if value == nil {
		return nil
	}
	copied := policy_engine.CloneStringOrStringSlice(*value)
	return &copied
}
// isVersioningEnabled checks if versioning is enabled for a bucket (with caching)
func (s3a *S3ApiServer) isVersioningEnabled(bucket string) (bool, error) {
config, errCode := s3a.getBucketConfig(bucket)

47
weed/s3api/s3api_bucket_config_update_test.go

@ -0,0 +1,47 @@
package s3api
import (
"testing"
"time"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestUpdateBucketConfigDoesNotMutateCacheOnPersistFailure exercises the
// copy-on-write behavior of updateBucketConfig: when persisting to the filer
// fails, the cached BucketConfig must remain exactly as it was before the
// update attempt (no partial mutation of the cached entry).
func TestUpdateBucketConfigDoesNotMutateCacheOnPersistFailure(t *testing.T) {
	const bucket = "cleanup-test-net"
	s3a := newTestS3ApiServerWithMemoryIAM(t, nil)
	s3a.option = &S3ApiServerOption{
		BucketsPath: "/buckets",
	}
	// Seed the cache with a bucket whose versioning is unset and whose entry
	// carries no extended attributes.
	s3a.bucketConfigCache = NewBucketConfigCache(time.Minute)
	s3a.bucketConfigCache.Set(bucket, &BucketConfig{
		Name:       bucket,
		Versioning: "",
		Entry: &filer_pb.Entry{
			Name:        bucket,
			IsDirectory: true,
			Extended:    map[string][]byte{},
		},
	})
	// This test server only has in-memory IAM state and no filer connection, so
	// updateBucketConfig is expected to fail during the persist step with an
	// internal error. The assertion below verifies that the cached config stays
	// unchanged when that write path fails.
	errCode := s3a.updateBucketConfig(bucket, func(config *BucketConfig) error {
		config.Versioning = s3_constants.VersioningEnabled
		return nil
	})
	require.Equal(t, s3err.ErrInternalError, errCode)
	// The cached config must still show versioning unset: the update function
	// above ran against a clone, not the cached instance.
	config, found := s3a.bucketConfigCache.Get(bucket)
	require.True(t, found)
	assert.Empty(t, config.Versioning)
	assert.NotContains(t, config.Entry.Extended, s3_constants.ExtVersioningKey)
}

37
weed/s3api/s3api_bucket_handlers.go

@ -7,6 +7,7 @@ import (
"encoding/xml"
"errors"
"fmt"
"io"
"math"
"net/http"
"sort"
@ -815,6 +816,14 @@ func (s3a *S3ApiServer) GetBucketLifecycleConfigurationHandler(w http.ResponseWr
s3err.WriteErrorResponse(w, r, err)
return
}
if lifecycleXML, transitionMinimumObjectSize, found, errCode := s3a.getStoredBucketLifecycleConfiguration(bucket); errCode != s3err.ErrNone {
s3err.WriteErrorResponse(w, r, errCode)
return
} else if found {
w.Header().Set(bucketLifecycleTransitionMinimumObjectSizeHeader, transitionMinimumObjectSize)
writeSuccessResponseXMLBytes(w, r, lifecycleXML)
return
}
// ReadFilerConfFromFilers provides multi-filer failover
fc, err := filer.ReadFilerConfFromFilers(s3a.option.Filers, s3a.option.GrpcDialOption, nil)
if err != nil {
@ -855,6 +864,9 @@ func (s3a *S3ApiServer) GetBucketLifecycleConfigurationHandler(w http.ResponseWr
})
}
if len(response.Rules) > 0 {
w.Header().Set(bucketLifecycleTransitionMinimumObjectSizeHeader, defaultLifecycleTransitionMinimumObjectSize)
}
writeSuccessResponseXML(w, r, response)
}
@ -892,8 +904,21 @@ func (s3a *S3ApiServer) PutBucketLifecycleConfigurationHandler(w http.ResponseWr
return
}
r.Body = http.MaxBytesReader(w, r.Body, maxBucketLifecycleConfigurationSize)
lifecycleXML, err := io.ReadAll(r.Body)
if err != nil {
glog.Warningf("PutBucketLifecycleConfigurationHandler read body: %s", err)
var maxBytesErr *http.MaxBytesError
if errors.As(err, &maxBytesErr) {
s3err.WriteErrorResponse(w, r, s3err.ErrEntityTooLarge)
return
}
s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRequest)
return
}
lifeCycleConfig := Lifecycle{}
if err := xmlDecoder(r.Body, &lifeCycleConfig, r.ContentLength); err != nil {
if err := xmlDecoder(bytes.NewReader(lifecycleXML), &lifeCycleConfig, int64(len(lifecycleXML))); err != nil {
glog.Warningf("PutBucketLifecycleConfigurationHandler xml decode: %s", err)
s3err.WriteErrorResponse(w, r, s3err.ErrMalformedXML)
return
@ -988,6 +1013,11 @@ func (s3a *S3ApiServer) PutBucketLifecycleConfigurationHandler(w http.ResponseWr
}
}
if errCode := s3a.storeBucketLifecycleConfiguration(bucket, lifecycleXML, r.Header.Get(bucketLifecycleTransitionMinimumObjectSizeHeader)); errCode != s3err.ErrNone {
s3err.WriteErrorResponse(w, r, errCode)
return
}
writeSuccessResponseEmpty(w, r)
}
@ -1038,6 +1068,11 @@ func (s3a *S3ApiServer) DeleteBucketLifecycleHandler(w http.ResponseWriter, r *h
}
}
if errCode := s3a.clearStoredBucketLifecycleConfiguration(bucket); errCode != s3err.ErrNone {
s3err.WriteErrorResponse(w, r, errCode)
return
}
s3err.WriteEmptyResponse(w, r, http.StatusNoContent)
}

75
weed/s3api/s3api_bucket_lifecycle_config.go

@ -0,0 +1,75 @@
package s3api
import (
"fmt"
"strings"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
)
const (
	// Extended-attribute keys used to persist a bucket's lifecycle
	// configuration on its filer entry.
	bucketLifecycleConfigurationXMLKey            = "s3-bucket-lifecycle-configuration-xml"
	bucketLifecycleTransitionMinimumObjectSizeKey = "s3-bucket-lifecycle-transition-default-minimum-object-size"

	// Header echoed on lifecycle GET/PUT responses, mirroring AWS S3.
	bucketLifecycleTransitionMinimumObjectSizeHeader = "X-Amz-Transition-Default-Minimum-Object-Size"

	// Value reported when a stored configuration has no explicit setting.
	defaultLifecycleTransitionMinimumObjectSize = "all_storage_classes_128K"

	// Upper bound on lifecycle PUT request bodies (1 MiB).
	maxBucketLifecycleConfigurationSize = 1 << 20
)

// normalizeBucketLifecycleTransitionMinimumObjectSize trims surrounding
// whitespace from value and substitutes the default when nothing remains.
func normalizeBucketLifecycleTransitionMinimumObjectSize(value string) string {
	trimmed := strings.TrimSpace(value)
	if trimmed == "" {
		return defaultLifecycleTransitionMinimumObjectSize
	}
	return trimmed
}
// getStoredBucketLifecycleConfiguration loads the lifecycle XML previously
// persisted for bucket, along with its transition-minimum-object-size value.
// The boolean reports whether a stored configuration exists. The returned
// byte slice is a defensive copy so callers cannot mutate the cached entry.
func (s3a *S3ApiServer) getStoredBucketLifecycleConfiguration(bucket string) ([]byte, string, bool, s3err.ErrorCode) {
	config, errCode := s3a.getBucketConfig(bucket)
	if errCode != s3err.ErrNone {
		return nil, "", false, errCode
	}
	entry := config.Entry
	if entry == nil || entry.Extended == nil {
		return nil, "", false, s3err.ErrNone
	}
	stored, ok := entry.Extended[bucketLifecycleConfigurationXMLKey]
	if !ok || len(stored) == 0 {
		// No stored configuration: not an error, just absent.
		return nil, "", false, s3err.ErrNone
	}
	minimumSize := normalizeBucketLifecycleTransitionMinimumObjectSize(
		string(entry.Extended[bucketLifecycleTransitionMinimumObjectSizeKey]),
	)
	xmlCopy := append([]byte(nil), stored...)
	return xmlCopy, minimumSize, true, s3err.ErrNone
}
// storeBucketLifecycleConfiguration persists the raw lifecycle XML and the
// normalized transition-minimum-object-size as extended attributes on the
// bucket's filer entry, via a copy-on-write config update.
func (s3a *S3ApiServer) storeBucketLifecycleConfiguration(bucket string, lifecycleXML []byte, transitionMinimumObjectSize string) s3err.ErrorCode {
	return s3a.updateBucketConfig(bucket, func(config *BucketConfig) error {
		entry := config.Entry
		if entry == nil {
			return fmt.Errorf("bucket %s is missing its filer entry", bucket)
		}
		if entry.Extended == nil {
			entry.Extended = make(map[string][]byte)
		}
		// Store a private copy of the XML so the caller's slice stays independent.
		entry.Extended[bucketLifecycleConfigurationXMLKey] = append([]byte(nil), lifecycleXML...)
		normalized := normalizeBucketLifecycleTransitionMinimumObjectSize(transitionMinimumObjectSize)
		entry.Extended[bucketLifecycleTransitionMinimumObjectSizeKey] = []byte(normalized)
		return nil
	})
}
// clearStoredBucketLifecycleConfiguration removes any persisted lifecycle
// configuration from the bucket's filer entry. Absent keys are a no-op.
func (s3a *S3ApiServer) clearStoredBucketLifecycleConfiguration(bucket string) s3err.ErrorCode {
	return s3a.updateBucketConfig(bucket, func(config *BucketConfig) error {
		if config.Entry == nil {
			return fmt.Errorf("bucket %s is missing its filer entry", bucket)
		}
		// delete on a nil map is a safe no-op, so no Extended nil-check is needed.
		for _, key := range []string{
			bucketLifecycleConfigurationXMLKey,
			bucketLifecycleTransitionMinimumObjectSizeKey,
		} {
			delete(config.Entry.Extended, key)
		}
		return nil
	})
}

126
weed/s3api/s3api_bucket_lifecycle_response_test.go

@ -0,0 +1,126 @@
package s3api
import (
"errors"
"net/http"
"net/http/httptest"
"strings"
"testing"
"time"
"github.com/gorilla/mux"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestGetBucketLifecycleConfigurationHandlerUsesStoredLifecycleConfig verifies
// that when a bucket carries stored lifecycle XML in its extended attributes,
// the GET handler returns that XML verbatim together with the stored
// transition-minimum-object-size header, instead of re-rendering the
// configuration from filer state. Byte-for-byte preservation is what keeps
// Terraform's plan/apply cycle stable.
func TestGetBucketLifecycleConfigurationHandlerUsesStoredLifecycleConfig(t *testing.T) {
	const bucket = "cleanup-test-net"
	const lifecycleXML = `<?xml version="1.0" encoding="UTF-8"?><LifecycleConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Rule><Filter></Filter><ID>rotation</ID><Expiration><Days>1</Days></Expiration><Status>Enabled</Status></Rule></LifecycleConfiguration>`
	s3a := newTestS3ApiServerWithMemoryIAM(t, nil)
	s3a.option = &S3ApiServerOption{BucketsPath: "/buckets"}
	// Pre-populate the config cache so the handler finds the stored lifecycle
	// without needing a live filer.
	s3a.bucketConfigCache = NewBucketConfigCache(time.Minute)
	s3a.bucketConfigCache.Set(bucket, &BucketConfig{
		Name: bucket,
		Entry: &filer_pb.Entry{
			Extended: map[string][]byte{
				bucketLifecycleConfigurationXMLKey:            []byte(lifecycleXML),
				bucketLifecycleTransitionMinimumObjectSizeKey: []byte("varies_by_storage_class"),
			},
		},
	})
	req := httptest.NewRequest(http.MethodGet, "/"+bucket+"?lifecycle", nil)
	req = mux.SetURLVars(req, map[string]string{"bucket": bucket})
	resp := httptest.NewRecorder()
	s3a.GetBucketLifecycleConfigurationHandler(resp, req)
	require.Equal(t, http.StatusOK, resp.Code)
	// The stored header value, not the default, must be echoed back.
	assert.Equal(t, "varies_by_storage_class", resp.Header().Get(bucketLifecycleTransitionMinimumObjectSizeHeader))
	// The body must be byte-for-byte the stored XML.
	assert.Equal(t, lifecycleXML, resp.Body.String())
}
// TestGetBucketLifecycleConfigurationHandlerDefaultsTransitionMinimumObjectSize
// verifies that when a stored lifecycle configuration carries no explicit
// transition-minimum-object-size attribute, the GET handler falls back to the
// default value in the response header while still returning the stored XML
// verbatim.
func TestGetBucketLifecycleConfigurationHandlerDefaultsTransitionMinimumObjectSize(t *testing.T) {
	const bucket = "cleanup-test-net"
	const lifecycleXML = `<?xml version="1.0" encoding="UTF-8"?><LifecycleConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Rule><Filter></Filter><ID>rotation</ID><Expiration><Days>1</Days></Expiration><Status>Enabled</Status></Rule></LifecycleConfiguration>`
	s3a := newTestS3ApiServerWithMemoryIAM(t, nil)
	s3a.option = &S3ApiServerOption{BucketsPath: "/buckets"}
	// Note: only the XML key is set here — the minimum-object-size key is
	// deliberately absent to exercise the default path.
	s3a.bucketConfigCache = NewBucketConfigCache(time.Minute)
	s3a.bucketConfigCache.Set(bucket, &BucketConfig{
		Name: bucket,
		Entry: &filer_pb.Entry{
			Extended: map[string][]byte{
				bucketLifecycleConfigurationXMLKey: []byte(lifecycleXML),
			},
		},
	})
	req := httptest.NewRequest(http.MethodGet, "/"+bucket+"?lifecycle", nil)
	req = mux.SetURLVars(req, map[string]string{"bucket": bucket})
	resp := httptest.NewRecorder()
	s3a.GetBucketLifecycleConfigurationHandler(resp, req)
	require.Equal(t, http.StatusOK, resp.Code)
	assert.Equal(t, defaultLifecycleTransitionMinimumObjectSize, resp.Header().Get(bucketLifecycleTransitionMinimumObjectSizeHeader))
	assert.Equal(t, lifecycleXML, resp.Body.String())
}
// TestPutBucketLifecycleConfigurationHandlerRejectsOversizedBody verifies that
// a lifecycle PUT whose body exceeds maxBucketLifecycleConfigurationSize is
// rejected with EntityTooLarge (the http.MaxBytesReader path) rather than
// being read in full.
func TestPutBucketLifecycleConfigurationHandlerRejectsOversizedBody(t *testing.T) {
	const bucket = "cleanup-test-net"
	s3a := newTestS3ApiServerWithMemoryIAM(t, nil)
	s3a.option = &S3ApiServerOption{BucketsPath: "/buckets"}
	s3a.bucketConfigCache = NewBucketConfigCache(time.Minute)
	s3a.bucketConfigCache.Set(bucket, &BucketConfig{
		Name: bucket,
		Entry: &filer_pb.Entry{},
	})
	// One byte over the limit is enough to trip the bound.
	req := httptest.NewRequest(http.MethodPut, "/"+bucket+"?lifecycle", strings.NewReader(strings.Repeat("x", maxBucketLifecycleConfigurationSize+1)))
	req = mux.SetURLVars(req, map[string]string{"bucket": bucket})
	resp := httptest.NewRecorder()
	s3a.PutBucketLifecycleConfigurationHandler(resp, req)
	require.Equal(t, s3err.GetAPIError(s3err.ErrEntityTooLarge).HTTPStatusCode, resp.Code)
	assert.Contains(t, resp.Body.String(), "<Code>EntityTooLarge</Code>")
}
// TestPutBucketLifecycleConfigurationHandlerMapsReadErrorsToInvalidRequest
// verifies that a body read failure that is NOT a size-limit violation is
// surfaced as InvalidRequest rather than EntityTooLarge.
func TestPutBucketLifecycleConfigurationHandlerMapsReadErrorsToInvalidRequest(t *testing.T) {
	const bucket = "cleanup-test-net"
	s3a := newTestS3ApiServerWithMemoryIAM(t, nil)
	s3a.option = &S3ApiServerOption{BucketsPath: "/buckets"}
	s3a.bucketConfigCache = NewBucketConfigCache(time.Minute)
	s3a.bucketConfigCache.Set(bucket, &BucketConfig{
		Name: bucket,
		Entry: &filer_pb.Entry{},
	})
	req := httptest.NewRequest(http.MethodPut, "/"+bucket+"?lifecycle", nil)
	req = mux.SetURLVars(req, map[string]string{"bucket": bucket})
	// Swap in a body whose Read always fails, simulating a broken client stream.
	req.Body = failingReadCloser{err: errors.New("read failed")}
	resp := httptest.NewRecorder()
	s3a.PutBucketLifecycleConfigurationHandler(resp, req)
	require.Equal(t, s3err.GetAPIError(s3err.ErrInvalidRequest).HTTPStatusCode, resp.Code)
	assert.Contains(t, resp.Body.String(), "<Code>InvalidRequest</Code>")
}
// failingReadCloser is a test double implementing io.ReadCloser whose Read
// always reports the configured error; it is used to exercise request-body
// read failures in handler tests.
type failingReadCloser struct {
	err error
}

// Read never yields data; it returns the injected error unchanged.
func (f failingReadCloser) Read(_ []byte) (int, error) {
	return 0, f.err
}

// Close always succeeds.
func (f failingReadCloser) Close() error {
	return nil
}

5
weed/s3api/s3api_handlers.go

@ -100,6 +100,11 @@ func writeSuccessResponseXML(w http.ResponseWriter, r *http.Request, response in
s3err.PostLog(r, http.StatusOK, s3err.ErrNone)
}
// writeSuccessResponseXMLBytes writes a 200 OK response carrying a
// pre-rendered XML payload (e.g. a stored lifecycle configuration returned
// verbatim) and records the success in the S3 access log.
func writeSuccessResponseXMLBytes(w http.ResponseWriter, r *http.Request, response []byte) {
	s3err.WriteResponse(w, r, http.StatusOK, response, s3err.MimeXML)
	s3err.PostLog(r, http.StatusOK, s3err.ErrNone)
}
// writeSuccessResponseEmpty writes a 200 OK response with an empty body.
func writeSuccessResponseEmpty(w http.ResponseWriter, r *http.Request) {
	s3err.WriteEmptyResponse(w, r, http.StatusOK)
}

Loading…
Cancel
Save