Browse Source
s3api: preserve lifecycle config responses for Terraform (#8805)
s3api: preserve lifecycle config responses for Terraform (#8805)
* s3api: preserve lifecycle configs for terraform * s3api: bound lifecycle config request bodies * s3api: make bucket config updates copy-on-write * s3api: tighten string slice cloning (pull/8807/head)
committed by
GitHub
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
8 changed files with 478 additions and 21 deletions
- 5 weed/s3api/policy_engine/types.go
- 17 weed/s3api/policy_engine/types_test.go
- 187 weed/s3api/s3api_bucket_config.go
- 47 weed/s3api/s3api_bucket_config_update_test.go
- 37 weed/s3api/s3api_bucket_handlers.go
- 75 weed/s3api/s3api_bucket_lifecycle_config.go
- 126 weed/s3api/s3api_bucket_lifecycle_response_test.go
- 5 weed/s3api/s3api_handlers.go
@ -0,0 +1,17 @@ |
|||||
|
package policy_engine |
||||
|
|
||||
|
import ( |
||||
|
"testing" |
||||
|
|
||||
|
"github.com/stretchr/testify/assert" |
||||
|
) |
||||
|
|
||||
|
func TestCloneStringOrStringSliceCopiesBackingSlice(t *testing.T) { |
||||
|
original := NewStringOrStringSlice("s3:GetObject", "s3:PutObject") |
||||
|
|
||||
|
cloned := CloneStringOrStringSlice(original) |
||||
|
cloned.values[0] = "s3:DeleteObject" |
||||
|
|
||||
|
assert.Equal(t, []string{"s3:GetObject", "s3:PutObject"}, original.Strings()) |
||||
|
assert.Equal(t, []string{"s3:DeleteObject", "s3:PutObject"}, cloned.Strings()) |
||||
|
} |
||||
@ -0,0 +1,47 @@ |
|||||
|
package s3api |
||||
|
|
||||
|
import ( |
||||
|
"testing" |
||||
|
"time" |
||||
|
|
||||
|
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" |
||||
|
"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" |
||||
|
"github.com/seaweedfs/seaweedfs/weed/s3api/s3err" |
||||
|
"github.com/stretchr/testify/assert" |
||||
|
"github.com/stretchr/testify/require" |
||||
|
) |
||||
|
|
||||
|
func TestUpdateBucketConfigDoesNotMutateCacheOnPersistFailure(t *testing.T) { |
||||
|
const bucket = "cleanup-test-net" |
||||
|
|
||||
|
s3a := newTestS3ApiServerWithMemoryIAM(t, nil) |
||||
|
s3a.option = &S3ApiServerOption{ |
||||
|
BucketsPath: "/buckets", |
||||
|
} |
||||
|
s3a.bucketConfigCache = NewBucketConfigCache(time.Minute) |
||||
|
s3a.bucketConfigCache.Set(bucket, &BucketConfig{ |
||||
|
Name: bucket, |
||||
|
Versioning: "", |
||||
|
Entry: &filer_pb.Entry{ |
||||
|
Name: bucket, |
||||
|
IsDirectory: true, |
||||
|
Extended: map[string][]byte{}, |
||||
|
}, |
||||
|
}) |
||||
|
|
||||
|
// This test server only has in-memory IAM state and no filer connection, so
|
||||
|
// updateBucketConfig is expected to fail during the persist step with an
|
||||
|
// internal error. The assertion below verifies that the cached config stays
|
||||
|
// unchanged when that write path fails.
|
||||
|
errCode := s3a.updateBucketConfig(bucket, func(config *BucketConfig) error { |
||||
|
config.Versioning = s3_constants.VersioningEnabled |
||||
|
return nil |
||||
|
}) |
||||
|
|
||||
|
require.Equal(t, s3err.ErrInternalError, errCode) |
||||
|
|
||||
|
config, found := s3a.bucketConfigCache.Get(bucket) |
||||
|
require.True(t, found) |
||||
|
assert.Empty(t, config.Versioning) |
||||
|
assert.NotContains(t, config.Entry.Extended, s3_constants.ExtVersioningKey) |
||||
|
} |
||||
@ -0,0 +1,75 @@ |
|||||
|
package s3api |
||||
|
|
||||
|
import ( |
||||
|
"fmt" |
||||
|
"strings" |
||||
|
|
||||
|
"github.com/seaweedfs/seaweedfs/weed/s3api/s3err" |
||||
|
) |
||||
|
|
||||
|
// Keys under which the lifecycle document is persisted on the bucket's filer
// entry, plus the response header and limits used by the lifecycle handlers.
const (
	bucketLifecycleConfigurationXMLKey            = "s3-bucket-lifecycle-configuration-xml"
	bucketLifecycleTransitionMinimumObjectSizeKey = "s3-bucket-lifecycle-transition-default-minimum-object-size"

	// Header echoed back on GET responses, mirroring AWS S3 behavior.
	bucketLifecycleTransitionMinimumObjectSizeHeader = "X-Amz-Transition-Default-Minimum-Object-Size"

	// Value used when no transition minimum object size was stored.
	defaultLifecycleTransitionMinimumObjectSize = "all_storage_classes_128K"

	// Upper bound (1 MiB) on accepted lifecycle request bodies.
	maxBucketLifecycleConfigurationSize = 1 << 20
)

// normalizeBucketLifecycleTransitionMinimumObjectSize trims surrounding
// whitespace from value and substitutes the default when nothing remains.
func normalizeBucketLifecycleTransitionMinimumObjectSize(value string) string {
	if trimmed := strings.TrimSpace(value); trimmed != "" {
		return trimmed
	}
	return defaultLifecycleTransitionMinimumObjectSize
}
||||
|
|
||||
|
func (s3a *S3ApiServer) getStoredBucketLifecycleConfiguration(bucket string) ([]byte, string, bool, s3err.ErrorCode) { |
||||
|
config, errCode := s3a.getBucketConfig(bucket) |
||||
|
if errCode != s3err.ErrNone { |
||||
|
return nil, "", false, errCode |
||||
|
} |
||||
|
if config.Entry == nil || config.Entry.Extended == nil { |
||||
|
return nil, "", false, s3err.ErrNone |
||||
|
} |
||||
|
|
||||
|
lifecycleXML, found := config.Entry.Extended[bucketLifecycleConfigurationXMLKey] |
||||
|
if !found || len(lifecycleXML) == 0 { |
||||
|
return nil, "", false, s3err.ErrNone |
||||
|
} |
||||
|
|
||||
|
transitionMinimumObjectSize := normalizeBucketLifecycleTransitionMinimumObjectSize( |
||||
|
string(config.Entry.Extended[bucketLifecycleTransitionMinimumObjectSizeKey]), |
||||
|
) |
||||
|
|
||||
|
return append([]byte(nil), lifecycleXML...), transitionMinimumObjectSize, true, s3err.ErrNone |
||||
|
} |
||||
|
|
||||
|
func (s3a *S3ApiServer) storeBucketLifecycleConfiguration(bucket string, lifecycleXML []byte, transitionMinimumObjectSize string) s3err.ErrorCode { |
||||
|
return s3a.updateBucketConfig(bucket, func(config *BucketConfig) error { |
||||
|
if config.Entry == nil { |
||||
|
return fmt.Errorf("bucket %s is missing its filer entry", bucket) |
||||
|
} |
||||
|
if config.Entry.Extended == nil { |
||||
|
config.Entry.Extended = make(map[string][]byte) |
||||
|
} |
||||
|
|
||||
|
config.Entry.Extended[bucketLifecycleConfigurationXMLKey] = append([]byte(nil), lifecycleXML...) |
||||
|
config.Entry.Extended[bucketLifecycleTransitionMinimumObjectSizeKey] = []byte( |
||||
|
normalizeBucketLifecycleTransitionMinimumObjectSize(transitionMinimumObjectSize), |
||||
|
) |
||||
|
|
||||
|
return nil |
||||
|
}) |
||||
|
} |
||||
|
|
||||
|
func (s3a *S3ApiServer) clearStoredBucketLifecycleConfiguration(bucket string) s3err.ErrorCode { |
||||
|
return s3a.updateBucketConfig(bucket, func(config *BucketConfig) error { |
||||
|
if config.Entry == nil { |
||||
|
return fmt.Errorf("bucket %s is missing its filer entry", bucket) |
||||
|
} |
||||
|
|
||||
|
delete(config.Entry.Extended, bucketLifecycleConfigurationXMLKey) |
||||
|
delete(config.Entry.Extended, bucketLifecycleTransitionMinimumObjectSizeKey) |
||||
|
return nil |
||||
|
}) |
||||
|
} |
||||
@ -0,0 +1,126 @@ |
|||||
|
package s3api |
||||
|
|
||||
|
import ( |
||||
|
"errors" |
||||
|
"net/http" |
||||
|
"net/http/httptest" |
||||
|
"strings" |
||||
|
"testing" |
||||
|
"time" |
||||
|
|
||||
|
"github.com/gorilla/mux" |
||||
|
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" |
||||
|
"github.com/seaweedfs/seaweedfs/weed/s3api/s3err" |
||||
|
"github.com/stretchr/testify/assert" |
||||
|
"github.com/stretchr/testify/require" |
||||
|
) |
||||
|
|
||||
|
func TestGetBucketLifecycleConfigurationHandlerUsesStoredLifecycleConfig(t *testing.T) { |
||||
|
const bucket = "cleanup-test-net" |
||||
|
const lifecycleXML = `<?xml version="1.0" encoding="UTF-8"?><LifecycleConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Rule><Filter></Filter><ID>rotation</ID><Expiration><Days>1</Days></Expiration><Status>Enabled</Status></Rule></LifecycleConfiguration>` |
||||
|
|
||||
|
s3a := newTestS3ApiServerWithMemoryIAM(t, nil) |
||||
|
s3a.option = &S3ApiServerOption{BucketsPath: "/buckets"} |
||||
|
s3a.bucketConfigCache = NewBucketConfigCache(time.Minute) |
||||
|
s3a.bucketConfigCache.Set(bucket, &BucketConfig{ |
||||
|
Name: bucket, |
||||
|
Entry: &filer_pb.Entry{ |
||||
|
Extended: map[string][]byte{ |
||||
|
bucketLifecycleConfigurationXMLKey: []byte(lifecycleXML), |
||||
|
bucketLifecycleTransitionMinimumObjectSizeKey: []byte("varies_by_storage_class"), |
||||
|
}, |
||||
|
}, |
||||
|
}) |
||||
|
|
||||
|
req := httptest.NewRequest(http.MethodGet, "/"+bucket+"?lifecycle", nil) |
||||
|
req = mux.SetURLVars(req, map[string]string{"bucket": bucket}) |
||||
|
resp := httptest.NewRecorder() |
||||
|
|
||||
|
s3a.GetBucketLifecycleConfigurationHandler(resp, req) |
||||
|
|
||||
|
require.Equal(t, http.StatusOK, resp.Code) |
||||
|
assert.Equal(t, "varies_by_storage_class", resp.Header().Get(bucketLifecycleTransitionMinimumObjectSizeHeader)) |
||||
|
assert.Equal(t, lifecycleXML, resp.Body.String()) |
||||
|
} |
||||
|
|
||||
|
func TestGetBucketLifecycleConfigurationHandlerDefaultsTransitionMinimumObjectSize(t *testing.T) { |
||||
|
const bucket = "cleanup-test-net" |
||||
|
const lifecycleXML = `<?xml version="1.0" encoding="UTF-8"?><LifecycleConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Rule><Filter></Filter><ID>rotation</ID><Expiration><Days>1</Days></Expiration><Status>Enabled</Status></Rule></LifecycleConfiguration>` |
||||
|
|
||||
|
s3a := newTestS3ApiServerWithMemoryIAM(t, nil) |
||||
|
s3a.option = &S3ApiServerOption{BucketsPath: "/buckets"} |
||||
|
s3a.bucketConfigCache = NewBucketConfigCache(time.Minute) |
||||
|
s3a.bucketConfigCache.Set(bucket, &BucketConfig{ |
||||
|
Name: bucket, |
||||
|
Entry: &filer_pb.Entry{ |
||||
|
Extended: map[string][]byte{ |
||||
|
bucketLifecycleConfigurationXMLKey: []byte(lifecycleXML), |
||||
|
}, |
||||
|
}, |
||||
|
}) |
||||
|
|
||||
|
req := httptest.NewRequest(http.MethodGet, "/"+bucket+"?lifecycle", nil) |
||||
|
req = mux.SetURLVars(req, map[string]string{"bucket": bucket}) |
||||
|
resp := httptest.NewRecorder() |
||||
|
|
||||
|
s3a.GetBucketLifecycleConfigurationHandler(resp, req) |
||||
|
|
||||
|
require.Equal(t, http.StatusOK, resp.Code) |
||||
|
assert.Equal(t, defaultLifecycleTransitionMinimumObjectSize, resp.Header().Get(bucketLifecycleTransitionMinimumObjectSizeHeader)) |
||||
|
assert.Equal(t, lifecycleXML, resp.Body.String()) |
||||
|
} |
||||
|
|
||||
|
func TestPutBucketLifecycleConfigurationHandlerRejectsOversizedBody(t *testing.T) { |
||||
|
const bucket = "cleanup-test-net" |
||||
|
|
||||
|
s3a := newTestS3ApiServerWithMemoryIAM(t, nil) |
||||
|
s3a.option = &S3ApiServerOption{BucketsPath: "/buckets"} |
||||
|
s3a.bucketConfigCache = NewBucketConfigCache(time.Minute) |
||||
|
s3a.bucketConfigCache.Set(bucket, &BucketConfig{ |
||||
|
Name: bucket, |
||||
|
Entry: &filer_pb.Entry{}, |
||||
|
}) |
||||
|
|
||||
|
req := httptest.NewRequest(http.MethodPut, "/"+bucket+"?lifecycle", strings.NewReader(strings.Repeat("x", maxBucketLifecycleConfigurationSize+1))) |
||||
|
req = mux.SetURLVars(req, map[string]string{"bucket": bucket}) |
||||
|
resp := httptest.NewRecorder() |
||||
|
|
||||
|
s3a.PutBucketLifecycleConfigurationHandler(resp, req) |
||||
|
|
||||
|
require.Equal(t, s3err.GetAPIError(s3err.ErrEntityTooLarge).HTTPStatusCode, resp.Code) |
||||
|
assert.Contains(t, resp.Body.String(), "<Code>EntityTooLarge</Code>") |
||||
|
} |
||||
|
|
||||
|
func TestPutBucketLifecycleConfigurationHandlerMapsReadErrorsToInvalidRequest(t *testing.T) { |
||||
|
const bucket = "cleanup-test-net" |
||||
|
|
||||
|
s3a := newTestS3ApiServerWithMemoryIAM(t, nil) |
||||
|
s3a.option = &S3ApiServerOption{BucketsPath: "/buckets"} |
||||
|
s3a.bucketConfigCache = NewBucketConfigCache(time.Minute) |
||||
|
s3a.bucketConfigCache.Set(bucket, &BucketConfig{ |
||||
|
Name: bucket, |
||||
|
Entry: &filer_pb.Entry{}, |
||||
|
}) |
||||
|
|
||||
|
req := httptest.NewRequest(http.MethodPut, "/"+bucket+"?lifecycle", nil) |
||||
|
req = mux.SetURLVars(req, map[string]string{"bucket": bucket}) |
||||
|
req.Body = failingReadCloser{err: errors.New("read failed")} |
||||
|
resp := httptest.NewRecorder() |
||||
|
|
||||
|
s3a.PutBucketLifecycleConfigurationHandler(resp, req) |
||||
|
|
||||
|
require.Equal(t, s3err.GetAPIError(s3err.ErrInvalidRequest).HTTPStatusCode, resp.Code) |
||||
|
assert.Contains(t, resp.Body.String(), "<Code>InvalidRequest</Code>") |
||||
|
} |
||||
|
|
||||
|
type failingReadCloser struct { |
||||
|
err error |
||||
|
} |
||||
|
|
||||
|
func (f failingReadCloser) Read(_ []byte) (int, error) { |
||||
|
return 0, f.err |
||||
|
} |
||||
|
|
||||
|
func (f failingReadCloser) Close() error { |
||||
|
return nil |
||||
|
} |
||||
Write
Preview
Loading…
Cancel
Save
Reference in new issue