Browse Source

Fix error on deleting non-empty bucket (#8376)

* Move check for non-empty bucket deletion out of `WithFilerClient` call

* Added a proper check for whether a bucket has "user" objects
pull/7183/merge
Michał Szynkiewicz 1 day ago
committed by GitHub
parent
commit
2f837c4780
No known key found for this signature in database GPG Key ID: B5690EEEBB952194
  1. 8
      .github/workflows/s3-example-integration-tests.yml
  2. 52
      test/s3/normal/s3_integration_test.go
  3. 57
      weed/s3api/s3api_bucket_handlers.go

8
.github/workflows/s3-example-integration-tests.yml

@@ -39,6 +39,14 @@ jobs:
echo "=== Running S3 Integration Tests ==="
go test -v -timeout=60s -run TestS3Integration ./...
- name: Run S3 DeleteBucketNotEmpty Tests
timeout-minutes: 15
working-directory: test/s3/normal
run: |
set -x
echo "=== Running S3 DeleteBucketNotEmpty Tests ==="
go test -v -timeout=60s -run TestS3DeleteBucketNotEmpty ./...
- name: Run IAM Integration Tests
timeout-minutes: 15
working-directory: test/s3/normal

52
test/s3/normal/s3_integration_test.go

@@ -116,8 +116,9 @@ func findAvailablePort() (int, error) {
return addr.Port, nil
}
// startMiniCluster starts a weed mini instance directly without exec
func startMiniCluster(t *testing.T) (*TestCluster, error) {
// startMiniCluster starts a weed mini instance directly without exec.
// Extra flags (e.g. "-s3.allowDeleteBucketNotEmpty=false") can be appended via extraArgs.
func startMiniCluster(t *testing.T, extraArgs ...string) (*TestCluster, error) {
// Find available ports
masterPort, err := findAvailablePort()
if err != nil {
@@ -192,7 +193,7 @@ func startMiniCluster(t *testing.T) (*TestCluster, error) {
// Configure args for mini command
// Note: When running via 'go test', os.Args[0] is the test binary
// We need to make it look like we're running 'weed mini'
os.Args = []string{
os.Args = append([]string{
"weed",
"-dir=" + testDir,
"-master.port=" + strconv.Itoa(masterPort),
@@ -205,7 +206,7 @@ func startMiniCluster(t *testing.T) (*TestCluster, error) {
"-ip=127.0.0.1",
"-master.peers=none", // Faster startup
"-s3.iam.readOnly=false", // Enable IAM write operations for tests
}
}, extraArgs...)
// Suppress most logging during tests
glog.MaxSize = 1024 * 1024
@@ -779,6 +780,49 @@ func testDeleteBucket(t *testing.T, cluster *TestCluster) {
t.Logf("✓ Deleted bucket: %s", bucketName)
}
// TestS3DeleteBucketNotEmpty verifies DeleteBucket behavior when the server is
// started with -s3.allowDeleteBucketNotEmpty=false: deleting a bucket that
// still holds a user object must be rejected with a BucketNotEmpty error,
// while deleting a genuinely empty bucket must still succeed.
func TestS3DeleteBucketNotEmpty(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	// Boot a mini cluster with non-empty bucket deletion disabled.
	cluster, err := startMiniCluster(t, "-s3.allowDeleteBucketNotEmpty=false")
	require.NoError(t, err)
	defer cluster.Stop()

	t.Run("DeleteNonEmptyBucketFails", func(t *testing.T) {
		bucketName := createTestBucket(t, cluster, "test-notempty-")

		// Seed the bucket with one object so it is non-empty.
		putReq := &s3.PutObjectInput{
			Bucket: aws.String(bucketName),
			Key:    aws.String("keep-me.txt"),
			Body:   bytes.NewReader([]byte("data")),
		}
		_, putErr := cluster.s3Client.PutObject(putReq)
		require.NoError(t, putErr)

		// The delete must be rejected with BucketNotEmpty (HTTP 409).
		delReq := &s3.DeleteBucketInput{Bucket: aws.String(bucketName)}
		_, delErr := cluster.s3Client.DeleteBucket(delReq)
		require.Error(t, delErr, "deleting a non-empty bucket should fail")

		var apiErr awserr.Error
		require.ErrorAs(t, delErr, &apiErr)
		assert.Equal(t, "BucketNotEmpty", apiErr.Code(),
			"expected BucketNotEmpty error code, got %s: %s", apiErr.Code(), apiErr.Message())
	})

	t.Run("DeleteEmptyBucketSucceeds", func(t *testing.T) {
		bucketName := createTestBucket(t, cluster, "test-empty-")

		// An empty bucket is always deletable, regardless of the flag.
		delReq := &s3.DeleteBucketInput{Bucket: aws.String(bucketName)}
		_, delErr := cluster.s3Client.DeleteBucket(delReq)
		require.NoError(t, delErr, "deleting an empty bucket should succeed")
	})
}
// randomString generates a random string for unique naming
func randomString(length int) string {
const charset = "abcdefghijklmnopqrstuvwxyz0123456789"

57
weed/s3api/s3api_bucket_handlers.go

@@ -371,20 +371,18 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque
}
}
err := s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
if !s3a.option.AllowDeleteBucketNotEmpty {
entries, _, err := s3a.list(s3a.option.BucketsPath+"/"+bucket, "", "", false, 2)
if err != nil {
return fmt.Errorf("failed to list bucket %s: %v", bucket, err)
}
for _, entry := range entries {
// Allow bucket deletion if only special directories remain
if entry.Name != s3_constants.MultipartUploadsFolder &&
!strings.HasSuffix(entry.Name, s3_constants.VersionsFolder) {
return errors.New(s3err.GetAPIError(s3err.ErrBucketNotEmpty).Code)
}
}
if !s3a.option.AllowDeleteBucketNotEmpty {
if hasUserObjects, err := s3a.bucketHasUserObjects(bucket); err != nil {
glog.Errorf("failed to list bucket %s: %v", bucket, err)
s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
return
} else if hasUserObjects {
s3err.WriteErrorResponse(w, r, s3err.ErrBucketNotEmpty)
return
}
}
err := s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
// delete collection
deleteCollectionRequest := &filer_pb.DeleteCollectionRequest{
@@ -400,11 +398,7 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque
})
if err != nil {
s3ErrorCode := s3err.ErrInternalError
if err.Error() == s3err.GetAPIError(s3err.ErrBucketNotEmpty).Code {
s3ErrorCode = s3err.ErrBucketNotEmpty
}
s3err.WriteErrorResponse(w, r, s3ErrorCode)
s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
return
}
@@ -421,6 +415,33 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque
s3err.WriteEmptyResponse(w, r, http.StatusNoContent)
}
// bucketHasUserObjects reports whether a bucket contains any non-special entries.
// Special entries (.uploads, *.versions) are internal to S3 and don't count as
// user objects, so a bucket holding only those is considered empty.
// It pages through the bucket listing and returns true as soon as a user
// object is found; any listing error is returned to the caller.
func (s3a *S3ApiServer) bucketHasUserObjects(bucket string) (bool, error) {
	bucketPath := s3a.option.BucketsPath + "/" + bucket
	startFrom := ""
	// Start with a small batch — most non-empty buckets have a real object early.
	// If we only find special entries, switch to larger batches to page through quickly.
	limit := uint32(10)
	for {
		entries, isLast, err := s3a.list(bucketPath, "", startFrom, false, limit)
		if err != nil {
			return false, err
		}
		for _, entry := range entries {
			if entry.Name != s3_constants.MultipartUploadsFolder &&
				!strings.HasSuffix(entry.Name, s3_constants.VersionsFolder) {
				return true, nil
			}
			// Advance the cursor so the next page starts after this entry.
			startFrom = entry.Name
		}
		if isLast {
			return false, nil
		}
		// Defensive: an empty page that is not marked last would otherwise
		// loop forever on an unchanged startFrom cursor — treat it as the end.
		if len(entries) == 0 {
			return false, nil
		}
		limit = 1000
	}
}
// hasObjectsWithActiveLocks checks if any objects in the bucket have active retention or legal hold
// Delegates to the shared HasObjectsWithActiveLocks function in object_lock_utils.go
func (s3a *S3ApiServer) hasObjectsWithActiveLocks(ctx context.Context, bucket string) (bool, error) {

Loading…
Cancel
Save