Browse Source

early return

pull/7434/head
chrislu 1 month ago
parent
commit
8084fa9234
  1. 16
      test/s3/retention/s3_bucket_delete_with_lock_test.go
  2. 11
      weed/s3api/s3api_bucket_handlers.go

16
test/s3/retention/s3_bucket_delete_with_lock_test.go

@@ -223,14 +223,18 @@ func TestBucketDeletionWithVersionedLocks(t *testing.T) {
// Clean up all versions
deleteAllObjectVersions(t, client, bucketName)
// Wait for eventual consistency
time.Sleep(500 * time.Millisecond)
// Now delete bucket should succeed
_, err = client.DeleteBucket(context.Background(), &s3.DeleteBucketInput{
// Wait for eventual consistency and attempt to delete the bucket with retry
require.Eventually(t, func() bool {
_, err := client.DeleteBucket(context.Background(), &s3.DeleteBucketInput{
Bucket: aws.String(bucketName),
})
require.NoError(t, err, "DeleteBucket should succeed after all locks expire")
if err != nil {
t.Logf("Retrying DeleteBucket due to: %v", err)
return false
}
return true
}, 5*time.Second, 500*time.Millisecond, "DeleteBucket should succeed after all locks expire")
t.Logf("Successfully deleted bucket after locks expired")
}

11
weed/s3api/s3api_bucket_handlers.go

@@ -339,6 +339,11 @@ func (s3a *S3ApiServer) hasObjectsWithActiveLocks(bucket string) (bool, error) {
return hasLocks, nil
}
const (
// lockCheckPaginationSize is the page size for listing directories during lock checks
lockCheckPaginationSize = 10000
)
// errStopPagination is a sentinel error to signal early termination of pagination
var errStopPagination = errors.New("stop pagination")
@@ -347,7 +352,7 @@ var errStopPagination = errors.New("stop pagination")
func (s3a *S3ApiServer) paginateEntries(dir string, fn func(entries []*filer_pb.Entry) error) error {
startFrom := ""
for {
entries, isLast, err := s3a.list(dir, "", startFrom, false, 10000)
entries, isLast, err := s3a.list(dir, "", startFrom, false, lockCheckPaginationSize)
if err != nil {
// Fail-safe: propagate error to prevent incorrect bucket deletion
return fmt.Errorf("failed to list directory %s: %w", dir, err)
@@ -427,6 +432,10 @@ func (s3a *S3ApiServer) recursivelyCheckLocks(dir string, relativePath string, h
if err := s3a.recursivelyCheckLocks(subDir, subRelativePath, hasLocks, currentTime); err != nil {
return err
}
// Early exit if a locked object was found in the subdirectory
if *hasLocks {
return errStopPagination
}
}
}
return nil

Loading…
Cancel
Save