
s3api: fix bucket-root listing w/ delimiter (#7827)

* s3api: fix bucket-root listing w/ delimiter

* test: improve mock robustness for bucket-root listing test

- Make testListEntriesStream implement interface explicitly without embedding
- Add prefix filtering logic to testFilerClient to simulate real filer behavior
- Special-case prefix='/' to not filter for bucket root compatibility
- Add required imports for metadata and strings packages

This addresses review comments about test mock brittleness and accuracy.

* test: add clarifying comment for mock filtering behavior

Add detailed comment explaining which ListEntriesRequest parameters
are implemented (Prefix) vs ignored (Limit, StartFromFileName, etc.)
in the test mock to improve code documentation and future maintenance.

* logging: add request-level log lines to the S3 handlers

* less logs: lower the verbosity of repetitive lock log messages

* renew less frequently once the lock is already held
Chris Lu committed 20 hours ago (committed by GitHub), commit f63d9ad390
Changed files:
1. weed/cluster/lock_client.go (19 changed lines)
2. weed/s3api/s3api_object_handlers_list.go (19 changed lines)
3. weed/s3api/s3api_object_handlers_list_test.go (81 changed lines)
4. weed/s3api/s3api_object_handlers_put.go (2 changed lines)
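
Before the per-file diffs, a minimal reproduction sketch of the user-visible symptom (not part of the change itself): a delimiter-only listing at the bucket root should return top-level folders as CommonPrefixes, but before this fix it came back empty. The sketch assumes an aws-sdk-go-v2 client, a local weed S3 gateway on localhost:8333, a bucket named "test-bucket" containing a top-level folder "Veeam/", and credentials taken from the environment; all of those names are placeholders, not values from this commit.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	// Assumed setup: SeaweedFS S3 gateway on localhost:8333, bucket "test-bucket"
	// with a top-level folder "Veeam/". Credentials come from the environment.
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
		o.BaseEndpoint = aws.String("http://localhost:8333")
		o.UsePathStyle = true
	})

	// Bucket-root listing: empty prefix, delimiter "/".
	out, err := client.ListObjectsV2(context.TODO(), &s3.ListObjectsV2Input{
		Bucket:    aws.String("test-bucket"),
		Delimiter: aws.String("/"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// With the fix, top-level folders come back as common prefixes (e.g. "Veeam/").
	// Before the fix, this loop printed nothing.
	for _, cp := range out.CommonPrefixes {
		fmt.Println(*cp.Prefix)
	}
}
```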

weed/cluster/lock_client.go (19 changed lines)

@@ -3,6 +3,7 @@ package cluster
 import (
     "context"
     "fmt"
+    "strings"
     "sync/atomic"
     "time"
@@ -102,7 +103,11 @@ func (lc *LockClient) StartLongLivedLock(key string, owner string, onLockOwnerCh
             case <-lock.cancelCh:
                 return
             default:
-                time.Sleep(lock_manager.RenewInterval)
+                if isLocked {
+                    time.Sleep(5 * lock_manager.RenewInterval)
+                } else {
+                    time.Sleep(lock_manager.RenewInterval)
+                }
             }
         }
     }()
@@ -129,7 +134,11 @@ func (lock *LiveLock) AttemptToLock(lockDuration time.Duration) error {
         return err
     }
     if errorMessage != "" {
-        glog.V(1).Infof("LOCK: doLock returned error message for key=%s: %s", lock.key, errorMessage)
+        if strings.Contains(errorMessage, "lock already owned") {
+            glog.V(3).Infof("LOCK: doLock returned error message for key=%s: %s", lock.key, errorMessage)
+        } else {
+            glog.V(2).Infof("LOCK: doLock returned error message for key=%s: %s", lock.key, errorMessage)
+        }
         time.Sleep(time.Second)
         return fmt.Errorf("%v", errorMessage)
     }
@@ -206,7 +215,7 @@ func (lock *LiveLock) doLock(lockDuration time.Duration) (errorMessage string, e
         errorMessage = resp.Error
         if resp.LockHostMovedTo != "" && resp.LockHostMovedTo != string(previousHostFiler) {
             // Only log if the host actually changed
-            glog.V(1).Infof("LOCK: Host changed from %s to %s for key=%s", previousHostFiler, resp.LockHostMovedTo, lock.key)
+            glog.V(2).Infof("LOCK: Host changed from %s to %s for key=%s", previousHostFiler, resp.LockHostMovedTo, lock.key)
             lock.hostFiler = pb.ServerAddress(resp.LockHostMovedTo)
             lock.lc.seedFiler = lock.hostFiler
         } else if resp.LockHostMovedTo != "" {
@@ -214,12 +223,12 @@ func (lock *LiveLock) doLock(lockDuration time.Duration) (errorMessage string, e
         }
         if resp.LockOwner != "" && resp.LockOwner != previousOwner {
             // Only log if the owner actually changed
-            glog.V(1).Infof("LOCK: Owner changed from %s to %s for key=%s", previousOwner, resp.LockOwner, lock.key)
+            glog.V(2).Infof("LOCK: Owner changed from %s to %s for key=%s", previousOwner, resp.LockOwner, lock.key)
             lock.owner = resp.LockOwner
         } else if resp.LockOwner != "" {
             lock.owner = resp.LockOwner
         } else if previousOwner != "" {
-            glog.V(1).Infof("LOCK: Owner cleared for key=%s", lock.key)
+            glog.V(2).Infof("LOCK: Owner cleared for key=%s", lock.key)
             lock.owner = ""
         }
     }
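
The StartLongLivedLock hunk above only changes how often the renewal goroutine wakes up: five renew intervals once the lock is held, the plain interval while still acquiring. A stripped-down sketch of that pacing, where renewInterval and tryLockOrRenew are stand-ins rather than the real lock_manager.RenewInterval and doLock call:

```go
package locksketch

import "time"

// renewLoop illustrates the pacing change: once the lock is held, the loop
// sleeps five intervals between renewals; while still acquiring, it keeps
// the normal cadence. Simplified stand-in, not the SeaweedFS implementation.
func renewLoop(cancelCh <-chan struct{}, renewInterval time.Duration, tryLockOrRenew func() bool) {
	for {
		select {
		case <-cancelCh:
			return // lock released or loop cancelled
		default:
			isLocked := tryLockOrRenew()
			if isLocked {
				// Lock already held: wake up less often just to renew it.
				time.Sleep(5 * renewInterval)
			} else {
				// Not acquired yet: retry at the normal interval.
				time.Sleep(renewInterval)
			}
		}
	}
}
```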

weed/s3api/s3api_object_handlers_list.go (19 changed lines)

@@ -53,19 +53,16 @@ func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Requ
     // collect parameters
     bucket, _ := s3_constants.GetBucketAndObject(r)
-    glog.V(3).Infof("ListObjectsV2Handler %s", bucket)

     originalPrefix, startAfter, delimiter, continuationToken, encodingTypeUrl, fetchOwner, maxKeys, allowUnordered, errCode := getListObjectsV2Args(r.URL.Query())
+    glog.V(2).Infof("ListObjectsV2Handler bucket=%s prefix=%s", bucket, originalPrefix)

     if errCode != s3err.ErrNone {
         s3err.WriteErrorResponse(w, r, errCode)
         return
     }

-    if maxKeys < 0 {
-        s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxKeys)
-        return
-    }
+    // maxKeys is uint16 here; negative values are rejected during parsing.

     // AWS S3 compatibility: allow-unordered cannot be used with delimiter
     if allowUnordered && delimiter != "" {
@@ -121,10 +118,10 @@ func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Requ
     // collect parameters
     bucket, _ := s3_constants.GetBucketAndObject(r)
-    glog.V(3).Infof("ListObjectsV1Handler %s", bucket)

     originalPrefix, marker, delimiter, encodingTypeUrl, maxKeys, allowUnordered, errCode := getListObjectsV1Args(r.URL.Query())
+    glog.V(2).Infof("ListObjectsV1Handler bucket=%s prefix=%s", bucket, originalPrefix)

     if errCode != s3err.ErrNone {
         s3err.WriteErrorResponse(w, r, errCode)
         return
@@ -447,9 +444,8 @@ func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, d
     // prefix and marker should be under dir, marker may contain "/"
     // maxKeys should be updated for each recursion
     // glog.V(4).Infof("doListFilerEntries dir: %s, prefix: %s, marker %s, maxKeys: %d, prefixEndsOnDelimiter: %+v", dir, prefix, marker, cursor.maxKeys, cursor.prefixEndsOnDelimiter)
-    if prefix == "/" && delimiter == "/" {
-        return
-    }
+    // When listing at bucket root with delimiter '/', prefix can be "/" after normalization.
+    // Returning early here would incorrectly hide all top-level entries (folders like "Veeam/").
     if cursor.maxKeys <= 0 {
         return // Don't set isTruncated here - let caller decide based on whether more entries exist
     }
@@ -720,7 +716,6 @@ func (s3a *S3ApiServer) ensureDirectoryAllEmpty(filerClient filer_pb.SeaweedFile
     return true, nil
 }
-
 // compareWithDelimiter compares two strings for sorting, treating the delimiter character
 // as having lower precedence than other characters to match AWS S3 behavior.
 // For example, with delimiter '/', 'foo/' should come before 'foo+1/' even though '+' < '/' in ASCII.

weed/s3api/s3api_object_handlers_list_test.go (81 changed lines)

@@ -1,14 +1,63 @@
 package s3api

 import (
+    "context"
+    "io"
     "net/http/httptest"
+    "strings"
     "testing"
     "time"

+    "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
     "github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
     "github.com/stretchr/testify/assert"
+    grpc "google.golang.org/grpc"
+    "google.golang.org/grpc/metadata"
 )

+type testListEntriesStream struct {
+    entries []*filer_pb.Entry
+    idx     int
+}
+
+func (s *testListEntriesStream) Recv() (*filer_pb.ListEntriesResponse, error) {
+    if s.idx >= len(s.entries) {
+        return nil, io.EOF
+    }
+    resp := &filer_pb.ListEntriesResponse{Entry: s.entries[s.idx]}
+    s.idx++
+    return resp, nil
+}
+
+func (s *testListEntriesStream) Header() (metadata.MD, error) { return metadata.MD{}, nil }
+func (s *testListEntriesStream) Trailer() metadata.MD         { return metadata.MD{} }
+func (s *testListEntriesStream) Close() error                 { return nil }
+func (s *testListEntriesStream) Context() context.Context     { return context.Background() }
+func (s *testListEntriesStream) SendMsg(m interface{}) error  { return nil }
+func (s *testListEntriesStream) RecvMsg(m interface{}) error  { return nil }
+func (s *testListEntriesStream) CloseSend() error             { return nil }
+
+type testFilerClient struct {
+    filer_pb.SeaweedFilerClient
+    entriesByDir map[string][]*filer_pb.Entry
+}
+
+func (c *testFilerClient) ListEntries(ctx context.Context, in *filer_pb.ListEntriesRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[filer_pb.ListEntriesResponse], error) {
+    entries := c.entriesByDir[in.Directory]
+    // Simplified mock: implements basic prefix filtering but ignores Limit, StartFromFileName, and InclusiveStartFrom
+    // to keep test logic focused. Prefix "/" is treated as no filter for bucket root compatibility.
+    if in.Prefix != "" && in.Prefix != "/" {
+        filtered := make([]*filer_pb.Entry, 0)
+        for _, e := range entries {
+            if strings.HasPrefix(e.Name, in.Prefix) {
+                filtered = append(filtered, e)
+            }
+        }
+        entries = filtered
+    }
+    return &testListEntriesStream{entries: entries}, nil
+}
+
 func TestListObjectsHandler(t *testing.T) {

     // https://docs.aws.amazon.com/AmazonS3/latest/API/v2-RESTBucketGET.html
@@ -53,6 +102,13 @@ func Test_normalizePrefixMarker(t *testing.T) {
         wantAlignedPrefix string
         wantAlignedMarker string
     }{
+        {"bucket root listing with delimiter",
+            args{"/",
+                ""},
+            "",
+            "",
+            "",
+        },
         {"prefix is a directory",
             args{"/parentDir/data/",
                 ""},
@@ -146,6 +202,31 @@ func TestAllowUnorderedParameterValidation(t *testing.T) {
     })
 }

+func TestDoListFilerEntries_BucketRootPrefixSlashDelimiterSlash_ListsDirectories(t *testing.T) {
+    // Regression test for a bug where doListFilerEntries returned early when
+    // prefix == "/" && delimiter == "/", causing bucket-root folder listings
+    // (e.g. Veeam v13) to return empty results.
+    s3a := &S3ApiServer{}
+    client := &testFilerClient{
+        entriesByDir: map[string][]*filer_pb.Entry{
+            "/buckets/test-bucket": {
+                {Name: "Veeam", IsDirectory: true, Attributes: &filer_pb.FuseAttributes{}},
+            },
+        },
+    }
+    cursor := &ListingCursor{maxKeys: 1000}
+    seen := make([]string, 0)
+    _, err := s3a.doListFilerEntries(client, "/buckets/test-bucket", "/", cursor, "", "/", false, func(dir string, entry *filer_pb.Entry) {
+        if entry.IsDirectory {
+            seen = append(seen, entry.Name)
+        }
+    })
+    assert.NoError(t, err)
+    assert.Contains(t, seen, "Veeam")
+}
+
 func TestAllowUnorderedWithDelimiterValidation(t *testing.T) {
     t.Run("should return error when allow-unordered=true and delimiter are both present", func(t *testing.T) {
         // Create a request with both allow-unordered=true and delimiter
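
The new regression test drives doListFilerEntries directly against the in-memory mock, so no filer or S3 gateway needs to be running; from the repository root, something like `go test ./weed/s3api -run TestDoListFilerEntries_BucketRootPrefixSlashDelimiterSlash` should run it in isolation.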

weed/s3api/s3api_object_handlers_put.go (2 changed lines)

@@ -76,7 +76,7 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request)
     // http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html

     bucket, object := s3_constants.GetBucketAndObject(r)
-    glog.V(3).Infof("PutObjectHandler %s %s", bucket, object)
+    glog.V(2).Infof("PutObjectHandler bucket=%s object=%s size=%d", bucket, object, r.ContentLength)

     _, err := validateContentMd5(r.Header)
     if err != nil {
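
Net effect of the logging tweaks across this commit: the per-request PutObject and ListObjects lines move from V(3) to V(2), while the repetitive lock messages drop from V(1) to V(2), or to V(3) for the expected "lock already owned" case. Assuming the standard glog -v flag is what controls verbosity here, running with -v=2 now surfaces request-level activity without the lock-renewal noise.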
