diff --git a/weed/cluster/lock_client.go b/weed/cluster/lock_client.go
index a7387812e..ae3e4a8cd 100644
--- a/weed/cluster/lock_client.go
+++ b/weed/cluster/lock_client.go
@@ -3,6 +3,7 @@ package cluster
 import (
 	"context"
 	"fmt"
+	"strings"
 	"sync/atomic"
 	"time"
 
@@ -102,7 +103,11 @@ func (lc *LockClient) StartLongLivedLock(key string, owner string, onLockOwnerCh
 			case <-lock.cancelCh:
 				return
 			default:
-				time.Sleep(lock_manager.RenewInterval)
+				if isLocked {
+					time.Sleep(5 * lock_manager.RenewInterval)
+				} else {
+					time.Sleep(lock_manager.RenewInterval)
+				}
 			}
 		}
 	}()
@@ -129,7 +134,11 @@ func (lock *LiveLock) AttemptToLock(lockDuration time.Duration) error {
 		return err
 	}
 	if errorMessage != "" {
-		glog.V(1).Infof("LOCK: doLock returned error message for key=%s: %s", lock.key, errorMessage)
+		if strings.Contains(errorMessage, "lock already owned") {
+			glog.V(3).Infof("LOCK: doLock returned error message for key=%s: %s", lock.key, errorMessage)
+		} else {
+			glog.V(2).Infof("LOCK: doLock returned error message for key=%s: %s", lock.key, errorMessage)
+		}
 		time.Sleep(time.Second)
 		return fmt.Errorf("%v", errorMessage)
 	}
@@ -206,7 +215,7 @@ func (lock *LiveLock) doLock(lockDuration time.Duration) (errorMessage string, e
 	errorMessage = resp.Error
 	if resp.LockHostMovedTo != "" && resp.LockHostMovedTo != string(previousHostFiler) {
 		// Only log if the host actually changed
-		glog.V(1).Infof("LOCK: Host changed from %s to %s for key=%s", previousHostFiler, resp.LockHostMovedTo, lock.key)
+		glog.V(2).Infof("LOCK: Host changed from %s to %s for key=%s", previousHostFiler, resp.LockHostMovedTo, lock.key)
 		lock.hostFiler = pb.ServerAddress(resp.LockHostMovedTo)
 		lock.lc.seedFiler = lock.hostFiler
 	} else if resp.LockHostMovedTo != "" {
@@ -214,12 +223,12 @@ func (lock *LiveLock) doLock(lockDuration time.Duration) (errorMessage string, e
 	}
 	if resp.LockOwner != "" && resp.LockOwner != previousOwner {
 		// Only log if the owner actually changed
-		glog.V(1).Infof("LOCK: Owner changed from %s to %s for key=%s", previousOwner, resp.LockOwner, lock.key)
+		glog.V(2).Infof("LOCK: Owner changed from %s to %s for key=%s", previousOwner, resp.LockOwner, lock.key)
 		lock.owner = resp.LockOwner
 	} else if resp.LockOwner != "" {
 		lock.owner = resp.LockOwner
 	} else if previousOwner != "" {
-		glog.V(1).Infof("LOCK: Owner cleared for key=%s", lock.key)
+		glog.V(2).Infof("LOCK: Owner cleared for key=%s", lock.key)
 		lock.owner = ""
 	}
 }
diff --git a/weed/s3api/s3api_object_handlers_list.go b/weed/s3api/s3api_object_handlers_list.go
index b844a039d..22a671e67 100644
--- a/weed/s3api/s3api_object_handlers_list.go
+++ b/weed/s3api/s3api_object_handlers_list.go
@@ -53,19 +53,16 @@ func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Requ
 
 	// collect parameters
 	bucket, _ := s3_constants.GetBucketAndObject(r)
-	glog.V(3).Infof("ListObjectsV2Handler %s", bucket)
-
 	originalPrefix, startAfter, delimiter, continuationToken, encodingTypeUrl, fetchOwner, maxKeys, allowUnordered, errCode := getListObjectsV2Args(r.URL.Query())
 
+	glog.V(2).Infof("ListObjectsV2Handler bucket=%s prefix=%s", bucket, originalPrefix)
+
 	if errCode != s3err.ErrNone {
 		s3err.WriteErrorResponse(w, r, errCode)
 		return
 	}
 
-	if maxKeys < 0 {
-		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxKeys)
-		return
-	}
+	// maxKeys is uint16 here; negative values are rejected during parsing.
 
 	// AWS S3 compatibility: allow-unordered cannot be used with delimiter
 	if allowUnordered && delimiter != "" {
@@ -121,10 +118,10 @@ func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Requ
 
 	// collect parameters
 	bucket, _ := s3_constants.GetBucketAndObject(r)
-	glog.V(3).Infof("ListObjectsV1Handler %s", bucket)
-
 	originalPrefix, marker, delimiter, encodingTypeUrl, maxKeys, allowUnordered, errCode := getListObjectsV1Args(r.URL.Query())
 
+	glog.V(2).Infof("ListObjectsV1Handler bucket=%s prefix=%s", bucket, originalPrefix)
+
 	if errCode != s3err.ErrNone {
 		s3err.WriteErrorResponse(w, r, errCode)
 		return
@@ -447,9 +444,8 @@ func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, d
 	// prefix and marker should be under dir, marker may contain "/"
 	// maxKeys should be updated for each recursion
 	// glog.V(4).Infof("doListFilerEntries dir: %s, prefix: %s, marker %s, maxKeys: %d, prefixEndsOnDelimiter: %+v", dir, prefix, marker, cursor.maxKeys, cursor.prefixEndsOnDelimiter)
-	if prefix == "/" && delimiter == "/" {
-		return
-	}
+	// When listing at bucket root with delimiter '/', prefix can be "/" after normalization.
+	// Returning early here would incorrectly hide all top-level entries (folders like "Veeam/").
 	if cursor.maxKeys <= 0 {
 		return // Don't set isTruncated here - let caller decide based on whether more entries exist
 	}
@@ -720,7 +716,6 @@ func (s3a *S3ApiServer) ensureDirectoryAllEmpty(filerClient filer_pb.SeaweedFile
 	return true, nil
 }
 
-
 // compareWithDelimiter compares two strings for sorting, treating the delimiter character
 // as having lower precedence than other characters to match AWS S3 behavior.
 // For example, with delimiter '/', 'foo/' should come before 'foo+1/' even though '+' < '/' in ASCII.
diff --git a/weed/s3api/s3api_object_handlers_list_test.go b/weed/s3api/s3api_object_handlers_list_test.go
index 858d30731..b24771e8a 100644
--- a/weed/s3api/s3api_object_handlers_list_test.go
+++ b/weed/s3api/s3api_object_handlers_list_test.go
@@ -1,14 +1,63 @@
 package s3api
 
 import (
+	"context"
+	"io"
 	"net/http/httptest"
+	"strings"
 	"testing"
 	"time"
 
+	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
 	"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
 	"github.com/stretchr/testify/assert"
+	grpc "google.golang.org/grpc"
+	"google.golang.org/grpc/metadata"
 )
 
+type testListEntriesStream struct {
+	entries []*filer_pb.Entry
+	idx     int
+}
+
+func (s *testListEntriesStream) Recv() (*filer_pb.ListEntriesResponse, error) {
+	if s.idx >= len(s.entries) {
+		return nil, io.EOF
+	}
+	resp := &filer_pb.ListEntriesResponse{Entry: s.entries[s.idx]}
+	s.idx++
+	return resp, nil
+}
+
+func (s *testListEntriesStream) Header() (metadata.MD, error) { return metadata.MD{}, nil }
+func (s *testListEntriesStream) Trailer() metadata.MD         { return metadata.MD{} }
+func (s *testListEntriesStream) Close() error                 { return nil }
+func (s *testListEntriesStream) Context() context.Context     { return context.Background() }
+func (s *testListEntriesStream) SendMsg(m interface{}) error  { return nil }
+func (s *testListEntriesStream) RecvMsg(m interface{}) error  { return nil }
+func (s *testListEntriesStream) CloseSend() error             { return nil }
+
+type testFilerClient struct {
+	filer_pb.SeaweedFilerClient
+	entriesByDir map[string][]*filer_pb.Entry
+}
+
+func (c *testFilerClient) ListEntries(ctx context.Context, in *filer_pb.ListEntriesRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[filer_pb.ListEntriesResponse], error) {
+	entries := c.entriesByDir[in.Directory]
+	// Simplified mock: implements basic prefix filtering but ignores Limit, StartFromFileName, and InclusiveStartFrom
+	// to keep test logic focused. Prefix "/" is treated as no filter for bucket root compatibility.
+	if in.Prefix != "" && in.Prefix != "/" {
+		filtered := make([]*filer_pb.Entry, 0)
+		for _, e := range entries {
+			if strings.HasPrefix(e.Name, in.Prefix) {
+				filtered = append(filtered, e)
+			}
+		}
+		entries = filtered
+	}
+	return &testListEntriesStream{entries: entries}, nil
+}
+
 func TestListObjectsHandler(t *testing.T) {
 
 	// https://docs.aws.amazon.com/AmazonS3/latest/API/v2-RESTBucketGET.html
@@ -53,6 +102,13 @@ func Test_normalizePrefixMarker(t *testing.T) {
 		wantAlignedPrefix string
 		wantAlignedMarker string
 	}{
+		{"bucket root listing with delimiter",
+			args{"/",
+				""},
+			"",
+			"",
+			"",
+		},
 		{"prefix is a directory",
 			args{"/parentDir/data/",
 				""},
@@ -146,6 +202,31 @@ func TestAllowUnorderedParameterValidation(t *testing.T) {
 	})
 }
 
+func TestDoListFilerEntries_BucketRootPrefixSlashDelimiterSlash_ListsDirectories(t *testing.T) {
+	// Regression test for a bug where doListFilerEntries returned early when
+	// prefix == "/" && delimiter == "/", causing bucket-root folder listings
+	// (e.g. Veeam v13) to return empty results.
+
+	s3a := &S3ApiServer{}
+	client := &testFilerClient{
+		entriesByDir: map[string][]*filer_pb.Entry{
+			"/buckets/test-bucket": {
+				{Name: "Veeam", IsDirectory: true, Attributes: &filer_pb.FuseAttributes{}},
+			},
+		},
+	}
+
+	cursor := &ListingCursor{maxKeys: 1000}
+	seen := make([]string, 0)
+	_, err := s3a.doListFilerEntries(client, "/buckets/test-bucket", "/", cursor, "", "/", false, func(dir string, entry *filer_pb.Entry) {
+		if entry.IsDirectory {
+			seen = append(seen, entry.Name)
+		}
+	})
+	assert.NoError(t, err)
+	assert.Contains(t, seen, "Veeam")
+}
+
 func TestAllowUnorderedWithDelimiterValidation(t *testing.T) {
 	t.Run("should return error when allow-unordered=true and delimiter are both present", func(t *testing.T) {
 		// Create a request with both allow-unordered=true and delimiter
diff --git a/weed/s3api/s3api_object_handlers_put.go b/weed/s3api/s3api_object_handlers_put.go
index e9c1f7ca3..6310e592e 100644
--- a/weed/s3api/s3api_object_handlers_put.go
+++ b/weed/s3api/s3api_object_handlers_put.go
@@ -76,7 +76,7 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request)
 
 	// http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html
 	bucket, object := s3_constants.GetBucketAndObject(r)
-	glog.V(3).Infof("PutObjectHandler %s %s", bucket, object)
+	glog.V(2).Infof("PutObjectHandler bucket=%s object=%s size=%d", bucket, object, r.ContentLength)
 
 	_, err := validateContentMd5(r.Header)
 	if err != nil {
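
For reference, a minimal sketch (not part of the patch) of driving the testFilerClient mock added above directly, assuming it sits in the same s3api test package; the /buckets/demo directory, its entries, and the exampleMockListing helper are illustrative only:

func exampleMockListing() {
	// Mock filer state: one folder and one object under an illustrative bucket path.
	client := &testFilerClient{
		entriesByDir: map[string][]*filer_pb.Entry{
			"/buckets/demo": {
				{Name: "Veeam", IsDirectory: true, Attributes: &filer_pb.FuseAttributes{}},
				{Name: "notes.txt", Attributes: &filer_pb.FuseAttributes{}},
			},
		},
	}

	// Prefix "/" is treated by the mock as "no filter", mirroring the
	// bucket-root listing case that the regression test exercises.
	stream, _ := client.ListEntries(context.Background(), &filer_pb.ListEntriesRequest{
		Directory: "/buckets/demo",
		Prefix:    "/",
	})

	names := make([]string, 0)
	for {
		resp, err := stream.Recv() // the mock returns io.EOF after the last entry
		if err != nil {
			break
		}
		names = append(names, resp.Entry.Name)
	}
	// names is now []string{"Veeam", "notes.txt"}
	_ = names
}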