
go fmt

pull/7434/head
chrislu committed 1 month ago
commit d15ab56be7
  1. test/s3/retention/s3_bucket_delete_with_lock_test.go (5 changed lines)
  2. weed/command/benchmark.go (2 changed lines)
  3. weed/command/download.go (6 changed lines)
  4. weed/s3api/auth_credentials_subscribe.go (2 changed lines)
  5. weed/s3api/auto_signature_v4_test.go (2 changed lines)
  6. weed/s3api/s3api_bucket_config.go (4 changed lines)
  7. weed/s3api/s3api_object_handlers_acl.go (10 changed lines)
  8. weed/s3api/s3api_object_handlers_list.go (4 changed lines)
  9. weed/s3api/s3api_object_versioning.go (12 changed lines)
  10. weed/util/net_timeout.go (10 changed lines)

test/s3/retention/s3_bucket_delete_with_lock_test.go (5 changed lines)

@@ -136,7 +136,7 @@ func TestBucketDeletionWithObjectLock(t *testing.T) {
}
return true
}, 5*time.Second, 500*time.Millisecond, "DeleteBucket should succeed when no objects have active locks")
t.Logf("Successfully deleted bucket without active locks")
})
}
@@ -205,7 +205,7 @@ func TestBucketDeletionWithVersionedLocks(t *testing.T) {
}
return true
}, 5*time.Second, 500*time.Millisecond, "DeleteBucket should succeed after all locks expire")
t.Logf("Successfully deleted bucket after locks expired")
}
@@ -237,4 +237,3 @@ func TestBucketDeletionWithoutObjectLock(t *testing.T) {
require.NoError(t, err, "DeleteBucket should succeed for regular bucket")
t.Logf("Successfully deleted regular bucket without object lock")
}
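
Since this is a formatting-only commit, the test logic above is unchanged; for readers unfamiliar with the assertion style, the deletion checks use testify's polling helper. A minimal, self-contained sketch of that pattern (assuming the testify require package; deleteBucket is a hypothetical stand-in for the real S3 client call):

```go
package retention_test

import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

// deleteBucket is a hypothetical stand-in for the S3 DeleteBucket call made by
// the real test; here it succeeds immediately.
func deleteBucket(name string) error { return nil }

func TestDeleteBucketEventually(t *testing.T) {
	// Poll every 500ms for up to 5s, matching the timeouts used in the hunks above.
	require.Eventually(t, func() bool {
		if err := deleteBucket("test-bucket"); err != nil {
			t.Logf("DeleteBucket not ready yet: %v", err)
			return false
		}
		return true
	}, 5*time.Second, 500*time.Millisecond, "DeleteBucket should eventually succeed")
}
```

require.Eventually re-runs the condition every tick (500ms here) until it returns true or the 5s budget is exhausted, which absorbs the small delay between a lock expiring and DeleteBucket succeeding.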

weed/command/benchmark.go (2 changed lines)

@@ -141,7 +141,7 @@ func runBenchmark(cmd *Command, args []string) bool {
fmt.Fprintln(os.Stderr, "Error: -readOnly and -writeOnly are mutually exclusive.")
return false
}
doWrite := true
doRead := true
if *b.readOnly {
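
The touched lines sit inside the benchmark's flag validation; roughly, the read/write toggles work like the sketch below (a simplified standalone version; the real command keeps these flags on its benchmark options struct, and the -writeOnly branch is assumed to mirror the -readOnly one shown in the hunk):

```go
package main

import (
	"flag"
	"fmt"
	"os"
)

func main() {
	// Sketch of the -readOnly / -writeOnly handling shown above.
	readOnly := flag.Bool("readOnly", false, "only issue read requests")
	writeOnly := flag.Bool("writeOnly", false, "only issue write requests")
	flag.Parse()

	if *readOnly && *writeOnly {
		fmt.Fprintln(os.Stderr, "Error: -readOnly and -writeOnly are mutually exclusive.")
		os.Exit(1)
	}

	// Both phases run by default; each flag disables the opposite phase.
	doWrite, doRead := true, true
	if *readOnly {
		doWrite = false
	}
	if *writeOnly {
		doRead = false
	}
	fmt.Println("doWrite:", doWrite, "doRead:", doRead)
}
```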

weed/command/download.go (6 changed lines)

@@ -23,9 +23,9 @@ var (
)
type DownloadOptions struct {
-master *string
-server *string // deprecated, for backward compatibility
-dir *string
+master *string
+server *string // deprecated, for backward compatibility
+dir *string
}
func init() {
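
Only whitespace changes here: gofmt aligns the types and trailing comments of consecutive struct fields into columns, which is typically why a formatting pass rewrites a whole field block at once. An illustrative, hypothetical copy of the struct:

```go
package main

// downloadOptionsExample is a hypothetical copy of DownloadOptions, shown only
// to illustrate the column alignment: gofmt pads the shorter field names so the
// *string types (and any trailing comments) line up, so re-aligning one line
// re-indents its neighbours and all three fields appear in the diff.
type downloadOptionsExample struct {
	master *string
	server *string // deprecated, for backward compatibility
	dir    *string
}

func main() {}
```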

weed/s3api/auth_credentials_subscribe.go (2 changed lines)

@@ -109,7 +109,7 @@ func (s3a *S3ApiServer) updateBucketConfigCacheFromEntry(entry *filer_pb.Entry)
bucket := entry.Name
glog.V(3).Infof("updateBucketConfigCacheFromEntry: called for bucket %s, ExtObjectLockEnabledKey=%s",
glog.V(3).Infof("updateBucketConfigCacheFromEntry: called for bucket %s, ExtObjectLockEnabledKey=%s",
bucket, string(entry.Extended[s3_constants.ExtObjectLockEnabledKey]))
// Create new bucket config from the entry

weed/s3api/auto_signature_v4_test.go (2 changed lines)

@@ -491,7 +491,7 @@ func TestSignatureV4WithoutProxy(t *testing.T) {
// Set forwarded headers
r.Header.Set("Host", tt.host)
// First, verify that extractHostHeader returns the expected value
extractedHost := extractHostHeader(r)
if extractedHost != tt.expectedHost {

weed/s3api/s3api_bucket_config.go (4 changed lines)

@@ -350,7 +350,7 @@ func (s3a *S3ApiServer) getBucketConfig(bucket string) (*BucketConfig, s3err.Err
// Extract configuration from extended attributes
if entry.Extended != nil {
glog.V(3).Infof("getBucketConfig: checking extended attributes for bucket %s, ExtObjectLockEnabledKey value=%s",
glog.V(3).Infof("getBucketConfig: checking extended attributes for bucket %s, ExtObjectLockEnabledKey value=%s",
bucket, string(entry.Extended[s3_constants.ExtObjectLockEnabledKey]))
if versioning, exists := entry.Extended[s3_constants.ExtVersioningKey]; exists {
config.Versioning = string(versioning)
@@ -435,7 +435,7 @@ func (s3a *S3ApiServer) updateBucketConfig(bucket string, updateFn func(*BucketC
glog.Errorf("updateBucketConfig: failed to store Object Lock configuration for bucket %s: %v", bucket, err)
return s3err.ErrInternalError
}
glog.V(3).Infof("updateBucketConfig: stored Object Lock config in extended attributes for bucket %s, key=%s, value=%s",
glog.V(3).Infof("updateBucketConfig: stored Object Lock config in extended attributes for bucket %s, key=%s, value=%s",
bucket, s3_constants.ExtObjectLockEnabledKey, string(config.Entry.Extended[s3_constants.ExtObjectLockEnabledKey]))
}
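
Both reformatted log lines sit in code that reads bucket settings out of the filer entry's extended-attribute map. A rough sketch of that lookup pattern (the key strings and the config type below are illustrative stand-ins, not the real s3_constants values or SeaweedFS types):

```go
package main

import "fmt"

// Illustrative stand-ins for the s3_constants keys referenced above.
const (
	extVersioningKey        = "ext-versioning"
	extObjectLockEnabledKey = "ext-object-lock-enabled"
)

type bucketConfig struct {
	Versioning        string
	ObjectLockEnabled string
}

// configFromExtended shows the general pattern: each optional setting is a
// byte-slice value in the filer entry's Extended map.
func configFromExtended(extended map[string][]byte) bucketConfig {
	var cfg bucketConfig
	if extended == nil {
		return cfg
	}
	if v, ok := extended[extVersioningKey]; ok {
		cfg.Versioning = string(v)
	}
	if v, ok := extended[extObjectLockEnabledKey]; ok {
		cfg.ObjectLockEnabled = string(v)
	}
	return cfg
}

func main() {
	cfg := configFromExtended(map[string][]byte{extVersioningKey: []byte("Enabled")})
	fmt.Printf("%+v\n", cfg)
}
```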

weed/s3api/s3api_object_handlers_acl.go (10 changed lines)

@@ -320,11 +320,11 @@ func (s3a *S3ApiServer) PutObjectAclHandler(w http.ResponseWriter, r *http.Reque
}
if actualVersionId == "null" || actualVersionId == "" {
-// Null version (pre-versioning object) - stored as regular file
-updateDirectory = s3a.option.BucketsPath + "/" + bucket
-} else {
-// Versioned object - stored in .versions directory
-updateDirectory = s3a.option.BucketsPath + "/" + bucket + "/" + object + s3_constants.VersionsFolder
+// Null version (pre-versioning object) - stored as regular file
+updateDirectory = s3a.option.BucketsPath + "/" + bucket
+} else {
+// Versioned object - stored in .versions directory
+updateDirectory = s3a.option.BucketsPath + "/" + bucket + "/" + object + s3_constants.VersionsFolder
}
}
} else {
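
The re-indented branch chooses where the ACL update is written: null/pre-versioning objects live directly under the bucket directory, while specific versions live under the object's .versions folder. A standalone sketch of that selection (the ".versions" literal is inferred from the log messages elsewhere in this diff):

```go
package main

import "fmt"

// versionsFolder mirrors s3_constants.VersionsFolder; ".versions" is an
// assumption based on the directory names shown in this commit.
const versionsFolder = ".versions"

// updateDirectoryFor picks the directory that holds the metadata to update.
func updateDirectoryFor(bucketsPath, bucket, object, versionId string) string {
	if versionId == "null" || versionId == "" {
		// Null version (pre-versioning object): stored as a regular file under the bucket.
		return bucketsPath + "/" + bucket
	}
	// Versioned object: stored in the object's .versions directory.
	return bucketsPath + "/" + bucket + "/" + object + versionsFolder
}

func main() {
	fmt.Println(updateDirectoryFor("/buckets", "photos", "/cat.jpg", "null"))
	fmt.Println(updateDirectoryFor("/buckets", "photos", "/cat.jpg", "v123"))
}
```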

weed/s3api/s3api_object_handlers_list.go (4 changed lines)

@@ -510,8 +510,8 @@ func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, d
continue
}
-// Skip .versions directories in regular list operations but track them for logical object creation
-if strings.HasSuffix(entry.Name, s3_constants.VersionsFolder) {
+// Skip .versions directories in regular list operations but track them for logical object creation
+if strings.HasSuffix(entry.Name, s3_constants.VersionsFolder) {
glog.V(4).Infof("Found .versions directory: %s", entry.Name)
versionsDirs = append(versionsDirs, entry.Name)
continue

weed/s3api/s3api_object_versioning.go (12 changed lines)

@@ -300,10 +300,10 @@ func (s3a *S3ApiServer) findVersionsRecursively(currentPath, relativePath string
continue
}
-// Check if this is a .versions directory
-if strings.HasSuffix(entry.Name, s3_constants.VersionsFolder) {
-// Extract object name from .versions directory name
-objectKey := strings.TrimSuffix(entryPath, s3_constants.VersionsFolder)
+// Check if this is a .versions directory
+if strings.HasSuffix(entry.Name, s3_constants.VersionsFolder) {
+// Extract object name from .versions directory name
+objectKey := strings.TrimSuffix(entryPath, s3_constants.VersionsFolder)
normalizedObjectKey := removeDuplicateSlashes(objectKey)
// Mark both keys as processed for backward compatibility
processedObjects[objectKey] = true
@@ -418,8 +418,8 @@ func (s3a *S3ApiServer) findVersionsRecursively(currentPath, relativePath string
}
}
-// Check if a .versions directory exists for this object
-versionsObjectPath := normalizedObjectKey + s3_constants.VersionsFolder
+// Check if a .versions directory exists for this object
+versionsObjectPath := normalizedObjectKey + s3_constants.VersionsFolder
_, versionsErr := s3a.getEntry(currentPath, versionsObjectPath)
if versionsErr == nil {
// .versions directory exists
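
The reformatted block is part of the version-listing walk: a directory whose name ends in the versions suffix is treated as the version store for one logical object, and the object key is recovered by trimming that suffix. A minimal sketch (again assuming ".versions" as the suffix value):

```go
package main

import (
	"fmt"
	"strings"
)

// versionsFolder mirrors s3_constants.VersionsFolder (".versions" is inferred
// from the log messages in this diff).
const versionsFolder = ".versions"

func main() {
	entryPath := "photos/2024/cat.jpg" + versionsFolder

	// Check whether this entry is a .versions directory, then strip the suffix
	// to recover the logical object key it belongs to.
	if strings.HasSuffix(entryPath, versionsFolder) {
		objectKey := strings.TrimSuffix(entryPath, versionsFolder)
		fmt.Println("object key:", objectKey) // photos/2024/cat.jpg
	}

	// The reverse lookup from the second hunk: given an object key, the
	// candidate version store is simply key + versionsFolder.
	fmt.Println("versions dir:", "photos/2024/cat.jpg"+versionsFolder)
}
```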

weed/util/net_timeout.go (10 changed lines)

@@ -13,7 +13,7 @@ const (
// minThroughputBytesPerSecond defines the minimum expected throughput (4KB/s)
// Used to calculate timeout scaling based on data transferred
minThroughputBytesPerSecond = 4000
// graceTimeCapMultiplier caps the grace period for slow clients at 3x base timeout
// This prevents indefinite connections while allowing time for server-side chunk fetches
graceTimeCapMultiplier = 3
@@ -90,17 +90,17 @@ func (c *Conn) Write(b []byte) (count int, e error) {
// Calculate timeout with two components:
// 1. Base timeout scaled by cumulative data (minimum throughput of 4KB/s)
// 2. Additional grace period if there was a gap since last write (for chunk fetch delays)
// Calculate expected bytes per timeout period based on minimum throughput (4KB/s)
// Example: with WriteTimeout=30s, bytesPerTimeout = 4000 * 30 = 120KB
// After writing 1MB: multiplier = 1,000,000/120,000 + 1 ≈ 9, baseTimeout = 30s * 9 = 270s
bytesPerTimeout := calculateBytesPerTimeout(c.WriteTimeout)
timeoutMultiplier := time.Duration(c.bytesWritten/bytesPerTimeout + 1)
baseTimeout := c.WriteTimeout * timeoutMultiplier
// If it's been a while since last write, add grace time for server-side chunk fetches
// But cap it to avoid keeping slow clients connected indefinitely
-//
+//
// The comparison uses unscaled WriteTimeout intentionally: triggers grace when idle time
// exceeds base timeout, independent of throughput scaling.
if !c.lastWrite.IsZero() {
@@ -120,7 +120,7 @@ func (c *Conn) Write(b []byte) (count int, e error) {
baseTimeout += graceTime
}
}
err := c.Conn.SetWriteDeadline(now.Add(baseTimeout))
if err != nil {
return 0, err
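
The surrounding comments describe the write-deadline policy: the base timeout scales with cumulative bytes written (assuming a 4KB/s throughput floor), and an idle gap longer than the unscaled timeout earns a capped grace period. A self-contained re-derivation of that arithmetic (illustrative only; the helper name and the exact basis of the grace cap are assumptions, not the actual net_timeout.go code):

```go
package main

import (
	"fmt"
	"time"
)

const (
	// Minimum expected throughput (4KB/s), mirroring minThroughputBytesPerSecond above.
	minThroughputBytesPerSecond = 4000
	// Grace period cap multiplier, mirroring graceTimeCapMultiplier above.
	graceTimeCapMultiplier = 3
)

// writeDeadlineFor re-derives the deadline math described in the comments above.
func writeDeadlineFor(writeTimeout time.Duration, bytesWritten int64, idle time.Duration) time.Duration {
	// Bytes a 4KB/s client could move within one timeout period.
	// Example: writeTimeout=30s -> bytesPerTimeout = 4000 * 30 = 120,000 bytes.
	bytesPerTimeout := int64(minThroughputBytesPerSecond * writeTimeout.Seconds())

	// Scale the base timeout by cumulative data written.
	// After 1MB: 1,000,000/120,000 + 1 = 9 -> baseTimeout = 30s * 9 = 270s.
	multiplier := time.Duration(bytesWritten/bytesPerTimeout + 1)
	baseTimeout := writeTimeout * multiplier

	// Grace period: if the connection sat idle longer than the unscaled write
	// timeout (e.g. waiting on server-side chunk fetches), add the idle time,
	// capped (here at 3x the configured timeout, an assumption) so slow clients
	// are not kept connected indefinitely.
	if idle > writeTimeout {
		graceTime := idle
		if limit := writeTimeout * graceTimeCapMultiplier; graceTime > limit {
			graceTime = limit
		}
		baseTimeout += graceTime
	}
	return baseTimeout
}

func main() {
	fmt.Println(writeDeadlineFor(30*time.Second, 1_000_000, 0)) // 4m30s
}
```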
