go fmt

pull/7481/head
chrislu 2 weeks ago
parent commit 74e864bc65
  1. test/s3/sse/s3_sse_range_server_test.go (1 line changed)
  2. weed/operation/upload_chunked.go (113 lines changed)
  3. weed/operation/upload_chunked_test.go (1 line changed)
  4. weed/s3api/auth_credentials.go (76 lines changed)
  5. weed/s3api/custom_types.go (4 lines changed)
  6. weed/s3api/policy_conversion.go (1 line changed)
  7. weed/s3api/policy_conversion_test.go (11 lines changed)
  8. weed/s3api/s3_constants/header.go (12 lines changed)
  9. weed/s3api/s3_sse_s3_multipart_test.go (1 line changed)
  10. weed/s3api/s3api_bucket_policy_arn_test.go (1 line changed)
  11. weed/s3api/s3api_implicit_directory_test.go (7 lines changed)
  12. weed/s3api/s3api_object_handlers_multipart.go (12 lines changed)
  13. weed/s3api/s3api_sse_chunk_metadata_test.go (9 lines changed)
  14. weed/s3api/s3api_sse_decrypt_test.go (1 line changed)
  15. weed/s3api/s3api_sse_s3_upload_test.go (1 line changed)
  16. weed/util/log_buffer/log_buffer_corruption_test.go (1 line changed)
  17. weed/util/log_buffer/log_buffer_test.go (20 lines changed)

test/s3/sse/s3_sse_range_server_test.go (1 line changed)

@@ -443,4 +443,3 @@ func TestSSEMultipartRangeRequestsServerBehavior(t *testing.T) {
     assert.Equal(t, expectedData, bodyBytes,
         "Cross-part range content must be correctly decrypted and assembled")
 }
-

weed/operation/upload_chunked.go (113 lines changed)

@@ -26,16 +26,16 @@ type ChunkedUploadResult struct {

 // ChunkedUploadOption contains options for chunked uploads
 type ChunkedUploadOption struct {
-    ChunkSize int32
-    SmallFileLimit int64
-    Collection string
-    Replication string
-    DataCenter string
-    SaveSmallInline bool
-    Jwt security.EncodedJwt
-    MimeType string
-    AssignFunc func(ctx context.Context, count int) (*VolumeAssignRequest, *AssignResult, error)
-    UploadFunc func(ctx context.Context, data []byte, option *UploadOption) (*UploadResult, error) // Optional: for testing
+    ChunkSize       int32
+    SmallFileLimit  int64
+    Collection      string
+    Replication     string
+    DataCenter      string
+    SaveSmallInline bool
+    Jwt             security.EncodedJwt
+    MimeType        string
+    AssignFunc      func(ctx context.Context, count int) (*VolumeAssignRequest, *AssignResult, error)
+    UploadFunc      func(ctx context.Context, data []byte, option *UploadOption) (*UploadResult, error) // Optional: for testing
 }

 var chunkBufferPool = sync.Pool{
@@ -121,24 +121,24 @@ uploadLoop:
             break
         }

-        // For small files at offset 0, store inline instead of uploading
-        if chunkOffset == 0 && opt.SaveSmallInline && dataSize < opt.SmallFileLimit {
-            smallContent := make([]byte, dataSize)
-            n, readErr := io.ReadFull(bytesBuffer, smallContent)
-            chunkBufferPool.Put(bytesBuffer)
-            <-bytesBufferLimitChan
+        // For small files at offset 0, store inline instead of uploading
+        if chunkOffset == 0 && opt.SaveSmallInline && dataSize < opt.SmallFileLimit {
+            smallContent := make([]byte, dataSize)
+            n, readErr := io.ReadFull(bytesBuffer, smallContent)
+            chunkBufferPool.Put(bytesBuffer)
+            <-bytesBufferLimitChan

-            if readErr != nil {
-                return nil, fmt.Errorf("failed to read small content: read %d of %d bytes: %w", n, dataSize, readErr)
-            }
+            if readErr != nil {
+                return nil, fmt.Errorf("failed to read small content: read %d of %d bytes: %w", n, dataSize, readErr)
+            }

-            return &ChunkedUploadResult{
-                FileChunks: nil,
-                Md5Hash: md5Hash,
-                TotalSize: dataSize,
-                SmallContent: smallContent,
-            }, nil
-        }
+            return &ChunkedUploadResult{
+                FileChunks:   nil,
+                Md5Hash:      md5Hash,
+                TotalSize:    dataSize,
+                SmallContent: smallContent,
+            }, nil
+        }

         // Upload chunk in parallel goroutine
         wg.Add(1)
@@ -160,24 +160,24 @@ uploadLoop:
                 return
             }

-            // Upload chunk data
-            uploadUrl := fmt.Sprintf("http://%s/%s", assignResult.Url, assignResult.Fid)
+            // Upload chunk data
+            uploadUrl := fmt.Sprintf("http://%s/%s", assignResult.Url, assignResult.Fid)

-            // Use per-assignment JWT if present, otherwise fall back to the original JWT
-            // This is critical for secured clusters where each volume assignment has its own JWT
-            jwt := opt.Jwt
-            if assignResult.Auth != "" {
-                jwt = assignResult.Auth
-            }
+            // Use per-assignment JWT if present, otherwise fall back to the original JWT
+            // This is critical for secured clusters where each volume assignment has its own JWT
+            jwt := opt.Jwt
+            if assignResult.Auth != "" {
+                jwt = assignResult.Auth
+            }

-            uploadOption := &UploadOption{
-                UploadUrl: uploadUrl,
-                Cipher: false,
-                IsInputCompressed: false,
-                MimeType: opt.MimeType,
-                PairMap: nil,
-                Jwt: jwt,
-            }
+            uploadOption := &UploadOption{
+                UploadUrl:         uploadUrl,
+                Cipher:            false,
+                IsInputCompressed: false,
+                MimeType:          opt.MimeType,
+                PairMap:           nil,
+                Jwt:               jwt,
+            }

             var uploadResult *UploadResult
             var uploadResultErr error
@@ -207,20 +207,20 @@ uploadLoop:
                 return
             }

-            // Create chunk entry
-            // Set ModifiedTsNs to current time (nanoseconds) to track when upload completed
-            // This is critical for multipart uploads where the same part may be uploaded multiple times
-            // The part with the latest ModifiedTsNs is selected as the authoritative version
-            fid, _ := filer_pb.ToFileIdObject(assignResult.Fid)
-            chunk := &filer_pb.FileChunk{
-                FileId: assignResult.Fid,
-                Offset: offset,
-                Size: uint64(uploadResult.Size),
-                ModifiedTsNs: time.Now().UnixNano(),
-                ETag: uploadResult.ContentMd5,
-                Fid: fid,
-                CipherKey: uploadResult.CipherKey,
-            }
+            // Create chunk entry
+            // Set ModifiedTsNs to current time (nanoseconds) to track when upload completed
+            // This is critical for multipart uploads where the same part may be uploaded multiple times
+            // The part with the latest ModifiedTsNs is selected as the authoritative version
+            fid, _ := filer_pb.ToFileIdObject(assignResult.Fid)
+            chunk := &filer_pb.FileChunk{
+                FileId:       assignResult.Fid,
+                Offset:       offset,
+                Size:         uint64(uploadResult.Size),
+                ModifiedTsNs: time.Now().UnixNano(),
+                ETag:         uploadResult.ContentMd5,
+                Fid:          fid,
+                CipherKey:    uploadResult.CipherKey,
+            }

             fileChunksLock.Lock()
             fileChunks = append(fileChunks, chunk)
@@ -265,4 +265,3 @@ uploadLoop:
         SmallContent: nil,
     }, nil
 }
-

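The hunks above restate two rules worth pinning down: a non-empty per-assignment JWT always overrides the request-level one, and when the same multipart part is uploaded more than once, the chunk with the latest ModifiedTsNs is treated as authoritative. A minimal sketch of both rules, using simplified stand-ins for security.EncodedJwt, AssignResult, and filer_pb.FileChunk rather than the real SeaweedFS types:

package main

import (
    "fmt"
    "time"
)

// Simplified stand-ins; the real types live in weed/security,
// weed/operation, and weed/pb/filer_pb.
type EncodedJwt string

type AssignResult struct {
    Auth EncodedJwt // per-assignment JWT issued with the volume assignment; may be empty
}

type FileChunk struct {
    FileId       string
    ModifiedTsNs int64 // nanoseconds, set when the upload completes
}

// pickJwt mirrors the fallback in the hunk: prefer the per-assignment
// token, and use the option-level JWT only when it is absent.
func pickJwt(optJwt EncodedJwt, assign *AssignResult) EncodedJwt {
    if assign.Auth != "" {
        return assign.Auth
    }
    return optJwt
}

// authoritative picks the chunk with the latest ModifiedTsNs, i.e. the
// most recent upload wins when a part was uploaded multiple times.
func authoritative(chunks []*FileChunk) *FileChunk {
    var best *FileChunk
    for _, c := range chunks {
        if best == nil || c.ModifiedTsNs > best.ModifiedTsNs {
            best = c
        }
    }
    return best
}

func main() {
    fmt.Println(pickJwt("request-token", &AssignResult{Auth: "per-volume-token"})) // per-volume-token
    now := time.Now().UnixNano()
    retries := []*FileChunk{
        {FileId: "first-upload", ModifiedTsNs: now - int64(time.Second)},
        {FileId: "retried-upload", ModifiedTsNs: now},
    }
    fmt.Println(authoritative(retries).FileId) // retried-upload
}
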
weed/operation/upload_chunked_test.go (1 line changed)

@@ -310,4 +310,3 @@ func TestUploadReaderInChunksReaderFailure(t *testing.T) {
     t.Logf("✓ Got partial result on read failure: chunks=%d, totalSize=%d",
         len(result.FileChunks), result.TotalSize)
 }
-

weed/s3api/auth_credentials.go (76 lines changed)

@@ -177,41 +177,41 @@ func NewIdentityAccessManagementWithStore(option *S3ApiServerOption, explicitSto
     accessKeyId := os.Getenv("AWS_ACCESS_KEY_ID")
     secretAccessKey := os.Getenv("AWS_SECRET_ACCESS_KEY")
-    if accessKeyId != "" && secretAccessKey != "" {
-        glog.V(1).Infof("No S3 configuration found, using AWS environment variables as fallback")
+    if accessKeyId != "" && secretAccessKey != "" {
+        glog.V(1).Infof("No S3 configuration found, using AWS environment variables as fallback")

-        // Create environment variable identity name
-        identityNameSuffix := accessKeyId
-        if len(accessKeyId) > 8 {
-            identityNameSuffix = accessKeyId[:8]
-        }
+        // Create environment variable identity name
+        identityNameSuffix := accessKeyId
+        if len(accessKeyId) > 8 {
+            identityNameSuffix = accessKeyId[:8]
+        }

-        // Create admin identity with environment variable credentials
-        envIdentity := &Identity{
-            Name: "admin-" + identityNameSuffix,
-            Account: &AccountAdmin,
-            Credentials: []*Credential{
-                {
-                    AccessKey: accessKeyId,
-                    SecretKey: secretAccessKey,
+        // Create admin identity with environment variable credentials
+        envIdentity := &Identity{
+            Name:    "admin-" + identityNameSuffix,
+            Account: &AccountAdmin,
+            Credentials: []*Credential{
+                {
+                    AccessKey: accessKeyId,
+                    SecretKey: secretAccessKey,
                 },
             },
-            Actions: []Action{
-                s3_constants.ACTION_ADMIN,
-            },
-        }
+            Actions: []Action{
+                s3_constants.ACTION_ADMIN,
+            },
+        }

-        // Set as the only configuration
-        iam.m.Lock()
-        if len(iam.identities) == 0 {
-            iam.identities = []*Identity{envIdentity}
-            iam.accessKeyIdent = map[string]*Identity{accessKeyId: envIdentity}
-            iam.isAuthEnabled = true
-        }
-        iam.m.Unlock()
+        // Set as the only configuration
+        iam.m.Lock()
+        if len(iam.identities) == 0 {
+            iam.identities = []*Identity{envIdentity}
+            iam.accessKeyIdent = map[string]*Identity{accessKeyId: envIdentity}
+            iam.isAuthEnabled = true
+        }
+        iam.m.Unlock()

-        glog.V(1).Infof("Added admin identity from AWS environment variables: %s", envIdentity.Name)
-    }
+        glog.V(1).Infof("Added admin identity from AWS environment variables: %s", envIdentity.Name)
+    }
 }
 return iam
@@ -460,13 +460,13 @@ func (iam *IdentityAccessManagement) authRequest(r *http.Request, action Action)
 case authTypeJWT:
     glog.V(3).Infof("jwt auth type detected, iamIntegration != nil? %t", iam.iamIntegration != nil)
     r.Header.Set(s3_constants.AmzAuthType, "Jwt")
-    if iam.iamIntegration != nil {
-        identity, s3Err = iam.authenticateJWTWithIAM(r)
-        authType = "Jwt"
-    } else {
-        glog.V(2).Infof("IAM integration is nil, returning ErrNotImplemented")
-        return identity, s3err.ErrNotImplemented
-    }
+    if iam.iamIntegration != nil {
+        identity, s3Err = iam.authenticateJWTWithIAM(r)
+        authType = "Jwt"
+    } else {
+        glog.V(2).Infof("IAM integration is nil, returning ErrNotImplemented")
+        return identity, s3err.ErrNotImplemented
+    }
 case authTypeAnonymous:
     authType = "Anonymous"
     if identity, found = iam.lookupAnonymous(); !found {
@@ -621,7 +621,7 @@ func buildPrincipalARN(identity *Identity) string {
     // Check if this is the anonymous user identity (authenticated as anonymous)
     // S3 policies expect Principal: "*" for anonymous access
     if identity.Name == s3_constants.AccountAnonymousId ||
-        (identity.Account != nil && identity.Account.Id == s3_constants.AccountAnonymousId) {
+        (identity.Account != nil && identity.Account.Id == s3_constants.AccountAnonymousId) {
         return "*" // Anonymous user
     }
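
The fallback above registers a single admin identity named after a truncated access key, and only when no identities are configured yet. A standalone sketch of that naming and guard logic, with a simplified Identity stand-in (the real struct, locking, and account wiring live in auth_credentials.go):

package main

import (
    "fmt"
    "os"
)

// Identity is a simplified stand-in for the s3api Identity type.
type Identity struct{ Name, AccessKey, SecretKey string }

// envFallbackIdentity mirrors the guard in the hunk: both env vars must be
// set, and the fallback applies only when nothing else is configured.
func envFallbackIdentity(existing []*Identity) []*Identity {
    accessKeyId := os.Getenv("AWS_ACCESS_KEY_ID")
    secretAccessKey := os.Getenv("AWS_SECRET_ACCESS_KEY")
    if accessKeyId == "" || secretAccessKey == "" || len(existing) > 0 {
        return existing
    }
    suffix := accessKeyId
    if len(suffix) > 8 {
        suffix = suffix[:8] // the identity name carries only a key prefix
    }
    return []*Identity{{Name: "admin-" + suffix, AccessKey: accessKeyId, SecretKey: secretAccessKey}}
}

func main() {
    os.Setenv("AWS_ACCESS_KEY_ID", "AKIAIOSFODNN7EXAMPLE") // AWS documentation example key
    os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")
    fmt.Println(envFallbackIdentity(nil)[0].Name) // admin-AKIAIOSF
}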

weed/s3api/custom_types.go (4 lines changed)

@@ -10,6 +10,6 @@ const s3TimeFormat = "2006-01-02T15:04:05.999Z07:00"
 // ConditionalHeaderResult holds the result of conditional header checking
 type ConditionalHeaderResult struct {
     ErrorCode s3err.ErrorCode
-    ETag string // ETag of the object (for 304 responses)
-    Entry *filer_pb.Entry // Entry fetched during conditional check (nil if not fetched or object doesn't exist)
+    ETag      string          // ETag of the object (for 304 responses)
+    Entry     *filer_pb.Entry // Entry fetched during conditional check (nil if not fetched or object doesn't exist)
 }

weed/s3api/policy_conversion.go (1 line changed)

@@ -236,4 +236,3 @@ func getMapKeys(m map[string]interface{}) []string {
     }
     return keys
 }
-

weed/s3api/policy_conversion_test.go (11 lines changed)

@@ -13,10 +13,10 @@ func TestConvertPolicyDocumentWithMixedTypes(t *testing.T) {
     Version: "2012-10-17",
     Statement: []policy.Statement{
         {
-            Sid: "TestMixedTypes",
-            Effect: "Allow",
-            Action: []string{"s3:GetObject"},
-            Resource: []string{"arn:aws:s3:::bucket/*"},
+            Sid:       "TestMixedTypes",
+            Effect:    "Allow",
+            Action:    []string{"s3:GetObject"},
+            Resource:  []string{"arn:aws:s3:::bucket/*"},
             Principal: []interface{}{"user1", 123, true}, // Mixed types
             Condition: map[string]map[string]interface{}{
                 "NumericEquals": {
@@ -116,7 +116,7 @@ func TestConvertPrincipalWithMapAndMixedTypes(t *testing.T) {
     principalMap := map[string]interface{}{
         "AWS": []interface{}{
             "arn:aws:iam::123456789012:user/Alice",
-            456, // User ID as number
+            456,  // User ID as number
             true, // Some boolean value
         },
     }
@@ -611,4 +611,3 @@ func TestConvertPolicyDocumentWithId(t *testing.T) {
         t.Errorf("Expected 1 statement, got %d", len(dest.Statement))
     }
 }
-
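
TestConvertPolicyDocumentWithMixedTypes feeds principals as strings, numbers, and booleans. One plausible way to normalize such mixed values to strings, shown purely for illustration (toStrings is a hypothetical helper, not the actual converter in policy_conversion.go):

package main

import "fmt"

// toStrings flattens mixed-type JSON values into their string forms,
// e.g. 123 -> "123" and true -> "true".
func toStrings(values []interface{}) []string {
    out := make([]string, 0, len(values))
    for _, v := range values {
        out = append(out, fmt.Sprintf("%v", v))
    }
    return out
}

func main() {
    fmt.Println(toStrings([]interface{}{"user1", 123, true})) // [user1 123 true]
}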

weed/s3api/s3_constants/header.go (12 lines changed)

@@ -39,13 +39,13 @@ const (
     AmzObjectTaggingDirective        = "X-Amz-Tagging-Directive"
     AmzTagCount                      = "x-amz-tagging-count"
-    SeaweedFSIsDirectoryKey = "X-Seaweedfs-Is-Directory-Key"
-    SeaweedFSPartNumber = "X-Seaweedfs-Part-Number"
-    SeaweedFSUploadId = "X-Seaweedfs-Upload-Id"
-    SeaweedFSMultipartPartsCount = "X-Seaweedfs-Multipart-Parts-Count"
+    SeaweedFSIsDirectoryKey          = "X-Seaweedfs-Is-Directory-Key"
+    SeaweedFSPartNumber              = "X-Seaweedfs-Part-Number"
+    SeaweedFSUploadId                = "X-Seaweedfs-Upload-Id"
+    SeaweedFSMultipartPartsCount     = "X-Seaweedfs-Multipart-Parts-Count"
     SeaweedFSMultipartPartBoundaries = "X-Seaweedfs-Multipart-Part-Boundaries" // JSON: [{part:1,start:0,end:2,etag:"abc"},{part:2,start:2,end:3,etag:"def"}]
-    SeaweedFSExpiresS3 = "X-Seaweedfs-Expires-S3"
-    AmzMpPartsCount = "x-amz-mp-parts-count"
+    SeaweedFSExpiresS3               = "X-Seaweedfs-Expires-S3"
+    AmzMpPartsCount                  = "x-amz-mp-parts-count"

     // S3 ACL headers
     AmzCannedAcl = "X-Amz-Acl"
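
The comment on SeaweedFSMultipartPartBoundaries documents the part-layout payload this header carries. A sketch of decoding it, assuming the stored value is ordinary quoted JSON; the PartBoundary struct and its tags are illustrative, not actual SeaweedFS types:

package main

import (
    "encoding/json"
    "fmt"
)

// PartBoundary is a hypothetical model of one entry in the header's
// JSON array: [{part:1,start:0,end:2,etag:"abc"}, ...].
type PartBoundary struct {
    Part  int    `json:"part"`  // 1-based part number
    Start int    `json:"start"` // index of the part's first chunk
    End   int    `json:"end"`   // index one past the part's last chunk
    ETag  string `json:"etag"`  // per-part ETag
}

func main() {
    payload := `[{"part":1,"start":0,"end":2,"etag":"abc"},{"part":2,"start":2,"end":3,"etag":"def"}]`
    var parts []PartBoundary
    if err := json.Unmarshal([]byte(payload), &parts); err != nil {
        panic(err)
    }
    fmt.Printf("part 2 covers chunks [%d,%d)\n", parts[1].Start, parts[1].End)
}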

weed/s3api/s3_sse_s3_multipart_test.go (1 line changed)

@@ -264,4 +264,3 @@ func TestSSES3EncryptionConsistency(t *testing.T) {
         t.Error("Second decryption should also work with fresh stream")
     }
 }
-

weed/s3api/s3api_bucket_policy_arn_test.go (1 line changed)

@@ -123,4 +123,3 @@ func TestBuildPrincipalARN(t *testing.T) {
         })
     }
 }
-

weed/s3api/s3api_implicit_directory_test.go (7 lines changed)

@@ -180,9 +180,9 @@ func TestHasChildrenLogic(t *testing.T) {
         description: "Should return false when no children exist (EOF)",
     },
     {
-        name: "Directory with leading slash in prefix",
-        bucket: "test-bucket",
-        prefix: "/dataset",
+        name:   "Directory with leading slash in prefix",
+        bucket: "test-bucket",
+        prefix: "/dataset",
         listResponse: &filer_pb.ListEntriesResponse{
             Entry: &filer_pb.Entry{
                 Name: "file.parquet",
@@ -283,4 +283,3 @@ func BenchmarkHasChildrenCheck(b *testing.B) {
     // Expected: ~1-5ms per call (one gRPC LIST request with Limit=1)
     b.Skip("Benchmark - requires full filer setup")
 }
-
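
The benchmark comment above pins the expected cost of the children check: one gRPC LIST request with Limit=1. A sketch of that pattern, where existence is decided by whether any entry comes back at all (the Lister interface is invented for this sketch; the real check goes through the filer's list API):

package main

import "fmt"

type Entry struct{ Name string }

// Lister is a hypothetical directory-listing client.
type Lister interface {
    List(dir string, limit int) ([]Entry, error)
}

// hasChildren asks for at most one entry: the answer only depends on
// whether anything exists, so Limit=1 keeps the request cheap.
func hasChildren(l Lister, dir string) (bool, error) {
    entries, err := l.List(dir, 1)
    if err != nil {
        return false, err
    }
    return len(entries) > 0, nil
}

// fake is an in-memory Lister for the example.
type fake map[string][]Entry

func (f fake) List(dir string, limit int) ([]Entry, error) {
    es := f[dir]
    if len(es) > limit {
        es = es[:limit]
    }
    return es, nil
}

func main() {
    fs := fake{"/buckets/test-bucket/dataset": {{Name: "file.parquet"}}}
    ok, _ := hasChildren(fs, "/buckets/test-bucket/dataset")
    fmt.Println(ok) // true
}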

weed/s3api/s3api_object_handlers_multipart.go (12 lines changed)

@@ -380,12 +380,12 @@ func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Request) {
             r.Header.Set(s3_constants.SeaweedFSSSEKMSBaseIVHeader, base64.StdEncoding.EncodeToString(baseIV))
         } else {
-            // Check if this upload uses SSE-S3
-            if err := s3a.handleSSES3MultipartHeaders(r, uploadEntry, uploadID); err != nil {
-                glog.Errorf("Failed to setup SSE-S3 multipart headers: %v", err)
-                s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
-                return
-            }
+            // Check if this upload uses SSE-S3
+            if err := s3a.handleSSES3MultipartHeaders(r, uploadEntry, uploadID); err != nil {
+                glog.Errorf("Failed to setup SSE-S3 multipart headers: %v", err)
+                s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
+                return
+            }
         }
     }
 } else if !errors.Is(err, filer_pb.ErrNotFound) {

weed/s3api/s3api_sse_chunk_metadata_test.go (9 lines changed)

@@ -36,8 +36,8 @@ func TestSSEKMSChunkMetadataAssignment(t *testing.T) {
     // Simulate multi-chunk upload scenario (what putToFiler does after UploadReaderInChunks)
     simulatedChunks := []*filer_pb.FileChunk{
-        {FileId: "chunk1", Offset: 0, Size: 8 * 1024 * 1024}, // 8MB chunk at offset 0
-        {FileId: "chunk2", Offset: 8 * 1024 * 1024, Size: 8 * 1024 * 1024}, // 8MB chunk at offset 8MB
+        {FileId: "chunk1", Offset: 0, Size: 8 * 1024 * 1024},                // 8MB chunk at offset 0
+        {FileId: "chunk2", Offset: 8 * 1024 * 1024, Size: 8 * 1024 * 1024},  // 8MB chunk at offset 8MB
         {FileId: "chunk3", Offset: 16 * 1024 * 1024, Size: 4 * 1024 * 1024}, // 4MB chunk at offset 16MB
     }
@@ -167,8 +167,8 @@ func TestSSES3ChunkMetadataAssignment(t *testing.T) {
     // Simulate multi-chunk upload scenario (what putToFiler does after UploadReaderInChunks)
     simulatedChunks := []*filer_pb.FileChunk{
-        {FileId: "chunk1", Offset: 0, Size: 8 * 1024 * 1024}, // 8MB chunk at offset 0
-        {FileId: "chunk2", Offset: 8 * 1024 * 1024, Size: 8 * 1024 * 1024}, // 8MB chunk at offset 8MB
+        {FileId: "chunk1", Offset: 0, Size: 8 * 1024 * 1024},                // 8MB chunk at offset 0
+        {FileId: "chunk2", Offset: 8 * 1024 * 1024, Size: 8 * 1024 * 1024},  // 8MB chunk at offset 8MB
         {FileId: "chunk3", Offset: 16 * 1024 * 1024, Size: 4 * 1024 * 1024}, // 4MB chunk at offset 16MB
     }
@@ -359,4 +359,3 @@ func TestSSEChunkMetadataComparison(t *testing.T) {
         }
     })
 }
-

weed/s3api/s3api_sse_decrypt_test.go (1 line changed)

@@ -187,4 +187,3 @@ func TestSSEDecryptionDifferences(t *testing.T) {

     // This test documents the critical differences and serves as executable documentation
 }
-

weed/s3api/s3api_sse_s3_upload_test.go (1 line changed)

@@ -255,4 +255,3 @@ func TestSSES3HeaderEncoding(t *testing.T) {
             s3_constants.AESBlockSize, len(decodedBaseIV))
     }
 }
-

weed/util/log_buffer/log_buffer_corruption_test.go (1 line changed)

@@ -222,4 +222,3 @@ func TestNoSilentCorruption(t *testing.T) {
         })
     }
 }
-

weed/util/log_buffer/log_buffer_test.go (20 lines changed)

@@ -141,16 +141,16 @@ func TestReadFromBuffer_OldOffsetReturnsResumeFromDiskError(t *testing.T) {
         // Add some data to the buffer if needed (at current offset position)
         if tt.hasData {
-            testData := []byte("test message")
-            // Use AddLogEntryToBuffer to preserve offset information
-            if err := lb.AddLogEntryToBuffer(&filer_pb.LogEntry{
-                TsNs: time.Now().UnixNano(),
-                Key: []byte("key"),
-                Data: testData,
-                Offset: tt.currentOffset, // Add data at current offset
-            }); err != nil {
-                t.Fatalf("Failed to add log entry: %v", err)
-            }
+            testData := []byte("test message")
+            // Use AddLogEntryToBuffer to preserve offset information
+            if err := lb.AddLogEntryToBuffer(&filer_pb.LogEntry{
+                TsNs:   time.Now().UnixNano(),
+                Key:    []byte("key"),
+                Data:   testData,
+                Offset: tt.currentOffset, // Add data at current offset
+            }); err != nil {
+                t.Fatalf("Failed to add log entry: %v", err)
+            }
         }

         // Create an offset-based position for the requested offset
