Browse Source

Fix GitHub #7562: decrypt/re-encrypt inline object content during S3 CopyObject; also increase test cluster stabilization delay to 5 seconds

Small objects stored inline in the filer entry were copied without being
decrypted/re-encrypted, so copying from an encrypted bucket through an
unencrypted temp bucket into another encrypted bucket failed with
InternalError. Additionally, the TUS integration tests were intermittently
failing because the volume server needed more time to create volumes and
register with the master; the stabilization delay is raised from 2 to 5
seconds to fix the flaky test behavior.
feature/tus-protocol
chrislu 2 days ago
parent
commit
49ed42b367
  1. 505
      test/s3/sse/github_7562_copy_test.go
  2. 2
      test/tus/tus_integration_test.go
  3. 277
      weed/s3api/s3api_object_handlers_copy.go

505
test/s3/sse/github_7562_copy_test.go

@ -0,0 +1,505 @@
package sse_test
import (
"bytes"
"context"
"fmt"
"io"
"testing"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestGitHub7562CopyFromEncryptedToTempToEncrypted reproduces the exact scenario from
// GitHub issue #7562: copying from an encrypted bucket to a temp bucket, then to another
// encrypted bucket fails with InternalError.
//
// Reproduction steps:
//  1. Create source bucket with SSE-S3 encryption enabled
//  2. Upload object (automatically encrypted)
//  3. Create temp bucket (no encryption)
//  4. Copy object from source to temp (decrypts)
//  5. Delete source bucket
//  6. Create destination bucket with SSE-S3 encryption
//  7. Copy object from temp to dest (should re-encrypt) - THIS FAILS
func TestGitHub7562CopyFromEncryptedToTempToEncrypted(t *testing.T) {
	ctx := context.Background()
	client, err := createS3Client(ctx, defaultConfig)
	require.NoError(t, err, "Failed to create S3 client")

	// Create three buckets
	srcBucket, err := createTestBucket(ctx, client, defaultConfig.BucketPrefix+"7562-src-")
	require.NoError(t, err, "Failed to create source bucket")
	tempBucket, err := createTestBucket(ctx, client, defaultConfig.BucketPrefix+"7562-temp-")
	require.NoError(t, err, "Failed to create temp bucket")
	destBucket, err := createTestBucket(ctx, client, defaultConfig.BucketPrefix+"7562-dest-")
	require.NoError(t, err, "Failed to create destination bucket")

	// Cleanup at the end
	defer func() {
		// Clean up in reverse order of creation
		cleanupTestBucket(ctx, client, destBucket)
		cleanupTestBucket(ctx, client, tempBucket)
		// srcBucket is normally deleted during the test (step 5), but if an
		// earlier assertion aborts the test it would otherwise leak; this
		// cleanup is best-effort and tolerates an already-deleted bucket.
		cleanupTestBucket(ctx, client, srcBucket)
	}()

	testData := []byte("Test data for GitHub issue #7562 - copy from encrypted to temp to encrypted bucket")
	objectKey := "demo-file.txt"

	t.Logf("[1] Creating source bucket with SSE-S3 default encryption: %s", srcBucket)
	// Step 1: Enable SSE-S3 default encryption on source bucket
	_, err = client.PutBucketEncryption(ctx, &s3.PutBucketEncryptionInput{
		Bucket: aws.String(srcBucket),
		ServerSideEncryptionConfiguration: &types.ServerSideEncryptionConfiguration{
			Rules: []types.ServerSideEncryptionRule{
				{
					ApplyServerSideEncryptionByDefault: &types.ServerSideEncryptionByDefault{
						SSEAlgorithm: types.ServerSideEncryptionAes256,
					},
				},
			},
		},
	})
	require.NoError(t, err, "Failed to set source bucket default encryption")

	t.Log("[2] Uploading demo object to source bucket")
	// Step 2: Upload object to source bucket (will be automatically encrypted)
	_, err = client.PutObject(ctx, &s3.PutObjectInput{
		Bucket: aws.String(srcBucket),
		Key:    aws.String(objectKey),
		Body:   bytes.NewReader(testData),
		// No encryption header - bucket default applies
	})
	require.NoError(t, err, "Failed to upload to source bucket")

	// Verify source object is encrypted
	srcHead, err := client.HeadObject(ctx, &s3.HeadObjectInput{
		Bucket: aws.String(srcBucket),
		Key:    aws.String(objectKey),
	})
	require.NoError(t, err, "Failed to HEAD source object")
	assert.Equal(t, types.ServerSideEncryptionAes256, srcHead.ServerSideEncryption,
		"Source object should be SSE-S3 encrypted")
	t.Logf("Source object encryption: %v", srcHead.ServerSideEncryption)

	t.Logf("[3] Creating temp bucket (no encryption): %s", tempBucket)
	// Temp bucket already created without encryption

	t.Log("[4] Copying object from source to temp (should decrypt)")
	// Step 4: Copy to temp bucket (no encryption = decrypts)
	_, err = client.CopyObject(ctx, &s3.CopyObjectInput{
		Bucket:     aws.String(tempBucket),
		Key:        aws.String(objectKey),
		CopySource: aws.String(fmt.Sprintf("%s/%s", srcBucket, objectKey)),
		// No encryption header - data stored unencrypted
	})
	require.NoError(t, err, "Failed to copy to temp bucket")

	// Verify temp object is NOT encrypted
	tempHead, err := client.HeadObject(ctx, &s3.HeadObjectInput{
		Bucket: aws.String(tempBucket),
		Key:    aws.String(objectKey),
	})
	require.NoError(t, err, "Failed to HEAD temp object")
	assert.Empty(t, tempHead.ServerSideEncryption, "Temp object should NOT be encrypted")
	t.Logf("Temp object encryption: %v (should be empty)", tempHead.ServerSideEncryption)

	// Verify temp object content
	tempGet, err := client.GetObject(ctx, &s3.GetObjectInput{
		Bucket: aws.String(tempBucket),
		Key:    aws.String(objectKey),
	})
	require.NoError(t, err, "Failed to GET temp object")
	tempData, err := io.ReadAll(tempGet.Body)
	tempGet.Body.Close()
	require.NoError(t, err, "Failed to read temp object")
	assertDataEqual(t, testData, tempData, "Temp object data should match original")

	t.Log("[5] Deleting original source bucket")
	// Step 5: Delete source bucket
	err = cleanupTestBucket(ctx, client, srcBucket)
	require.NoError(t, err, "Failed to delete source bucket")

	t.Logf("[6] Creating destination bucket with SSE-S3 encryption: %s", destBucket)
	// Step 6: Enable SSE-S3 default encryption on destination bucket
	_, err = client.PutBucketEncryption(ctx, &s3.PutBucketEncryptionInput{
		Bucket: aws.String(destBucket),
		ServerSideEncryptionConfiguration: &types.ServerSideEncryptionConfiguration{
			Rules: []types.ServerSideEncryptionRule{
				{
					ApplyServerSideEncryptionByDefault: &types.ServerSideEncryptionByDefault{
						SSEAlgorithm: types.ServerSideEncryptionAes256,
					},
				},
			},
		},
	})
	require.NoError(t, err, "Failed to set destination bucket default encryption")

	t.Log("[7] Copying object from temp to dest (should re-encrypt) - THIS IS WHERE #7562 FAILS")
	// Step 7: Copy from temp to dest bucket (should re-encrypt with SSE-S3)
	// THIS IS THE STEP THAT FAILS IN GITHUB ISSUE #7562
	_, err = client.CopyObject(ctx, &s3.CopyObjectInput{
		Bucket:     aws.String(destBucket),
		Key:        aws.String(objectKey),
		CopySource: aws.String(fmt.Sprintf("%s/%s", tempBucket, objectKey)),
		// No encryption header - bucket default should apply
	})
	require.NoError(t, err, "GitHub #7562: Failed to copy from temp to encrypted dest bucket")

	// Verify destination object is encrypted
	destHead, err := client.HeadObject(ctx, &s3.HeadObjectInput{
		Bucket: aws.String(destBucket),
		Key:    aws.String(objectKey),
	})
	require.NoError(t, err, "Failed to HEAD destination object")
	assert.Equal(t, types.ServerSideEncryptionAes256, destHead.ServerSideEncryption,
		"Destination object should be SSE-S3 encrypted via bucket default")
	t.Logf("Destination object encryption: %v", destHead.ServerSideEncryption)

	// Verify destination object content is correct
	destGet, err := client.GetObject(ctx, &s3.GetObjectInput{
		Bucket: aws.String(destBucket),
		Key:    aws.String(objectKey),
	})
	require.NoError(t, err, "Failed to GET destination object")
	destData, err := io.ReadAll(destGet.Body)
	destGet.Body.Close()
	require.NoError(t, err, "Failed to read destination object")
	assertDataEqual(t, testData, destData, "GitHub #7562: Destination object data mismatch after re-encryption")

	t.Log("[done] GitHub #7562 reproduction test completed successfully!")
}
// TestGitHub7562SimpleScenario tests the simpler variant: just copy unencrypted to encrypted bucket
func TestGitHub7562SimpleScenario(t *testing.T) {
	ctx := context.Background()
	s3Client, err := createS3Client(ctx, defaultConfig)
	require.NoError(t, err, "Failed to create S3 client")

	// One plain source bucket and one destination bucket that will receive
	// SSE-S3 default encryption; both are removed when the test ends.
	plainBucket, err := createTestBucket(ctx, s3Client, defaultConfig.BucketPrefix+"7562-simple-src-")
	require.NoError(t, err, "Failed to create source bucket")
	defer cleanupTestBucket(ctx, s3Client, plainBucket)
	encryptedBucket, err := createTestBucket(ctx, s3Client, defaultConfig.BucketPrefix+"7562-simple-dest-")
	require.NoError(t, err, "Failed to create destination bucket")
	defer cleanupTestBucket(ctx, s3Client, encryptedBucket)

	payload := []byte("Simple test for unencrypted to encrypted copy")
	key := "test-object.txt"
	t.Logf("Source bucket (no encryption): %s", plainBucket)
	t.Logf("Dest bucket (SSE-S3 default): %s", encryptedBucket)

	// Seed the unencrypted source object.
	_, err = s3Client.PutObject(ctx, &s3.PutObjectInput{
		Bucket: aws.String(plainBucket),
		Key:    aws.String(key),
		Body:   bytes.NewReader(payload),
	})
	require.NoError(t, err, "Failed to upload to source bucket")

	// Turn on SSE-S3 default encryption for the destination bucket.
	sseS3Config := &types.ServerSideEncryptionConfiguration{
		Rules: []types.ServerSideEncryptionRule{{
			ApplyServerSideEncryptionByDefault: &types.ServerSideEncryptionByDefault{
				SSEAlgorithm: types.ServerSideEncryptionAes256,
			},
		}},
	}
	_, err = s3Client.PutBucketEncryption(ctx, &s3.PutBucketEncryptionInput{
		Bucket:                            aws.String(encryptedBucket),
		ServerSideEncryptionConfiguration: sseS3Config,
	})
	require.NoError(t, err, "Failed to set dest bucket encryption")

	// Server-side copy: the destination bucket's default encryption should apply.
	_, err = s3Client.CopyObject(ctx, &s3.CopyObjectInput{
		Bucket:     aws.String(encryptedBucket),
		Key:        aws.String(key),
		CopySource: aws.String(fmt.Sprintf("%s/%s", plainBucket, key)),
	})
	require.NoError(t, err, "Failed to copy to encrypted bucket")

	// The copied object must report SSE-S3 encryption...
	headOut, err := s3Client.HeadObject(ctx, &s3.HeadObjectInput{
		Bucket: aws.String(encryptedBucket),
		Key:    aws.String(key),
	})
	require.NoError(t, err, "Failed to HEAD dest object")
	assert.Equal(t, types.ServerSideEncryptionAes256, headOut.ServerSideEncryption,
		"Object should be encrypted via bucket default")

	// ...and round-trip the original bytes unchanged.
	getOut, err := s3Client.GetObject(ctx, &s3.GetObjectInput{
		Bucket: aws.String(encryptedBucket),
		Key:    aws.String(key),
	})
	require.NoError(t, err, "Failed to GET dest object")
	gotBytes, err := io.ReadAll(getOut.Body)
	getOut.Body.Close()
	require.NoError(t, err, "Failed to read dest object")
	assertDataEqual(t, payload, gotBytes, "Data mismatch")
}
// TestGitHub7562DebugMetadata helps debug what metadata is present on objects at each step
func TestGitHub7562DebugMetadata(t *testing.T) {
	ctx := context.Background()
	client, err := createS3Client(ctx, defaultConfig)
	require.NoError(t, err, "Failed to create S3 client")

	// Create three buckets
	srcBucket, err := createTestBucket(ctx, client, defaultConfig.BucketPrefix+"7562-debug-src-")
	require.NoError(t, err, "Failed to create source bucket")
	tempBucket, err := createTestBucket(ctx, client, defaultConfig.BucketPrefix+"7562-debug-temp-")
	require.NoError(t, err, "Failed to create temp bucket")
	destBucket, err := createTestBucket(ctx, client, defaultConfig.BucketPrefix+"7562-debug-dest-")
	require.NoError(t, err, "Failed to create destination bucket")

	defer func() {
		cleanupTestBucket(ctx, client, destBucket)
		cleanupTestBucket(ctx, client, tempBucket)
		// srcBucket is deleted mid-test, but clean it up here as well in case
		// an earlier failure aborted the test before that point (best-effort).
		cleanupTestBucket(ctx, client, srcBucket)
	}()

	testData := []byte("Debug metadata test for GitHub #7562")
	objectKey := "debug-file.txt"

	// Enable SSE-S3 on source
	_, err = client.PutBucketEncryption(ctx, &s3.PutBucketEncryptionInput{
		Bucket: aws.String(srcBucket),
		ServerSideEncryptionConfiguration: &types.ServerSideEncryptionConfiguration{
			Rules: []types.ServerSideEncryptionRule{
				{
					ApplyServerSideEncryptionByDefault: &types.ServerSideEncryptionByDefault{
						SSEAlgorithm: types.ServerSideEncryptionAes256,
					},
				},
			},
		},
	})
	require.NoError(t, err, "Failed to set source bucket encryption")

	// Upload
	_, err = client.PutObject(ctx, &s3.PutObjectInput{
		Bucket: aws.String(srcBucket),
		Key:    aws.String(objectKey),
		Body:   bytes.NewReader(testData),
	})
	require.NoError(t, err, "Failed to upload")

	// Log source object headers
	srcHead, err := client.HeadObject(ctx, &s3.HeadObjectInput{
		Bucket: aws.String(srcBucket),
		Key:    aws.String(objectKey),
	})
	require.NoError(t, err, "Failed to HEAD source")
	t.Logf("=== SOURCE OBJECT (encrypted) ===")
	t.Logf("ServerSideEncryption: %v", srcHead.ServerSideEncryption)
	t.Logf("Metadata: %v", srcHead.Metadata)
	t.Logf("ContentLength: %d", aws.ToInt64(srcHead.ContentLength))

	// Copy to temp
	_, err = client.CopyObject(ctx, &s3.CopyObjectInput{
		Bucket:     aws.String(tempBucket),
		Key:        aws.String(objectKey),
		CopySource: aws.String(fmt.Sprintf("%s/%s", srcBucket, objectKey)),
	})
	require.NoError(t, err, "Failed to copy to temp")

	// Log temp object headers
	tempHead, err := client.HeadObject(ctx, &s3.HeadObjectInput{
		Bucket: aws.String(tempBucket),
		Key:    aws.String(objectKey),
	})
	require.NoError(t, err, "Failed to HEAD temp")
	t.Logf("=== TEMP OBJECT (should be unencrypted) ===")
	t.Logf("ServerSideEncryption: %v (should be empty)", tempHead.ServerSideEncryption)
	t.Logf("Metadata: %v", tempHead.Metadata)
	t.Logf("ContentLength: %d", aws.ToInt64(tempHead.ContentLength))

	// Verify temp is NOT encrypted (warn only: this is a debug aid, not an assertion)
	if tempHead.ServerSideEncryption != "" {
		t.Logf("WARNING: Temp object unexpectedly has encryption: %v", tempHead.ServerSideEncryption)
	}

	// Delete source bucket (best-effort; the deferred cleanup also covers it)
	cleanupTestBucket(ctx, client, srcBucket)

	// Enable SSE-S3 on dest
	_, err = client.PutBucketEncryption(ctx, &s3.PutBucketEncryptionInput{
		Bucket: aws.String(destBucket),
		ServerSideEncryptionConfiguration: &types.ServerSideEncryptionConfiguration{
			Rules: []types.ServerSideEncryptionRule{
				{
					ApplyServerSideEncryptionByDefault: &types.ServerSideEncryptionByDefault{
						SSEAlgorithm: types.ServerSideEncryptionAes256,
					},
				},
			},
		},
	})
	require.NoError(t, err, "Failed to set dest bucket encryption")

	// Copy to dest - THIS IS WHERE #7562 FAILS
	t.Log("=== COPYING TO ENCRYPTED DEST ===")
	_, err = client.CopyObject(ctx, &s3.CopyObjectInput{
		Bucket:     aws.String(destBucket),
		Key:        aws.String(objectKey),
		CopySource: aws.String(fmt.Sprintf("%s/%s", tempBucket, objectKey)),
	})
	if err != nil {
		t.Logf("!!! COPY FAILED (GitHub #7562): %v", err)
		t.FailNow()
	}

	// Log dest object headers
	destHead, err := client.HeadObject(ctx, &s3.HeadObjectInput{
		Bucket: aws.String(destBucket),
		Key:    aws.String(objectKey),
	})
	require.NoError(t, err, "Failed to HEAD dest")
	t.Logf("=== DEST OBJECT (should be encrypted) ===")
	t.Logf("ServerSideEncryption: %v", destHead.ServerSideEncryption)
	t.Logf("Metadata: %v", destHead.Metadata)
	t.Logf("ContentLength: %d", aws.ToInt64(destHead.ContentLength))

	// Verify dest IS encrypted
	assert.Equal(t, types.ServerSideEncryptionAes256, destHead.ServerSideEncryption,
		"Dest object should be encrypted")

	// Verify content is readable
	destGet, err := client.GetObject(ctx, &s3.GetObjectInput{
		Bucket: aws.String(destBucket),
		Key:    aws.String(objectKey),
	})
	require.NoError(t, err, "Failed to GET dest")
	destData, err := io.ReadAll(destGet.Body)
	destGet.Body.Close()
	require.NoError(t, err, "Failed to read dest")
	assertDataEqual(t, testData, destData, "Data mismatch")

	t.Log("=== DEBUG TEST PASSED ===")
}
// TestGitHub7562LargeFile tests the issue with larger files that might trigger multipart handling
func TestGitHub7562LargeFile(t *testing.T) {
	ctx := context.Background()
	client, err := createS3Client(ctx, defaultConfig)
	require.NoError(t, err, "Failed to create S3 client")

	srcBucket, err := createTestBucket(ctx, client, defaultConfig.BucketPrefix+"7562-large-src-")
	require.NoError(t, err, "Failed to create source bucket")
	tempBucket, err := createTestBucket(ctx, client, defaultConfig.BucketPrefix+"7562-large-temp-")
	require.NoError(t, err, "Failed to create temp bucket")
	destBucket, err := createTestBucket(ctx, client, defaultConfig.BucketPrefix+"7562-large-dest-")
	require.NoError(t, err, "Failed to create destination bucket")

	defer func() {
		cleanupTestBucket(ctx, client, destBucket)
		cleanupTestBucket(ctx, client, tempBucket)
		// srcBucket is deleted mid-test, but clean it up here as well in case
		// an earlier failure aborted the test before that point (best-effort).
		cleanupTestBucket(ctx, client, srcBucket)
	}()

	// Use larger file to potentially trigger different code paths
	testData := generateTestData(5 * 1024 * 1024) // 5MB
	objectKey := "large-file.bin"
	t.Logf("Testing with %d byte file", len(testData))

	// Enable SSE-S3 on source
	_, err = client.PutBucketEncryption(ctx, &s3.PutBucketEncryptionInput{
		Bucket: aws.String(srcBucket),
		ServerSideEncryptionConfiguration: &types.ServerSideEncryptionConfiguration{
			Rules: []types.ServerSideEncryptionRule{
				{
					ApplyServerSideEncryptionByDefault: &types.ServerSideEncryptionByDefault{
						SSEAlgorithm: types.ServerSideEncryptionAes256,
					},
				},
			},
		},
	})
	require.NoError(t, err, "Failed to set source bucket encryption")

	// Upload
	_, err = client.PutObject(ctx, &s3.PutObjectInput{
		Bucket: aws.String(srcBucket),
		Key:    aws.String(objectKey),
		Body:   bytes.NewReader(testData),
	})
	require.NoError(t, err, "Failed to upload")

	// Copy to temp (decrypt)
	_, err = client.CopyObject(ctx, &s3.CopyObjectInput{
		Bucket:     aws.String(tempBucket),
		Key:        aws.String(objectKey),
		CopySource: aws.String(fmt.Sprintf("%s/%s", srcBucket, objectKey)),
	})
	require.NoError(t, err, "Failed to copy to temp")

	// Delete source (best-effort; the deferred cleanup also covers it)
	cleanupTestBucket(ctx, client, srcBucket)

	// Enable SSE-S3 on dest
	_, err = client.PutBucketEncryption(ctx, &s3.PutBucketEncryptionInput{
		Bucket: aws.String(destBucket),
		ServerSideEncryptionConfiguration: &types.ServerSideEncryptionConfiguration{
			Rules: []types.ServerSideEncryptionRule{
				{
					ApplyServerSideEncryptionByDefault: &types.ServerSideEncryptionByDefault{
						SSEAlgorithm: types.ServerSideEncryptionAes256,
					},
				},
			},
		},
	})
	require.NoError(t, err, "Failed to set dest bucket encryption")

	// Copy to dest (re-encrypt) - GitHub #7562
	_, err = client.CopyObject(ctx, &s3.CopyObjectInput{
		Bucket:     aws.String(destBucket),
		Key:        aws.String(objectKey),
		CopySource: aws.String(fmt.Sprintf("%s/%s", tempBucket, objectKey)),
	})
	require.NoError(t, err, "GitHub #7562: Large file copy to encrypted bucket failed")

	// Verify encryption state and size of the copy
	destHead, err := client.HeadObject(ctx, &s3.HeadObjectInput{
		Bucket: aws.String(destBucket),
		Key:    aws.String(objectKey),
	})
	require.NoError(t, err, "Failed to HEAD dest")
	assert.Equal(t, types.ServerSideEncryptionAes256, destHead.ServerSideEncryption)
	assert.Equal(t, int64(len(testData)), aws.ToInt64(destHead.ContentLength))

	// Verify content
	destGet, err := client.GetObject(ctx, &s3.GetObjectInput{
		Bucket: aws.String(destBucket),
		Key:    aws.String(objectKey),
	})
	require.NoError(t, err, "Failed to GET dest")
	destData, err := io.ReadAll(destGet.Body)
	destGet.Body.Close()
	require.NoError(t, err, "Failed to read dest")
	assertDataEqual(t, testData, destData, "Large file data mismatch")

	t.Log("Large file test passed!")
}

2
test/tus/tus_integration_test.go

@ -173,7 +173,7 @@ func startTestCluster(t *testing.T, ctx context.Context) (*TestCluster, error) {
// Wait a bit more for the cluster to fully stabilize // Wait a bit more for the cluster to fully stabilize
// Volumes are created lazily, and we need to ensure the master topology is ready // Volumes are created lazily, and we need to ensure the master topology is ready
time.Sleep(2 * time.Second)
time.Sleep(5 * time.Second)
return cluster, nil return cluster, nil
} }

277
weed/s3api/s3api_object_handlers_copy.go

@ -167,6 +167,14 @@ func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request
} }
// Copy extended attributes from source, filtering out conflicting encryption metadata // Copy extended attributes from source, filtering out conflicting encryption metadata
// Pre-compute encryption state once for efficiency
srcHasSSEC := IsSSECEncrypted(entry.Extended)
srcHasSSEKMS := IsSSEKMSEncrypted(entry.Extended)
srcHasSSES3 := IsSSES3EncryptedInternal(entry.Extended)
dstWantsSSEC := IsSSECRequest(r)
dstWantsSSEKMS := IsSSEKMSRequest(r)
dstWantsSSES3 := IsSSES3RequestInternal(r)
for k, v := range entry.Extended { for k, v := range entry.Extended {
// Skip encryption-specific headers that might conflict with destination encryption type // Skip encryption-specific headers that might conflict with destination encryption type
skipHeader := false skipHeader := false
@ -177,17 +185,9 @@ func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request
skipHeader = true skipHeader = true
} }
// If we're doing cross-encryption, skip conflicting headers
if !skipHeader && len(entry.GetChunks()) > 0 {
// Detect source and destination encryption types
srcHasSSEC := IsSSECEncrypted(entry.Extended)
srcHasSSEKMS := IsSSEKMSEncrypted(entry.Extended)
srcHasSSES3 := IsSSES3EncryptedInternal(entry.Extended)
dstWantsSSEC := IsSSECRequest(r)
dstWantsSSEKMS := IsSSEKMSRequest(r)
dstWantsSSES3 := IsSSES3RequestInternal(r)
// Use helper function to determine if header should be skipped
// Filter conflicting headers for cross-encryption or encrypted→unencrypted copies
// This applies to both inline files (no chunks) and chunked files - fixes GitHub #7562
if !skipHeader {
skipHeader = shouldSkipEncryptionHeader(k, skipHeader = shouldSkipEncryptionHeader(k,
srcHasSSEC, srcHasSSEKMS, srcHasSSES3, srcHasSSEC, srcHasSSEKMS, srcHasSSES3,
dstWantsSSEC, dstWantsSSEKMS, dstWantsSSES3) dstWantsSSEC, dstWantsSSEKMS, dstWantsSSES3)
@ -210,10 +210,31 @@ func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request
dstEntry.Extended[k] = v dstEntry.Extended[k] = v
} }
// For zero-size files or files without chunks, use the original approach
// For zero-size files or files without chunks, handle inline content
// This includes encrypted inline files that need decryption/re-encryption
if entry.Attributes.FileSize == 0 || len(entry.GetChunks()) == 0 { if entry.Attributes.FileSize == 0 || len(entry.GetChunks()) == 0 {
// Just copy the entry structure without chunks for zero-size files
dstEntry.Chunks = nil dstEntry.Chunks = nil
// Handle inline encrypted content - fixes GitHub #7562
if len(entry.Content) > 0 {
inlineContent, inlineMetadata, inlineErr := s3a.processInlineContentForCopy(
entry, r, dstBucket, dstObject,
srcHasSSEC, srcHasSSEKMS, srcHasSSES3,
dstWantsSSEC, dstWantsSSEKMS, dstWantsSSES3)
if inlineErr != nil {
glog.Errorf("CopyObjectHandler inline content error: %v", inlineErr)
s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
return
}
dstEntry.Content = inlineContent
// Apply inline destination metadata
if inlineMetadata != nil {
for k, v := range inlineMetadata {
dstEntry.Extended[k] = v
}
}
}
} else { } else {
// Use unified copy strategy approach // Use unified copy strategy approach
dstChunks, dstMetadata, copyErr := s3a.executeUnifiedCopyStrategy(entry, r, dstBucket, srcObject, dstObject) dstChunks, dstMetadata, copyErr := s3a.executeUnifiedCopyStrategy(entry, r, dstBucket, srcObject, dstObject)
@ -2405,3 +2426,233 @@ func shouldSkipEncryptionHeader(headerKey string,
// Default: don't skip the header // Default: don't skip the header
return false return false
} }
// processInlineContentForCopy handles encryption/decryption for inline content during copy
// This fixes GitHub #7562 where small files stored inline weren't properly decrypted/re-encrypted
func (s3a *S3ApiServer) processInlineContentForCopy(
	entry *filer_pb.Entry, r *http.Request, dstBucket, dstObject string,
	srcSSEC, srcSSEKMS, srcSSES3 bool,
	dstSSEC, dstSSEKMS, dstSSES3 bool) ([]byte, map[string][]byte, error) {

	data := entry.Content
	var dstMetadata map[string][]byte

	sourceIsEncrypted := srcSSEC || srcSSEKMS || srcSSES3
	targetNeedsEncryption := dstSSEC || dstSSEKMS || dstSSES3

	// No explicit destination encryption requested: fall back to the
	// destination bucket's default encryption configuration, if any.
	if !targetNeedsEncryption {
		if bucketMetadata, err := s3a.getBucketMetadata(dstBucket); err == nil && bucketMetadata != nil && bucketMetadata.Encryption != nil {
			switch bucketMetadata.Encryption.SseAlgorithm {
			case "aws:kms":
				dstSSEKMS, targetNeedsEncryption = true, true
			case "AES256":
				dstSSES3, targetNeedsEncryption = true, true
			}
		}
	}

	// Recover the plaintext from an encrypted source before re-encrypting.
	if sourceIsEncrypted {
		plaintext, decErr := s3a.decryptInlineContent(entry, srcSSEC, srcSSEKMS, srcSSES3, r)
		if decErr != nil {
			return nil, nil, fmt.Errorf("failed to decrypt inline content: %w", decErr)
		}
		data = plaintext
		glog.V(3).Infof("Decrypted inline content: %d bytes", len(data))
	}

	// Apply the destination's encryption (explicit or bucket default).
	if targetNeedsEncryption {
		ciphertext, encMetadata, encErr := s3a.encryptInlineContent(data, dstBucket, dstObject, dstSSEC, dstSSEKMS, dstSSES3, r)
		if encErr != nil {
			return nil, nil, fmt.Errorf("failed to encrypt inline content: %w", encErr)
		}
		data = ciphertext
		dstMetadata = encMetadata
		glog.V(3).Infof("Encrypted inline content: %d bytes", len(data))
	}

	return data, dstMetadata, nil
}
// decryptInlineContent decrypts inline content from an encrypted source
func (s3a *S3ApiServer) decryptInlineContent(entry *filer_pb.Entry, srcSSEC, srcSSEKMS, srcSSES3 bool, r *http.Request) ([]byte, error) {
	encrypted := entry.Content

	switch {
	case srcSSES3:
		// The per-object SSE-S3 key (including its IV) travels in the
		// entry's extended attributes.
		serialized, ok := entry.Extended[s3_constants.SeaweedFSSSES3Key]
		if !ok {
			return nil, fmt.Errorf("SSE-S3 key not found in metadata")
		}
		sseKey, err := DeserializeSSES3Metadata(serialized, GetSSES3KeyManager())
		if err != nil {
			return nil, fmt.Errorf("failed to deserialize SSE-S3 key: %w", err)
		}
		if len(sseKey.IV) == 0 {
			return nil, fmt.Errorf("SSE-S3 IV not found")
		}
		reader, err := CreateSSES3DecryptedReader(bytes.NewReader(encrypted), sseKey, sseKey.IV)
		if err != nil {
			return nil, fmt.Errorf("failed to create SSE-S3 decrypted reader: %w", err)
		}
		return io.ReadAll(reader)

	case srcSSEKMS:
		// SSE-KMS key material is serialized into the entry metadata.
		serialized, ok := entry.Extended[s3_constants.SeaweedFSSSEKMSKey]
		if !ok {
			return nil, fmt.Errorf("SSE-KMS key not found in metadata")
		}
		sseKey, err := DeserializeSSEKMSMetadata(serialized)
		if err != nil {
			return nil, fmt.Errorf("failed to deserialize SSE-KMS key: %w", err)
		}
		reader, err := CreateSSEKMSDecryptedReader(bytes.NewReader(encrypted), sseKey)
		if err != nil {
			return nil, fmt.Errorf("failed to create SSE-KMS decrypted reader: %w", err)
		}
		return io.ReadAll(reader)

	case srcSSEC:
		// SSE-C: the customer key comes from the copy-source request
		// headers, while the IV lives in the entry metadata.
		sourceKey, err := ParseSSECCopySourceHeaders(r)
		if err != nil {
			return nil, fmt.Errorf("failed to parse SSE-C copy source headers: %w", err)
		}
		iv, err := GetSSECIVFromMetadata(entry.Extended)
		if err != nil {
			return nil, fmt.Errorf("failed to get SSE-C IV: %w", err)
		}
		reader, err := CreateSSECDecryptedReader(bytes.NewReader(encrypted), sourceKey, iv)
		if err != nil {
			return nil, fmt.Errorf("failed to create SSE-C decrypted reader: %w", err)
		}
		return io.ReadAll(reader)
	}

	// Source not encrypted, return as-is
	return encrypted, nil
}
// encryptInlineContent encrypts inline content for the destination
func (s3a *S3ApiServer) encryptInlineContent(content []byte, dstBucket, dstObject string,
	dstSSEC, dstSSEKMS, dstSSES3 bool, r *http.Request) ([]byte, map[string][]byte, error) {

	meta := make(map[string][]byte)

	switch {
	case dstSSES3:
		// Server-managed key: mint (or reuse) one via the key manager.
		key, err := GetSSES3KeyManager().GetOrCreateKey("")
		if err != nil {
			return nil, nil, fmt.Errorf("failed to generate SSE-S3 key: %w", err)
		}
		reader, iv, err := CreateSSES3EncryptedReader(bytes.NewReader(content), key)
		if err != nil {
			return nil, nil, fmt.Errorf("failed to create SSE-S3 encrypted reader: %w", err)
		}
		ciphertext, err := io.ReadAll(reader)
		if err != nil {
			return nil, nil, fmt.Errorf("failed to read encrypted content: %w", err)
		}
		// Persist the IV alongside the key so the object can be decrypted later.
		key.IV = iv
		serialized, err := SerializeSSES3Metadata(key)
		if err != nil {
			return nil, nil, fmt.Errorf("failed to serialize SSE-S3 metadata: %w", err)
		}
		meta[s3_constants.SeaweedFSSSES3Key] = serialized
		meta[s3_constants.AmzServerSideEncryption] = []byte("AES256")
		return ciphertext, meta, nil

	case dstSSEKMS:
		keyID, encryptionContext, bucketKeyEnabled, err := ParseSSEKMSCopyHeaders(r)
		if err != nil {
			return nil, nil, fmt.Errorf("failed to parse SSE-KMS headers: %w", err)
		}
		// Build a default encryption context when the request supplied none.
		if encryptionContext == nil {
			encryptionContext = BuildEncryptionContext(dstBucket, dstObject, bucketKeyEnabled)
		}
		reader, sseKey, err := CreateSSEKMSEncryptedReaderWithBucketKey(
			bytes.NewReader(content), keyID, encryptionContext, bucketKeyEnabled)
		if err != nil {
			return nil, nil, fmt.Errorf("failed to create SSE-KMS encrypted reader: %w", err)
		}
		ciphertext, err := io.ReadAll(reader)
		if err != nil {
			return nil, nil, fmt.Errorf("failed to read encrypted content: %w", err)
		}
		serialized, err := SerializeSSEKMSMetadata(sseKey)
		if err != nil {
			return nil, nil, fmt.Errorf("failed to serialize SSE-KMS metadata: %w", err)
		}
		meta[s3_constants.SeaweedFSSSEKMSKey] = serialized
		meta[s3_constants.AmzServerSideEncryption] = []byte("aws:kms")
		return ciphertext, meta, nil

	case dstSSEC:
		// Customer-provided key from the request headers.
		destKey, err := ParseSSECHeaders(r)
		if err != nil {
			return nil, nil, fmt.Errorf("failed to parse SSE-C headers: %w", err)
		}
		reader, iv, err := CreateSSECEncryptedReader(bytes.NewReader(content), destKey)
		if err != nil {
			return nil, nil, fmt.Errorf("failed to create SSE-C encrypted reader: %w", err)
		}
		ciphertext, err := io.ReadAll(reader)
		if err != nil {
			return nil, nil, fmt.Errorf("failed to read encrypted content: %w", err)
		}
		// Record the IV plus the SSE-C response headers in the metadata.
		StoreSSECIVInMetadata(meta, iv)
		meta[s3_constants.AmzServerSideEncryptionCustomerAlgorithm] = []byte("AES256")
		meta[s3_constants.AmzServerSideEncryptionCustomerKeyMD5] = []byte(destKey.KeyMD5)
		return ciphertext, meta, nil
	}

	// No encryption needed
	return content, nil, nil
}
Loading…
Cancel
Save