Browse Source

Fix SSE-S3 copy: preserve encryption metadata and set chunk SSE type

Fixes GitHub #7562: Copying objects between encrypted buckets was failing.

Root causes:
1. processMetadataBytes was re-adding SSE headers from source entry, undoing
   the encryption header filtering. Now uses dstEntry.Extended which is
   already filtered.

2. SSE-S3 streaming copy returned nil metadata. Now properly generates and
   returns SSE-S3 destination metadata (SeaweedFSSSES3Key, AES256 header)
   via ExecuteStreamingCopyWithMetadata.

3. Chunks created during streaming copy didn't have SseType set. Now sets
   SseType and per-chunk SseMetadata with chunk-specific IVs for SSE-S3,
   enabling proper decryption on GetObject.
pull/7598/head
chrislu 4 weeks ago
parent
commit
43fc623e3e
  1. 4
      weed/s3api/s3api_object_handlers_copy.go
  2. 36
      weed/s3api/s3api_object_handlers_copy_unified.go
  3. 59
      weed/s3api/s3api_streaming_copy.go

4
weed/s3api/s3api_object_handlers_copy.go

@ -199,7 +199,9 @@ func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request
} }
// Process metadata and tags and apply to destination // Process metadata and tags and apply to destination
processedMetadata, tagErr := processMetadataBytes(r.Header, entry.Extended, replaceMeta, replaceTagging)
// Use dstEntry.Extended (already filtered) as the source, not entry.Extended,
// to preserve the encryption header filtering. Fixes GitHub #7562.
processedMetadata, tagErr := processMetadataBytes(r.Header, dstEntry.Extended, replaceMeta, replaceTagging)
if tagErr != nil { if tagErr != nil {
s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource) s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
return return

36
weed/s3api/s3api_object_handlers_copy_unified.go

@ -8,6 +8,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/glog" "github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3err" "github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
weed_server "github.com/seaweedfs/seaweedfs/weed/server" weed_server "github.com/seaweedfs/seaweedfs/weed/server"
) )
@ -134,8 +135,29 @@ func (s3a *S3ApiServer) executeEncryptCopy(entry *filer_pb.Entry, r *http.Reques
if state.DstSSES3 { if state.DstSSES3 {
// Use streaming copy for SSE-S3 encryption // Use streaming copy for SSE-S3 encryption
chunks, err := s3a.executeStreamingReencryptCopy(entry, r, state, dstPath)
return chunks, nil, err
chunks, encSpec, err := s3a.executeStreamingReencryptCopyWithMetadata(entry, r, state, dstPath)
if err != nil {
return nil, nil, err
}
// Generate SSE-S3 destination metadata from the encryption spec
dstMetadata := make(map[string][]byte)
if encSpec != nil && encSpec.DestinationKey != nil {
if sseKey, ok := encSpec.DestinationKey.(*SSES3Key); ok {
// Store the IV on the key before serialization
if len(encSpec.DestinationIV) > 0 {
sseKey.IV = encSpec.DestinationIV
}
if keyData, serErr := SerializeSSES3Metadata(sseKey); serErr == nil {
dstMetadata[s3_constants.SeaweedFSSSES3Key] = keyData
dstMetadata[s3_constants.AmzServerSideEncryption] = []byte("AES256")
glog.V(3).Infof("Generated SSE-S3 metadata for streaming encrypt copy: %s", dstPath)
} else {
glog.Errorf("Failed to serialize SSE-S3 metadata: %v", serErr)
}
}
}
return chunks, dstMetadata, nil
} }
return nil, nil, fmt.Errorf("unknown target encryption type") return nil, nil, fmt.Errorf("unknown target encryption type")
@ -257,3 +279,13 @@ func (s3a *S3ApiServer) executeStreamingReencryptCopy(entry *filer_pb.Entry, r *
// Execute streaming copy // Execute streaming copy
return streamingManager.ExecuteStreamingCopy(context.Background(), entry, r, dstPath, state) return streamingManager.ExecuteStreamingCopy(context.Background(), entry, r, dstPath, state)
} }
// executeStreamingReencryptCopyWithMetadata performs a streaming re-encryption copy and,
// unlike executeStreamingReencryptCopy, also surfaces the EncryptionSpec that was applied.
// SSE-S3 needs that spec so the caller can build the destination object's
// encryption metadata (fixes GitHub #7562).
func (s3a *S3ApiServer) executeStreamingReencryptCopyWithMetadata(entry *filer_pb.Entry, r *http.Request, state *EncryptionState, dstPath string) ([]*filer_pb.FileChunk, *EncryptionSpec, error) {
	// Delegate to a freshly constructed streaming copy manager.
	manager := NewStreamingCopyManager(s3a)
	return manager.ExecuteStreamingCopyWithMetadata(context.Background(), entry, r, dstPath, state)
}

59
weed/s3api/s3api_streaming_copy.go

@ -61,16 +61,23 @@ func NewStreamingCopyManager(s3a *S3ApiServer) *StreamingCopyManager {
// ExecuteStreamingCopy performs a streaming copy operation // ExecuteStreamingCopy performs a streaming copy operation
func (scm *StreamingCopyManager) ExecuteStreamingCopy(ctx context.Context, entry *filer_pb.Entry, r *http.Request, dstPath string, state *EncryptionState) ([]*filer_pb.FileChunk, error) { func (scm *StreamingCopyManager) ExecuteStreamingCopy(ctx context.Context, entry *filer_pb.Entry, r *http.Request, dstPath string, state *EncryptionState) ([]*filer_pb.FileChunk, error) {
chunks, _, err := scm.ExecuteStreamingCopyWithMetadata(ctx, entry, r, dstPath, state)
return chunks, err
}
// ExecuteStreamingCopyWithMetadata performs a streaming copy operation and returns the encryption spec
// This is needed for SSE-S3 to properly set destination metadata (fixes GitHub #7562)
func (scm *StreamingCopyManager) ExecuteStreamingCopyWithMetadata(ctx context.Context, entry *filer_pb.Entry, r *http.Request, dstPath string, state *EncryptionState) ([]*filer_pb.FileChunk, *EncryptionSpec, error) {
// Create streaming copy specification // Create streaming copy specification
spec, err := scm.createStreamingSpec(entry, r, state) spec, err := scm.createStreamingSpec(entry, r, state)
if err != nil { if err != nil {
return nil, fmt.Errorf("create streaming spec: %w", err)
return nil, nil, fmt.Errorf("create streaming spec: %w", err)
} }
// Create source reader from entry // Create source reader from entry
sourceReader, err := scm.createSourceReader(entry) sourceReader, err := scm.createSourceReader(entry)
if err != nil { if err != nil {
return nil, fmt.Errorf("create source reader: %w", err)
return nil, nil, fmt.Errorf("create source reader: %w", err)
} }
defer sourceReader.Close() defer sourceReader.Close()
@ -79,11 +86,16 @@ func (scm *StreamingCopyManager) ExecuteStreamingCopy(ctx context.Context, entry
// Create processing pipeline // Create processing pipeline
processedReader, err := scm.createProcessingPipeline(spec) processedReader, err := scm.createProcessingPipeline(spec)
if err != nil { if err != nil {
return nil, fmt.Errorf("create processing pipeline: %w", err)
return nil, nil, fmt.Errorf("create processing pipeline: %w", err)
} }
// Stream to destination // Stream to destination
return scm.streamToDestination(ctx, processedReader, spec, dstPath)
chunks, err := scm.streamToDestination(ctx, processedReader, spec, dstPath)
if err != nil {
return nil, nil, err
}
return chunks, spec.EncryptionSpec, nil
} }
// createStreamingSpec creates a streaming specification based on copy parameters // createStreamingSpec creates a streaming specification based on copy parameters
@ -453,8 +465,8 @@ func (scm *StreamingCopyManager) streamToChunks(ctx context.Context, reader io.R
for { for {
n, err := reader.Read(buffer) n, err := reader.Read(buffer)
if n > 0 { if n > 0 {
// Create chunk for this data
chunk, chunkErr := scm.createChunkFromData(buffer[:n], offset, dstPath)
// Create chunk for this data, passing encryption spec for SSE type
chunk, chunkErr := scm.createChunkFromData(buffer[:n], offset, dstPath, spec.EncryptionSpec)
if chunkErr != nil { if chunkErr != nil {
return nil, fmt.Errorf("create chunk from data: %w", chunkErr) return nil, fmt.Errorf("create chunk from data: %w", chunkErr)
} }
@ -474,7 +486,7 @@ func (scm *StreamingCopyManager) streamToChunks(ctx context.Context, reader io.R
} }
// createChunkFromData creates a chunk from streaming data // createChunkFromData creates a chunk from streaming data
func (scm *StreamingCopyManager) createChunkFromData(data []byte, offset int64, dstPath string) (*filer_pb.FileChunk, error) {
func (scm *StreamingCopyManager) createChunkFromData(data []byte, offset int64, dstPath string, encSpec *EncryptionSpec) (*filer_pb.FileChunk, error) {
// Assign new volume // Assign new volume
assignResult, err := scm.s3a.assignNewVolume(dstPath) assignResult, err := scm.s3a.assignNewVolume(dstPath)
if err != nil { if err != nil {
@ -487,6 +499,39 @@ func (scm *StreamingCopyManager) createChunkFromData(data []byte, offset int64,
Size: uint64(len(data)), Size: uint64(len(data)),
} }
// Set SSE type and metadata on chunk if destination is encrypted
// This is critical for GetObject to know to decrypt the data - fixes GitHub #7562
if encSpec != nil && encSpec.NeedsEncryption {
switch encSpec.DestinationType {
case EncryptionTypeSSEC:
chunk.SseType = filer_pb.SSEType_SSE_C
// SSE-C metadata is handled at object level, not per-chunk for streaming copy
case EncryptionTypeSSEKMS:
chunk.SseType = filer_pb.SSEType_SSE_KMS
// SSE-KMS metadata is handled at object level, not per-chunk for streaming copy
case EncryptionTypeSSES3:
chunk.SseType = filer_pb.SSEType_SSE_S3
// Create per-chunk SSE-S3 metadata with chunk-specific IV
if sseKey, ok := encSpec.DestinationKey.(*SSES3Key); ok {
// Calculate chunk-specific IV using base IV and chunk offset
baseIV := encSpec.DestinationIV
if len(baseIV) > 0 {
chunkIV, _ := calculateIVWithOffset(baseIV, offset)
// Create chunk key with the chunk-specific IV
chunkSSEKey := &SSES3Key{
Key: sseKey.Key,
KeyID: sseKey.KeyID,
Algorithm: sseKey.Algorithm,
IV: chunkIV,
}
if chunkMetadata, serErr := SerializeSSES3Metadata(chunkSSEKey); serErr == nil {
chunk.SseMetadata = chunkMetadata
}
}
}
}
}
// Set file ID // Set file ID
if err := scm.s3a.setChunkFileId(chunk, assignResult); err != nil { if err := scm.s3a.setChunkFileId(chunk, assignResult); err != nil {
return nil, err return nil, err

Loading…
Cancel
Save