
s3api: fix volume assignment failure in copy operations (#8128)

Recent changes in the S3 unified copy strategy constructed source and
destination paths without the required BucketsPath prefix (typically
/buckets). When these paths reached the Filer for volume assignment, the
Filer could not resolve the correct collection and storage rules and fell
back to a disk type with no available capacity.

This fix ensures that all relevant paths in the S3 copy handlers include the
correct BucketsPath prefix so the collection resolves properly (see the sketch
after this message).

Fixes a replication issue with Harbor uploads via the S3 API.
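
For illustration, a minimal runnable sketch of the path difference. The bucket, object, and "/buckets" values below are assumed examples; only the Sprintf patterns mirror the diff that follows.

package main

import "fmt"

func main() {
	// Assumed example values; s3a.option.BucketsPath is typically "/buckets".
	bucketsPath := "/buckets"
	dstBucket, dstObject := "harbor", "charts/app.tgz"

	// Before the fix: a filer-root-relative path without the BucketsPath prefix,
	// which the Filer cannot match to the bucket's collection and storage rules.
	before := fmt.Sprintf("/%s/%s", dstBucket, dstObject)

	// After the fix: the path is rooted under BucketsPath, so volume assignment
	// resolves the right collection and placement rules.
	after := fmt.Sprintf("%s/%s/%s", bucketsPath, dstBucket, dstObject)

	fmt.Println(before) // /harbor/charts/app.tgz
	fmt.Println(after)  // /buckets/harbor/charts/app.tgz
}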
Chris Lu committed a72e269a2e (via GitHub, 3 days ago)
  1. weed/s3api/s3api_object_handlers_copy.go (18 changes)
  2. weed/s3api/s3api_object_handlers_copy_unified.go (8 changes)

weed/s3api/s3api_object_handlers_copy.go

@@ -2167,7 +2167,7 @@ func (s3a *S3ApiServer) copyChunkWithReencryption(chunk *filer_pb.FileChunk, cop
 // copyChunksWithSSEKMS handles SSE-KMS aware copying with smart fast/slow path selection
 // Returns chunks and destination metadata like SSE-C for consistency
-func (s3a *S3ApiServer) copyChunksWithSSEKMS(entry *filer_pb.Entry, r *http.Request, bucket string) ([]*filer_pb.FileChunk, map[string][]byte, error) {
+func (s3a *S3ApiServer) copyChunksWithSSEKMS(entry *filer_pb.Entry, r *http.Request, bucket string, dstPath string) ([]*filer_pb.FileChunk, map[string][]byte, error) {
 	// Parse SSE-KMS headers from copy request
 	destKeyID, encryptionContext, bucketKeyEnabled, err := ParseSSEKMSCopyHeaders(r)
@@ -2188,13 +2188,13 @@ func (s3a *S3ApiServer) copyChunksWithSSEKMS(entry *filer_pb.Entry, r *http.Requ
 	if isMultipartSSEKMS {
 		glog.V(2).Infof("Detected multipart SSE-KMS object with %d encrypted chunks for copy", sseKMSChunks)
-		return s3a.copyMultipartSSEKMSChunks(entry, destKeyID, encryptionContext, bucketKeyEnabled, r.URL.Path, bucket)
+		return s3a.copyMultipartSSEKMSChunks(entry, destKeyID, encryptionContext, bucketKeyEnabled, dstPath, bucket)
 	}
 	// Single-part SSE-KMS object: use existing logic
 	// If no SSE-KMS headers and source is not SSE-KMS encrypted, use regular copy
 	if destKeyID == "" && !IsSSEKMSEncrypted(entry.Extended) {
-		chunks, err := s3a.copyChunks(entry, r.URL.Path)
+		chunks, err := s3a.copyChunks(entry, dstPath)
 		return chunks, nil, err
 	}
@@ -2215,19 +2215,19 @@ func (s3a *S3ApiServer) copyChunksWithSSEKMS(entry *filer_pb.Entry, r *http.Requ
 		return nil, nil, err
 	}
-	glog.V(2).Infof("SSE-KMS copy strategy for %s: %v", r.URL.Path, strategy)
+	glog.V(2).Infof("SSE-KMS copy strategy for %s: %v", dstPath, strategy)
 	switch strategy {
 	case SSEKMSCopyStrategyDirect:
 		// FAST PATH: Direct chunk copy (same key or both unencrypted)
-		glog.V(2).Infof("Using fast path: direct chunk copy for %s", r.URL.Path)
-		chunks, err := s3a.copyChunks(entry, r.URL.Path)
+		glog.V(2).Infof("Using fast path: direct chunk copy for %s", dstPath)
+		chunks, err := s3a.copyChunks(entry, dstPath)
 		// For direct copy, generate destination metadata if we're encrypting to SSE-KMS
 		var dstMetadata map[string][]byte
 		if destKeyID != "" {
 			dstMetadata = make(map[string][]byte)
 			if encryptionContext == nil {
-				encryptionContext = BuildEncryptionContext(bucket, r.URL.Path, bucketKeyEnabled)
+				encryptionContext = BuildEncryptionContext(bucket, dstPath, bucketKeyEnabled)
 			}
 			sseKey := &SSEKMSKey{
 				KeyID: destKeyID,
@@ -2245,8 +2245,8 @@ func (s3a *S3ApiServer) copyChunksWithSSEKMS(entry *filer_pb.Entry, r *http.Requ
 	case SSEKMSCopyStrategyDecryptEncrypt:
 		// SLOW PATH: Decrypt source and re-encrypt for destination
-		glog.V(2).Infof("Using slow path: decrypt/re-encrypt for %s", r.URL.Path)
-		return s3a.copyChunksWithSSEKMSReencryption(entry, destKeyID, encryptionContext, bucketKeyEnabled, r.URL.Path, bucket)
+		glog.V(2).Infof("Using slow path: decrypt/re-encrypt for %s", dstPath)
+		return s3a.copyChunksWithSSEKMSReencryption(entry, destKeyID, encryptionContext, bucketKeyEnabled, dstPath, bucket)
 	default:
 		return nil, nil, fmt.Errorf("unknown SSE-KMS copy strategy: %v", strategy)

weed/s3api/s3api_object_handlers_copy_unified.go

@@ -15,8 +15,8 @@ import (
 // Returns chunks and destination metadata that should be applied to the destination entry
 func (s3a *S3ApiServer) executeUnifiedCopyStrategy(entry *filer_pb.Entry, r *http.Request, dstBucket, srcObject, dstObject string) ([]*filer_pb.FileChunk, map[string][]byte, error) {
 	// Detect encryption state (using entry-aware detection for multipart objects)
-	srcPath := fmt.Sprintf("/%s/%s", r.Header.Get("X-Amz-Copy-Source-Bucket"), srcObject)
-	dstPath := fmt.Sprintf("/%s/%s", dstBucket, dstObject)
+	srcPath := fmt.Sprintf("%s/%s/%s", s3a.option.BucketsPath, r.Header.Get("X-Amz-Copy-Source-Bucket"), srcObject)
+	dstPath := fmt.Sprintf("%s/%s/%s", s3a.option.BucketsPath, dstBucket, dstObject)
 	state := DetectEncryptionStateWithEntry(entry, r, srcPath, dstPath)
 	// Debug logging for encryption state
@@ -127,7 +127,7 @@ func (s3a *S3ApiServer) executeEncryptCopy(entry *filer_pb.Entry, r *http.Reques
 	if state.DstSSEKMS {
 		// Use existing SSE-KMS copy logic - metadata is now generated internally
-		chunks, dstMetadata, err := s3a.copyChunksWithSSEKMS(entry, r, dstBucket)
+		chunks, dstMetadata, err := s3a.copyChunksWithSSEKMS(entry, r, dstBucket, dstPath)
 		return chunks, dstMetadata, err
 	}
@@ -160,7 +160,7 @@ func (s3a *S3ApiServer) executeReencryptCopy(entry *filer_pb.Entry, r *http.Requ
 	if state.SrcSSEKMS && state.DstSSEKMS {
 		// Use existing SSE-KMS copy logic - metadata is now generated internally
-		chunks, dstMetadata, err := s3a.copyChunksWithSSEKMS(entry, r, dstBucket)
+		chunks, dstMetadata, err := s3a.copyChunksWithSSEKMS(entry, r, dstBucket, dstPath)
 		return chunks, dstMetadata, err
 	}
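
Why the prefix matters: the Filer applies path-specific storage rules (collection, replication, disk type) by matching the requested path against configured location prefixes. The following is an illustrative sketch of that idea only, not SeaweedFS's actual filer code; the type and function names are hypothetical.

package main

import (
	"fmt"
	"strings"
)

// pathRule is a hypothetical stand-in for a path-specific storage rule
// (collection and placement keyed by a location prefix).
type pathRule struct {
	LocationPrefix string
	Collection     string
}

// resolveCollection returns the collection of the first rule whose prefix
// matches the path; an empty result means no rule matched and defaults apply.
func resolveCollection(path string, rules []pathRule) string {
	for _, r := range rules {
		if strings.HasPrefix(path, r.LocationPrefix) {
			return r.Collection
		}
	}
	return ""
}

func main() {
	rules := []pathRule{{LocationPrefix: "/buckets/harbor/", Collection: "harbor"}}

	// Without the BucketsPath prefix the path matches no rule, so assignment
	// falls back to defaults (the failure mode described in the commit message).
	fmt.Println(resolveCollection("/harbor/charts/app.tgz", rules)) // ""

	// With the prefix the bucket's rule is found and the right collection is used.
	fmt.Println(resolveCollection("/buckets/harbor/charts/app.tgz", rules)) // "harbor"
}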
