
S3: Truncate timestamps to milliseconds for CopyObjectResult and CopyPartResult (#8398)

* S3: Truncate timestamps to milliseconds for CopyObjectResult and CopyPartResult

Fixes #8394

* S3: Address nitpick comments in copy handlers

- Synchronize Mtime and LastModified by capturing time once
- Optimize copyChunksForRange loop
- Use built-in min/max
- Remove dead previewLen code
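
For context, a minimal standalone sketch of the pattern this commit applies in both handlers: capture the timestamp once, truncate it to millisecond precision, then reuse it for both the stored Mtime and the LastModified field of the XML response (variable names here are illustrative, not taken from the patch):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Capture the time once and truncate to milliseconds, as the handlers now do.
	t := time.Now().UTC().Truncate(time.Millisecond)

	mtime := t.Unix() // value stored on the filer entry
	lastModified := t // value returned in CopyObjectResult / CopyPartResult

	// No sub-millisecond digits survive the truncation.
	fmt.Println(mtime, lastModified.Format(time.RFC3339Nano))
}
```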
Branch: master
Author: Chris Lu, 12 hours ago (committed by GitHub)
Commit: 8e25c55bfb
1 changed file: weed/s3api/s3api_object_handlers_copy.go (56 changed lines)
@@ -32,6 +32,7 @@ const (
 )
 
 func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
+	t := time.Now().UTC().Truncate(time.Millisecond)
 	dstBucket, dstObject := s3_constants.GetBucketAndObject(r)
@@ -83,7 +84,7 @@ func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request
 		return
 	}
 	entry.Extended, err = processMetadataBytes(r.Header, entry.Extended, replaceMeta, replaceTagging)
-	entry.Attributes.Mtime = time.Now().Unix()
+	entry.Attributes.Mtime = t.Unix()
 	if err != nil {
 		glog.Errorf("CopyObjectHandler ValidateTags error %s: %v", r.URL, err)
 		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidTag)
@@ -96,7 +97,7 @@ func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request
 		}
 		writeSuccessResponseXML(w, r, CopyObjectResult{
 			ETag:         filer.ETag(entry),
-			LastModified: time.Now().UTC(),
+			LastModified: t,
 		})
 		return
 	}
@@ -183,7 +184,7 @@ func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request
 	dstEntry := &filer_pb.Entry{
 		Attributes: &filer_pb.FuseAttributes{
 			FileSize: entry.Attributes.FileSize,
-			Mtime:    time.Now().Unix(),
+			Mtime:    t.Unix(),
			Crtime:   entry.Attributes.Crtime,
 			Mime:     entry.Attributes.Mime,
 		},
@@ -402,7 +403,7 @@ func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request
 	response := CopyObjectResult{
 		ETag:         etag,
-		LastModified: time.Now().UTC(),
+		LastModified: t,
 	}
 
 	writeSuccessResponseXML(w, r, response)
@@ -459,6 +460,7 @@ type CopyPartResult struct {
 }
 
 func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Request) {
+	t := time.Now().UTC().Truncate(time.Millisecond)
 	// https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html
 	// https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html
 	dstBucket, dstObject := s3_constants.GetBucketAndObject(r)
@@ -587,8 +589,8 @@ func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Req
 	dstEntry := &filer_pb.Entry{
 		Attributes: &filer_pb.FuseAttributes{
 			FileSize: partSize,
-			Mtime:    time.Now().Unix(),
-			Crtime:   time.Now().Unix(),
+			Mtime:    t.Unix(),
+			Crtime:   t.Unix(),
 			Mime:     entry.Attributes.Mime,
 		},
 		Extended: make(map[string][]byte),
@@ -648,7 +650,7 @@ func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Req
 	response := CopyPartResult{
 		ETag:         etag,
-		LastModified: time.Now().UTC(),
+		LastModified: t,
 	}
 
 	writeSuccessResponseXML(w, r, response)
@@ -956,22 +958,6 @@ func (s3a *S3ApiServer) assignNewVolume(dstPath string) (*filer_pb.AssignVolumeR
 	return assignResult, nil
 }
 
-// min returns the minimum of two int64 values
-func min(a, b int64) int64 {
-	if a < b {
-		return a
-	}
-	return b
-}
-
-// max returns the maximum of two int64 values
-func max(a, b int64) int64 {
-	if a > b {
-		return a
-	}
-	return b
-}
-
 // parseRangeHeader parses the x-amz-copy-source-range header
 func parseRangeHeader(rangeHeader string) (startOffset, endOffset int64, err error) {
 	// Remove "bytes=" prefix if present
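
(The two helpers removed above are redundant on Go 1.21+, where `min` and `max` are generic built-ins; a quick illustrative check, not part of the patch:)

```go
package main

import "fmt"

func main() {
	var a, b int64 = 3, 7
	// min and max are language built-ins since Go 1.21, so no int64 helpers are needed.
	fmt.Println(min(a, b), max(a, b)) // 3 7
}
```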
@@ -997,6 +983,7 @@ func parseRangeHeader(rangeHeader string) (startOffset, endOffset int64, err err
 // copyChunksForRange copies chunks that overlap with the specified range
 func (s3a *S3ApiServer) copyChunksForRange(entry *filer_pb.Entry, startOffset, endOffset int64, dstPath string) ([]*filer_pb.FileChunk, error) {
 	var relevantChunks []*filer_pb.FileChunk
+	var originalChunks []*filer_pb.FileChunk
 
 	// Find chunks that overlap with the range
 	for _, chunk := range entry.GetChunks() {
@@ -1021,6 +1008,7 @@ func (s3a *S3ApiServer) copyChunksForRange(entry *filer_pb.Entry, startOffset, e
 				Fid: chunk.Fid,
 			}
 			relevantChunks = append(relevantChunks, newChunk)
+			originalChunks = append(originalChunks, chunk)
 		}
 	}
@@ -1030,20 +1018,6 @@ func (s3a *S3ApiServer) copyChunksForRange(entry *filer_pb.Entry, startOffset, e
 	executor := util.NewLimitedConcurrentExecutor(defaultChunkCopyConcurrency)
 	errChan := make(chan error, len(relevantChunks))
 
-	// Create a map to track original chunks for each relevant chunk
-	originalChunks := make([]*filer_pb.FileChunk, len(relevantChunks))
-	relevantIndex := 0
-	for _, chunk := range entry.GetChunks() {
-		chunkStart := chunk.Offset
-		chunkEnd := chunk.Offset + int64(chunk.Size)
-
-		// Check if chunk overlaps with the range
-		if chunkStart < endOffset+1 && chunkEnd > startOffset {
-			originalChunks[relevantIndex] = chunk
-			relevantIndex++
-		}
-	}
-
 	for i, chunk := range relevantChunks {
 		chunkIndex := i
 		originalChunk := originalChunks[i] // Get the corresponding original chunk
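
(For reference, a trimmed, self-contained sketch of the single-pass shape this hunk moves to: one range scan fills `relevantChunks` and `originalChunks` side by side, so the copy loop can pair them by index. The `chunk` type and values below are stand-ins, not SeaweedFS code:)

```go
package main

import "fmt"

// chunk stands in for filer_pb.FileChunk in this sketch.
type chunk struct {
	Offset int64
	Size   uint64
}

// overlapping collects, in a single pass, copies of the chunks that overlap
// [startOffset, endOffset] together with a parallel slice of the originals.
func overlapping(chunks []*chunk, startOffset, endOffset int64) (copies, originals []*chunk) {
	for _, c := range chunks {
		cStart, cEnd := c.Offset, c.Offset+int64(c.Size)
		if cStart < endOffset+1 && cEnd > startOffset {
			copies = append(copies, &chunk{Offset: c.Offset, Size: c.Size})
			originals = append(originals, c)
		}
	}
	return
}

func main() {
	chunks := []*chunk{{Offset: 0, Size: 10}, {Offset: 10, Size: 10}, {Offset: 20, Size: 10}}
	copies, originals := overlapping(chunks, 5, 14)
	fmt.Println(len(copies), len(originals)) // 2 2
}
```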
@@ -1833,10 +1807,6 @@ func (s3a *S3ApiServer) copyCrossEncryptionChunk(chunk *filer_pb.FileChunk, sour
 			return nil, fmt.Errorf("decrypt SSE-C chunk data: %w", readErr)
 		}
 		finalData = decryptedData
-		previewLen := 16
-		if len(finalData) < previewLen {
-			previewLen = len(finalData)
-		}
 	} else if chunk.GetSseType() == filer_pb.SSEType_SSE_KMS {
 		// Decrypt SSE-KMS source
@@ -1859,10 +1829,6 @@ func (s3a *S3ApiServer) copyCrossEncryptionChunk(chunk *filer_pb.FileChunk, sour
 			return nil, fmt.Errorf("decrypt SSE-KMS chunk data: %w", readErr)
 		}
 		finalData = decryptedData
-		previewLen := 16
-		if len(finalData) < previewLen {
-			previewLen = len(finalData)
-		}
 	} else if chunk.GetSseType() == filer_pb.SSEType_SSE_S3 {
 		// Decrypt SSE-S3 source
