
[s3] add s3 pass test_multipart_upload_size_too_small (#5475)

* add s3 pass test_multipart_upload_size_too_small

* refactor metric names

* return ErrNoSuchUpload if empty parts

* fix test
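
For background, S3 requires every part of a multipart upload except the last one to be at least 5 MiB; completing an upload that breaks this rule should fail with EntityTooSmall, and completing one with no parts at all should fail with NoSuchUpload. A minimal standalone sketch of the size rule in Go (illustrative names only, not the SeaweedFS code itself):

package main

import "fmt"

const multiPartMinSize = 5 * 1024 * 1024 // 5 MiB

// anyPartTooSmall reports whether any non-final part is below the minimum,
// which is the condition CompleteMultipartUpload should reject with
// EntityTooSmall.
func anyPartTooSmall(partSizes []int64) bool {
	for i, size := range partSizes {
		if i < len(partSizes)-1 && size < multiPartMinSize {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(anyPartTooSmall([]int64{1024, 6 * 1024 * 1024})) // true: tiny non-final part
	fmt.Println(anyPartTooSmall([]int64{6 * 1024 * 1024, 1024})) // false: only the last part is small
	fmt.Println(anyPartTooSmall(nil))                            // false: an empty part list is handled separately, as NoSuchUpload
}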
Konstantin Lebedev, 9 months ago, committed by GitHub · commit 3e25ed1b11
1. .github/workflows/s3tests.yml (2 changes)
2. weed/s3api/filer_multipart.go (28 changes)
3. weed/stats/metrics_names.go (5 changes)

.github/workflows/s3tests.yml (2 changes)

@@ -169,6 +169,7 @@ jobs:
s3tests_boto3/functional/test_s3.py::test_multipart_upload \
s3tests_boto3/functional/test_s3.py::test_multipart_upload_contents \
s3tests_boto3/functional/test_s3.py::test_multipart_upload_overwrite_existing_object \
s3tests_boto3/functional/test_s3.py::test_multipart_upload_size_too_small \
s3tests_boto3/functional/test_s3.py::test_abort_multipart_upload \
s3tests_boto3/functional/test_s3.py::test_list_multipart_upload \
s3tests_boto3/functional/test_s3.py::test_atomic_read_1mb \
@@ -181,7 +182,6 @@ jobs:
s3tests_boto3/functional/test_s3.py::test_atomic_dual_write_4mb \
s3tests_boto3/functional/test_s3.py::test_atomic_dual_write_8mb \
s3tests_boto3/functional/test_s3.py::test_atomic_multipart_upload_write \
s3tests_boto3/functional/test_s3.py::test_multipart_resend_first_finishes_last \
s3tests_boto3/functional/test_s3.py::test_ranged_request_response_code \
s3tests_boto3/functional/test_s3.py::test_ranged_big_request_response_code \
s3tests_boto3/functional/test_s3.py::test_ranged_request_skip_leading_bytes_response_code \

weed/s3api/filer_multipart.go (28 changes)

@@ -25,7 +25,10 @@ import (
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
)
const multipartExt = ".part"
const (
multipartExt = ".part"
multiPartMinSize = 5 * 1024 * 1024
)
type InitiateMultipartUploadResult struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ InitiateMultipartUploadResult"`
@@ -75,6 +78,10 @@ type CompleteMultipartUploadResult struct {
func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploadInput, parts *CompleteMultipartUpload) (output *CompleteMultipartUploadResult, code s3err.ErrorCode) {
glog.V(2).Infof("completeMultipartUpload input %v", input)
if len(parts.Parts) == 0 {
stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedNoSuchUpload).Inc()
return nil, s3err.ErrNoSuchUpload
}
completedPartNumbers := []int{}
completedPartMap := make(map[int][]string)
for _, part := range parts.Parts {
@@ -83,8 +90,9 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa
}
completedPartMap[part.PartNumber] = append(completedPartMap[part.PartNumber], part.ETag)
}
uploadDirectory := s3a.genUploadsFolder(*input.Bucket) + "/" + *input.UploadId
sort.Ints(completedPartNumbers)
uploadDirectory := s3a.genUploadsFolder(*input.Bucket) + "/" + *input.UploadId
entries, _, err := s3a.list(uploadDirectory, "", "", false, maxPartsList)
if err != nil {
glog.Errorf("completeMultipartUpload %s %s error: %v, entries:%d", *input.Bucket, *input.UploadId, err, len(entries))
@@ -118,6 +126,7 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa
}
deleteEntries := []*filer_pb.Entry{}
partEntries := make(map[int][]*filer_pb.Entry, len(entries))
entityTooSmall := false
for _, entry := range entries {
foundEntry := false
glog.V(4).Infof("completeMultipartUpload part entries %s", entry.Name)
@@ -156,16 +165,23 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa
partEntries[partNumber] = append(partEntries[partNumber], entry)
foundEntry = true
}
if !foundEntry {
if foundEntry {
if len(completedPartNumbers) > 1 && partNumber != completedPartNumbers[len(completedPartNumbers)-1] &&
entry.Attributes.FileSize < multiPartMinSize {
glog.Warningf("completeMultipartUpload %s part file size less 5mb", entry.Name)
entityTooSmall = true
}
} else {
deleteEntries = append(deleteEntries, entry)
}
}
if entityTooSmall {
stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompleteEntityTooSmall).Inc()
return nil, s3err.ErrEntityTooSmall
}
mime := pentry.Attributes.Mime
var finalParts []*filer_pb.FileChunk
var offset int64
sort.Ints(completedPartNumbers)
for _, partNumber := range completedPartNumbers {
partEntriesByNumber, ok := partEntries[partNumber]
if !ok {
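
From the client side, the new checks can be exercised with aws-sdk-go v1, the same SDK the handler's input types come from. The endpoint, credentials, and bucket/key below are assumptions for a local SeaweedFS S3 gateway with the bucket already created; completing with a non-final part under 5 MiB should return EntityTooSmall, and completing with an empty part list should return NoSuchUpload:

package main

import (
	"bytes"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Assumed local SeaweedFS S3 endpoint and static credentials.
	svc := s3.New(session.Must(session.NewSession(&aws.Config{
		Endpoint:         aws.String("http://127.0.0.1:8333"),
		Region:           aws.String("us-east-1"),
		Credentials:      credentials.NewStaticCredentials("accessKey", "secretKey", ""),
		S3ForcePathStyle: aws.Bool(true),
	})))

	bucket, key := aws.String("test-bucket"), aws.String("too-small")

	create, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{Bucket: bucket, Key: key})
	if err != nil {
		panic(err)
	}

	// Upload two 1 KiB parts; the first (non-final) part is far below the 5 MiB minimum.
	var completed []*s3.CompletedPart
	for i, size := range []int{1024, 1024} {
		part, err := svc.UploadPart(&s3.UploadPartInput{
			Bucket:     bucket,
			Key:        key,
			UploadId:   create.UploadId,
			PartNumber: aws.Int64(int64(i + 1)),
			Body:       bytes.NewReader(make([]byte, size)),
		})
		if err != nil {
			panic(err)
		}
		completed = append(completed, &s3.CompletedPart{ETag: part.ETag, PartNumber: aws.Int64(int64(i + 1))})
	}

	// Expect an EntityTooSmall error code from the new part-size check.
	_, err = svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
		Bucket:          bucket,
		Key:             key,
		UploadId:        create.UploadId,
		MultipartUpload: &s3.CompletedMultipartUpload{Parts: completed},
	})
	fmt.Println("complete with a too-small non-final part:", err)

	// Expect a NoSuchUpload error code when no parts are listed at all.
	_, err = svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
		Bucket:          bucket,
		Key:             key,
		UploadId:        create.UploadId,
		MultipartUpload: &s3.CompletedMultipartUpload{},
	})
	fmt.Println("complete with no parts:", err)
}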

weed/stats/metrics_names.go (5 changes)

@@ -46,8 +46,9 @@ const (
// s3 handler
ErrorCompletedNoSuchUpload = "errorCompletedNoSuchUpload"
ErrorCompletedPartEmpty = "ErrorCompletedPartEmpty"
ErrorCompletedPartNumber = "ErrorCompletedPartNumber"
ErrorCompleteEntityTooSmall = "errorCompleteEntityTooSmall"
ErrorCompletedPartEmpty = "errorCompletedPartEmpty"
ErrorCompletedPartNumber = "errorCompletedPartNumber"
ErrorCompletedPartNotFound = "errorCompletedPartNotFound"
ErrorCompletedEtagInvalid = "errorCompletedEtagInvalid"
ErrorCompletedEtagMismatch = "errorCompletedEtagMismatch"
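
The renamed constants are used only as label values on a Prometheus counter, so the change affects the exported label text, not any metric name. A minimal sketch of that pattern (the counter name and label key here are illustrative, not the actual SeaweedFS definitions):

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// Label values in the same shape as the metrics_names.go constants.
const (
	ErrorCompletedNoSuchUpload  = "errorCompletedNoSuchUpload"
	ErrorCompleteEntityTooSmall = "errorCompleteEntityTooSmall"
)

// handlerCounter is an illustrative CounterVec keyed by an error-type label.
var handlerCounter = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "s3_handler_errors_total", // assumed name, not the SeaweedFS one
		Help: "Errors returned by S3 handlers, by error type.",
	},
	[]string{"type"},
)

func main() {
	prometheus.MustRegister(handlerCounter)

	// Incrementing with a label value is all the handler code does, so the
	// rename only changes the label text seen on the /metrics endpoint.
	handlerCounter.WithLabelValues(ErrorCompleteEntityTooSmall).Inc()
	handlerCounter.WithLabelValues(ErrorCompletedNoSuchUpload).Inc()

	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":2112", nil))
}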
