
[s3] fix s3 test_multipart_get_part (#5476)

* try fix s3 test_multipart_get_part

* add passed s3 tests

* fix SeaweedFSUploadId

* rm spaces

* convert part request to range

* add passed s3 tests of multipart
Konstantin Lebedev, 8 months ago (committed by GitHub)
commit 33537ae29f
7 changed files:

    .github/workflows/s3tests.yml             | 11
    Makefile                                  |  2
    weed/s3api/filer_multipart.go             |  4
    weed/s3api/s3_constants/header.go         |  7
    weed/s3api/s3api_object_handlers.go       | 17
    weed/server/filer_server_handlers_read.go | 22
    weed/stats/metrics_names.go               |  1

.github/workflows/s3tests.yml

@@ -43,7 +43,7 @@ jobs:
           cd /__w/seaweedfs/seaweedfs/weed
           go install -buildvcs=false
           set -x
-          nohup weed -v 0 server -filer -s3 -ip.bind 0.0.0.0 \
+          nohup weed -v 0 server -filer -filer.maxMB=64 -s3 -ip.bind 0.0.0.0 \
                     -master.raftHashicorp -master.electionTimeout 1s -master.volumeSizeLimitMB=1024 \
                     -volume.max=100 -volume.preStopSeconds=1 -s3.port=8000 -metricsPort=9324 \
                     -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=true -s3.config=../docker/compose/s3.json &
@@ -164,12 +164,19 @@ jobs:
             s3tests_boto3/functional/test_s3.py::test_object_copy_key_not_found \
             s3tests_boto3/functional/test_s3.py::test_multipart_copy_small \
             s3tests_boto3/functional/test_s3.py::test_multipart_copy_without_range \
-            s3tests_boto3/functional/test_s3.py::test_multipart_upload_multiple_sizes \
+            s3tests_boto3/functional/test_s3.py::test_multipart_copy_special_names \
             s3tests_boto3/functional/test_s3.py::test_multipart_copy_multiple_sizes \
+            s3tests_boto3/functional/test_s3.py::test_multipart_get_part \
             s3tests_boto3/functional/test_s3.py::test_multipart_upload \
+            s3tests_boto3/functional/test_s3.py::test_multipart_upload_empty \
+            s3tests_boto3/functional/test_s3.py::test_multipart_upload_multiple_sizes \
             s3tests_boto3/functional/test_s3.py::test_multipart_upload_contents \
             s3tests_boto3/functional/test_s3.py::test_multipart_upload_overwrite_existing_object \
             s3tests_boto3/functional/test_s3.py::test_multipart_upload_size_too_small \
+            s3tests_boto3/functional/test_s3.py::test_multipart_resend_first_finishes_last \
+            s3tests_boto3/functional/test_s3.py::test_multipart_upload_resend_part \
+            s3tests_boto3/functional/test_s3.py::test_multipart_upload_missing_part \
+            s3tests_boto3/functional/test_s3.py::test_multipart_upload_incorrect_etag \
             s3tests_boto3/functional/test_s3.py::test_abort_multipart_upload \
             s3tests_boto3/functional/test_s3.py::test_list_multipart_upload \
             s3tests_boto3/functional/test_s3.py::test_atomic_read_1mb \

Makefile

@@ -15,7 +15,7 @@ full_install:
 	cd weed; go install -tags "elastic gocdk sqlite ydb tikv rclone"

 server: install
-	weed -v 0 server -s3 -filer -volume.max=0 -master.volumeSizeLimitMB=1024 -volume.preStopSeconds=1 -s3.port=8000 -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=true -s3.config=./docker/compose/s3.json -metricsPort=9324
+	weed -v 0 server -s3 -filer -filer.maxMB=64 -volume.max=0 -master.volumeSizeLimitMB=1024 -volume.preStopSeconds=1 -s3.port=8000 -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=true -s3.config=./docker/compose/s3.json -metricsPort=9324

 benchmark: install warp_install
 	pkill weed || true

weed/s3api/filer_multipart.go

@@ -103,7 +103,7 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa
 	if len(entries) == 0 {
 		entryName, dirName := s3a.getEntryNameAndDir(input)
 		if entry, _ := s3a.getEntry(dirName, entryName); entry != nil && entry.Extended != nil {
-			if uploadId, ok := entry.Extended[s3_constants.X_SeaweedFS_Header_Upload_Id]; ok && *input.UploadId == string(uploadId) {
+			if uploadId, ok := entry.Extended[s3_constants.SeaweedFSUploadId]; ok && *input.UploadId == string(uploadId) {
 				return &CompleteMultipartUploadResult{
 					CompleteMultipartUploadOutput: s3.CompleteMultipartUploadOutput{
 						Location: aws.String(fmt.Sprintf("http://%s%s/%s", s3a.option.Filer.ToHttpAddress(), urlEscapeObject(dirName), urlPathEscape(entryName))),
@@ -222,7 +222,7 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa
 	if entry.Extended == nil {
 		entry.Extended = make(map[string][]byte)
 	}
-	entry.Extended[s3_constants.X_SeaweedFS_Header_Upload_Id] = []byte(*input.UploadId)
+	entry.Extended[s3_constants.SeaweedFSUploadId] = []byte(*input.UploadId)
 	for k, v := range pentry.Extended {
 		if k != "key" {
 			entry.Extended[k] = v
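
The SeaweedFSUploadId extended attribute stored here is what makes a resent
CompleteMultipartUpload idempotent: if the part entries are already gone but
the finished object still carries a matching upload id, the gateway returns
the completed result again instead of an error. A client-side sketch using
aws-sdk-go (svc, bucket, key, uploadId, and parts are assumed to come from a
previously started upload; this is an illustration, not code from this PR):

    package example

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/service/s3"
    )

    // completeTwice sends the same CompleteMultipartUpload request twice,
    // e.g. simulating a retry after a lost response. The second call hits
    // the len(entries) == 0 path above and matches *input.UploadId against
    // the stored SeaweedFSUploadId, so both calls should succeed.
    func completeTwice(svc *s3.S3, bucket, key, uploadId string, parts []*s3.CompletedPart) error {
    	input := &s3.CompleteMultipartUploadInput{
    		Bucket:          aws.String(bucket),
    		Key:             aws.String(key),
    		UploadId:        aws.String(uploadId),
    		MultipartUpload: &s3.CompletedMultipartUpload{Parts: parts},
    	}
    	if _, err := svc.CompleteMultipartUpload(input); err != nil {
    		return fmt.Errorf("first complete: %v", err)
    	}
    	if _, err := svc.CompleteMultipartUpload(input); err != nil {
    		return fmt.Errorf("resent complete: %v", err)
    	}
    	return nil
    }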

weed/s3api/s3_constants/header.go

@@ -38,8 +38,9 @@ const (
 	AmzObjectTaggingDirective = "X-Amz-Tagging-Directive"
 	AmzTagCount               = "x-amz-tagging-count"
-	X_SeaweedFS_Header_Directory_Key = "x-seaweedfs-is-directory-key"
-	X_SeaweedFS_Header_Upload_Id     = "X-Seaweedfs-Upload-Id"
+	SeaweedFSIsDirectoryKey = "X-Seaweedfs-Is-Directory-Key"
+	SeaweedFSPartNumber     = "X-Seaweedfs-Part-Number"
+	SeaweedFSUploadId       = "X-Seaweedfs-Upload-Id"

 	// S3 ACL headers
 	AmzCannedAcl = "X-Amz-Acl"
@@ -48,6 +49,8 @@ const (
 	AmzAclWrite    = "X-Amz-Grant-Write"
 	AmzAclReadAcp  = "X-Amz-Grant-Read-Acp"
 	AmzAclWriteAcp = "X-Amz-Grant-Write-Acp"
+
+	AmzMpPartsCount = "X-Amz-Mp-Parts-Count"
 )

 // Non-Standard S3 HTTP request constants

weed/s3api/s3api_object_handlers.go

@@ -370,6 +370,9 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des
 		if _, ok := s3_constants.PassThroughHeaders[strings.ToLower(k)]; ok {
 			proxyReq.Header[k] = v
 		}
+		if k == "partNumber" {
+			proxyReq.Header[s3_constants.SeaweedFSPartNumber] = v
+		}
 	}
 	for header, values := range r.Header {
 		proxyReq.Header[header] = values
@@ -411,7 +414,7 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des
 	}
 	TimeToFirstByte(r.Method, start, r)

-	if resp.Header.Get(s3_constants.X_SeaweedFS_Header_Directory_Key) == "true" {
+	if resp.Header.Get(s3_constants.SeaweedFSIsDirectoryKey) == "true" {
 		responseStatusCode := responseFn(resp, w)
 		s3err.PostLog(r, responseStatusCode, s3err.ErrNone)
 		return
@@ -429,6 +432,18 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des
 		return
 	}

+	if resp.StatusCode == http.StatusBadRequest {
+		resp_body, _ := io.ReadAll(resp.Body)
+		switch string(resp_body) {
+		case "InvalidPart":
+			s3err.WriteErrorResponse(w, r, s3err.ErrInvalidPart)
+		default:
+			s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRequest)
+		}
+		resp.Body.Close()
+		return
+	}
+
 	setUserMetadataKeyToLowercase(resp)

 	responseStatusCode := responseFn(resp, w)
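
Client-side, this is the request path the hunks above serve: the AWS SDK sends
GET /bucket/key?partNumber=N, the gateway copies the query parameter into the
X-Seaweedfs-Part-Number header, and the filer's plain-text 400 "InvalidPart"
reply comes back as S3's InvalidPart error. A minimal sketch using aws-sdk-go;
svc, bucket, and key are assumed to point at a multipart object on a SeaweedFS
S3 endpoint:

    package example

    import (
    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/service/s3"
    )

    // getPart fetches one part of a completed multipart object. The response
    // carries only that part's bytes, the part's ETag, and PartsCount (from
    // the X-Amz-Mp-Parts-Count header the filer now sets).
    func getPart(svc *s3.S3, bucket, key string, part int64) (*s3.GetObjectOutput, error) {
    	return svc.GetObject(&s3.GetObjectInput{
    		Bucket:     aws.String(bucket),
    		Key:        aws.String(key),
    		PartNumber: aws.Int64(part), // 1-based; past-the-end returns InvalidPart
    	})
    }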

weed/server/filer_server_handlers_read.go

@@ -3,6 +3,8 @@ package weed_server
 import (
 	"bytes"
 	"context"
+	"encoding/base64"
+	"encoding/hex"
 	"errors"
 	"fmt"
 	"io"
@@ -132,7 +134,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
 			return
 		}
 		// inform S3 API this is a user created directory key object
-		w.Header().Set(s3_constants.X_SeaweedFS_Header_Directory_Key, "true")
+		w.Header().Set(s3_constants.SeaweedFSIsDirectoryKey, "true")
 	}

 	if isForDirectory && entry.Attr.Mime != s3_constants.FolderMimeType {
@@ -158,7 +160,22 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
 		return
 	}

-	etag := filer.ETagEntry(entry)
+	var etag string
+	if partNumber, errNum := strconv.Atoi(r.Header.Get(s3_constants.SeaweedFSPartNumber)); errNum == nil {
+		if len(entry.Chunks) < partNumber {
+			stats.FilerHandlerCounter.WithLabelValues(stats.ErrorReadChunk).Inc()
+			w.WriteHeader(http.StatusBadRequest)
+			w.Write([]byte("InvalidPart"))
+			return
+		}
+		w.Header().Set(s3_constants.AmzMpPartsCount, strconv.Itoa(len(entry.Chunks)))
+		partChunk := entry.GetChunks()[partNumber-1]
+		md5, _ := base64.StdEncoding.DecodeString(partChunk.ETag)
+		etag = hex.EncodeToString(md5)
+		r.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", partChunk.Offset, uint64(partChunk.Offset)+partChunk.Size-1))
+	} else {
+		etag = filer.ETagEntry(entry)
+	}
 	w.Header().Set("Accept-Ranges", "bytes")

 	// mime type
@@ -207,7 +224,6 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
 	filename := entry.Name()
 	adjustPassthroughHeaders(w, r, filename)
 	totalSize := int64(entry.Size())
-
 	if r.Method == "HEAD" {
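
The rewrite above works because each uploaded part is persisted as a single
chunk, so part N is entry.Chunks[N-1] and its ETag is that chunk's stored MD5;
raising -filer.maxMB to 64 in the workflow and Makefile presumably keeps parts
up to that size from being split into several chunks, which would break this
one-chunk-per-part mapping. A standalone sketch of the conversion (names are
illustrative, not from this PR):

    package example

    import "fmt"

    // partRange rewrites a part read as an HTTP byte range over the part's
    // chunk. HTTP ranges are inclusive on both ends, hence the -1.
    func partRange(offset int64, size uint64) string {
    	return fmt.Sprintf("bytes=%d-%d", offset, uint64(offset)+size-1)
    }

Calling partRange(0, 5*1024*1024) yields "bytes=0-5242879", i.e. the whole of
a first 5 MiB part.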

weed/stats/metrics_names.go

@@ -41,6 +41,7 @@ const (
 	ErrorWriteEntry          = "write.entry.failed"
 	RepeatErrorUploadContent = "upload.content.repeat.failed"
 	ErrorChunkAssign         = "chunkAssign.failed"
+	ErrorReadChunk           = "read.chunk.failed"
 	ErrorReadCache           = "read.cache.failed"
 	ErrorReadStream          = "read.stream.failed"
