From 18c40686d9c9f72f80a420c7a464d13cf33520d8 Mon Sep 17 00:00:00 2001
From: Chris Lu
Date: Thu, 15 Jul 2021 11:56:28 -0700
Subject: [PATCH] s3: multipart upload miss data if file is chunked in 4MB

fix https://github.com/chrislusf/seaweedfs/issues/2195
---
 weed/server/filer_server_handlers_write_upload.go | 11 +++--------
 1 file changed, 3 insertions(+), 8 deletions(-)

diff --git a/weed/server/filer_server_handlers_write_upload.go b/weed/server/filer_server_handlers_write_upload.go
index 395852517..2275ff1bc 100644
--- a/weed/server/filer_server_handlers_write_upload.go
+++ b/weed/server/filer_server_handlers_write_upload.go
@@ -28,19 +28,14 @@ var bufPool = sync.Pool{
 	},
 }
 
-func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Request, reader io.Reader, chunkSize int32, fileName, contentType string, contentLength int64, so *operation.StorageOption) ([]*filer_pb.FileChunk, hash.Hash, int64, error, []byte) {
+func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Request, reader io.Reader, chunkSize int32, fileName, contentType string, contentLength int64, so *operation.StorageOption) (fileChunks []*filer_pb.FileChunk, md5Hash hash.Hash, chunkOffset int64, uploadErr error, smallContent []byte) {
 
-	md5Hash := md5.New()
+	md5Hash = md5.New()
 	var partReader = ioutil.NopCloser(io.TeeReader(reader, md5Hash))
 
-	chunkOffset := int64(0)
-	var smallContent []byte
-	var uploadErr error
-
 	var wg sync.WaitGroup
 	var bytesBufferCounter int64
 	bytesBufferLimitCond := sync.NewCond(new(sync.Mutex))
-	var fileChunks []*filer_pb.FileChunk
 	var fileChunksLock sync.Mutex
 
 	for {
@@ -67,7 +62,7 @@ func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Reque
 			bufPool.Put(bytesBuffer)
 			atomic.AddInt64(&bytesBufferCounter, -1)
 			bytesBufferLimitCond.Signal()
-			return nil, md5Hash, 0, err, nil
+			break
 		}
 		if chunkOffset == 0 && !isAppend(r) {
 			if dataSize < fs.option.SaveToFilerLimit || strings.HasPrefix(r.URL.Path, filer.DirectoryEtcRoot) {
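
Note (not part of the patch): the old early `return nil, md5Hash, 0, err, nil` discarded every chunk the loop had already uploaded whenever the read loop stopped on an error or a zero-length read. As the linked issue suggests, a zero-length final read is exactly what happens when a part is an exact multiple of the 4MB chunk size, so such parts lost their data. With named return values and `break`, the function instead falls through to its normal return path with the accumulated fileChunks, chunkOffset and md5 state intact. The following standalone Go sketch (hypothetical names, not the SeaweedFS API) illustrates the pattern under that assumption:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// readInChunks is a hypothetical, much-simplified stand-in for
// uploadReaderToChunks: it drains r in fixed-size chunks and keeps every
// chunk it has read so far. Because the results are named return values and
// the loop exits with `break`, the chunks collected before the final
// zero-length read (or a read error) are still returned to the caller.
func readInChunks(r io.Reader, chunkSize int64) (chunks [][]byte, offset int64, err error) {
	for {
		buf := new(bytes.Buffer)
		n, readErr := buf.ReadFrom(io.LimitReader(r, chunkSize))
		if readErr != nil || n == 0 {
			// An early `return nil, 0, readErr` here would drop everything
			// already read whenever the input ends exactly on a chunk
			// boundary; `break` preserves the accumulated state instead.
			err = readErr
			break
		}
		chunks = append(chunks, buf.Bytes())
		offset += n
	}
	return chunks, offset, err
}

func main() {
	// 8 bytes with a 4-byte chunk size: the reader ends exactly on a chunk
	// boundary, the case the patch is concerned with.
	chunks, offset, err := readInChunks(strings.NewReader("abcdefgh"), 4)
	fmt.Println(len(chunks), offset, err) // prints: 2 8 <nil>
}

Run against an 8-byte input with a 4-byte chunk size, the sketch returns both chunks and offset 8; an early return in the zero-read branch would instead return no chunks at all, which is the kind of data loss the patch addresses.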