From 0a067944cc5fb022943aaf9b7afcffb1ce6dba51 Mon Sep 17 00:00:00 2001
From: Chris Lu
Date: Fri, 25 Dec 2020 02:32:52 -0800
Subject: [PATCH] filer: add retries during volume moving

fix https://github.com/chrislusf/seaweedfs/issues/1704
---
 .../filer_server_handlers_write_autochunk.go | 31 +++++++++++++++----
 1 file changed, 25 insertions(+), 6 deletions(-)

diff --git a/weed/server/filer_server_handlers_write_autochunk.go b/weed/server/filer_server_handlers_write_autochunk.go
index 82c6c11b4..eee39152b 100644
--- a/weed/server/filer_server_handlers_write_autochunk.go
+++ b/weed/server/filer_server_handlers_write_autochunk.go
@@ -209,17 +209,36 @@ func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Reque
 	for {
 		limitedReader := io.LimitReader(partReader, int64(chunkSize))
 
-		// assign one file id for one chunk
-		fileId, urlLocation, auth, assignErr := fs.assignNewFileInfo(so)
-		if assignErr != nil {
-			return nil, nil, 0, assignErr, nil
+		data, err := ioutil.ReadAll(limitedReader)
+		if err != nil {
+			return nil, nil, 0, err, nil
 		}
+		dataReader := util.NewBytesReader(data)
+
+		// retry to assign a different file id
+		var fileId, urlLocation string
+		var auth security.EncodedJwt
+		var assignErr, uploadErr error
+		var uploadResult *operation.UploadResult
+		for i := 0; i < 3; i++ {
+			// assign one file id for one chunk
+			fileId, urlLocation, auth, assignErr = fs.assignNewFileInfo(so)
+			if assignErr != nil {
+				return nil, nil, 0, assignErr, nil
+			}
 
-		// upload the chunk to the volume server
-		uploadResult, uploadErr, data := fs.doUpload(urlLocation, w, r, limitedReader, fileName, contentType, nil, auth)
+			// upload the chunk to the volume server
+			uploadResult, uploadErr, _ = fs.doUpload(urlLocation, w, r, dataReader, fileName, contentType, nil, auth)
+			if uploadErr != nil {
+				time.Sleep(251 * time.Millisecond)
+				continue
+			}
+			break
+		}
 		if uploadErr != nil {
 			return nil, nil, 0, uploadErr, nil
 		}
+		content = data
 
 		// if last chunk exhausted the reader exactly at the border
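
Below is a minimal standalone sketch of the retry pattern this patch introduces, not SeaweedFS code: assignFileID and uploadChunk are hypothetical stand-ins for fs.assignNewFileInfo and fs.doUpload, and the retry count and 251ms back-off mirror the patch.

```go
package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"math/rand"
	"time"
)

// assignFileID is a hypothetical stand-in for fs.assignNewFileInfo:
// it returns a fresh file id and the volume server URL to upload to.
func assignFileID() (fileID string, url string, err error) {
	return fmt.Sprintf("3,%08x", rand.Uint32()), "http://volume-server:8080", nil
}

// uploadChunk is a hypothetical stand-in for fs.doUpload; here it just
// simulates a volume server that occasionally rejects the write.
func uploadChunk(url, fileID string, r io.Reader) error {
	if rand.Intn(2) == 0 {
		return errors.New("volume moved")
	}
	return nil
}

// uploadWithRetry mirrors the patch: buffer the chunk once, then try up to
// 3 times, asking for a fresh file id (and therefore possibly a different
// volume server) before each attempt, sleeping briefly between failures.
func uploadWithRetry(chunk []byte) (fileID string, err error) {
	for i := 0; i < 3; i++ {
		var url string
		fileID, url, err = assignFileID()
		if err != nil {
			// Assignment errors are returned immediately, as in the patch.
			return "", err
		}
		// The reader can be rebuilt for every attempt because the chunk was
		// read into memory up front, unlike a one-shot io.LimitReader.
		if err = uploadChunk(url, fileID, bytes.NewReader(chunk)); err != nil {
			time.Sleep(251 * time.Millisecond)
			continue
		}
		return fileID, nil
	}
	return "", err
}

func main() {
	id, err := uploadWithRetry([]byte("chunk data"))
	fmt.Println(id, err)
}
```

Buffering the chunk into memory first is what makes the retry safe: the original io.LimitReader can only be consumed once, so a failed upload would otherwise lose the chunk data, and re-assigning a file id on each attempt lets the retry land on a different volume when the previous one is being moved.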