
refactor

pull/1960/head
Chris Lu, 4 years ago
commit b5880334fc
Changed files:

  1. weed/server/filer_server_handlers.go (4 changed lines)
  2. weed/server/filer_server_handlers_write.go (4 changed lines)
  3. weed/server/filer_server_handlers_write_autochunk.go (104 changed lines)
  4. weed/server/filer_server_handlers_write_upload.go (105 changed lines)

weed/server/filer_server_handlers.go (4 changed lines)

@@ -68,12 +68,12 @@ func (fs *FilerServer) filerHandler(w http.ResponseWriter, r *http.Request) {
 		if _, ok := r.URL.Query()["tagging"]; ok {
 			fs.PutTaggingHandler(w, r)
 		} else {
-			fs.PostHandler(w, r)
+			fs.PostHandler(w, r, contentLength)
 		}
 		stats.FilerRequestHistogram.WithLabelValues("put").Observe(time.Since(start).Seconds())
 	} else { // method == "POST"
 		stats.FilerRequestCounter.WithLabelValues("post").Inc()
-		fs.PostHandler(w, r)
+		fs.PostHandler(w, r, contentLength)
 		stats.FilerRequestHistogram.WithLabelValues("post").Observe(time.Since(start).Seconds())
 	}
 	case "OPTIONS":

weed/server/filer_server_handlers_write.go (4 changed lines)

@@ -52,7 +52,7 @@ func (fs *FilerServer) assignNewFileInfo(so *operation.StorageOption) (fileId, u
 	return
 }
 
-func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
+func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request, contentLength int64) {
 
 	ctx := context.Background()
@@ -66,7 +66,7 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
 		query.Get("rack"),
 	)
 
-	fs.autoChunk(ctx, w, r, so)
+	fs.autoChunk(ctx, w, r, contentLength, so)
 	util.CloseRequest(r)
 }

weed/server/filer_server_handlers_write_autochunk.go (104 changed lines)

@@ -2,11 +2,8 @@ package weed_server
 
 import (
 	"context"
-	"crypto/md5"
 	"fmt"
-	"hash"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"os"
 	"path"
@@ -19,13 +16,12 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/operation"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
-	"github.com/chrislusf/seaweedfs/weed/security"
 	"github.com/chrislusf/seaweedfs/weed/stats"
 	"github.com/chrislusf/seaweedfs/weed/storage/needle"
 	"github.com/chrislusf/seaweedfs/weed/util"
 )
 
-func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, so *operation.StorageOption) {
+func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, contentLength int64, so *operation.StorageOption) {
 
 	// autoChunking can be set at the command-line level or as a query param. Query param overrides command-line
 	query := r.URL.Query()
@@ -51,10 +47,10 @@ func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r *
 		if r.Header.Get("Content-Type") == "" && strings.HasSuffix(r.URL.Path, "/") {
 			reply, err = fs.mkdir(ctx, w, r)
 		} else {
-			reply, md5bytes, err = fs.doPostAutoChunk(ctx, w, r, chunkSize, so)
+			reply, md5bytes, err = fs.doPostAutoChunk(ctx, w, r, chunkSize, contentLength, so)
 		}
 	} else {
-		reply, md5bytes, err = fs.doPutAutoChunk(ctx, w, r, chunkSize, so)
+		reply, md5bytes, err = fs.doPutAutoChunk(ctx, w, r, chunkSize, contentLength, so)
 	}
 	if err != nil {
 		if strings.HasPrefix(err.Error(), "read input:") {
@@ -72,7 +68,7 @@ func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r *
 	}
 }
 
-func (fs *FilerServer) doPostAutoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, chunkSize int32, so *operation.StorageOption) (filerResult *FilerPostResult, md5bytes []byte, replyerr error) {
+func (fs *FilerServer) doPostAutoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, chunkSize int32, contentLength int64, so *operation.StorageOption) (filerResult *FilerPostResult, md5bytes []byte, replyerr error) {
 
 	multipartReader, multipartReaderErr := r.MultipartReader()
 	if multipartReaderErr != nil {
@@ -93,7 +89,7 @@ func (fs *FilerServer) doPostAutoChunk(ctx context.Context, w http.ResponseWrite
 		contentType = ""
 	}
 
-	fileChunks, md5Hash, chunkOffset, err, smallContent := fs.uploadReaderToChunks(w, r, part1, chunkSize, fileName, contentType, so)
+	fileChunks, md5Hash, chunkOffset, err, smallContent := fs.uploadReaderToChunks(w, r, part1, chunkSize, fileName, contentType, contentLength, so)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -104,7 +100,7 @@ func (fs *FilerServer) doPostAutoChunk(ctx context.Context, w http.ResponseWrite
 	return
 }
 
-func (fs *FilerServer) doPutAutoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, chunkSize int32, so *operation.StorageOption) (filerResult *FilerPostResult, md5bytes []byte, replyerr error) {
+func (fs *FilerServer) doPutAutoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, chunkSize int32, contentLength int64, so *operation.StorageOption) (filerResult *FilerPostResult, md5bytes []byte, replyerr error) {
 
 	fileName := path.Base(r.URL.Path)
 	contentType := r.Header.Get("Content-Type")
@@ -112,7 +108,7 @@ func (fs *FilerServer) doPutAutoChunk(ctx context.Context, w http.ResponseWriter
 		contentType = ""
 	}
 
-	fileChunks, md5Hash, chunkOffset, err, smallContent := fs.uploadReaderToChunks(w, r, r.Body, chunkSize, fileName, contentType, so)
+	fileChunks, md5Hash, chunkOffset, err, smallContent := fs.uploadReaderToChunks(w, r, r.Body, chunkSize, fileName, contentType, contentLength, so)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -231,92 +227,6 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
 	return filerResult, replyerr
 }
 
-func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Request, reader io.Reader, chunkSize int32, fileName, contentType string, so *operation.StorageOption) ([]*filer_pb.FileChunk, hash.Hash, int64, error, []byte) {
-	var fileChunks []*filer_pb.FileChunk
-
-	md5Hash := md5.New()
-	var partReader = ioutil.NopCloser(io.TeeReader(reader, md5Hash))
-
-	chunkOffset := int64(0)
-	var smallContent []byte
-
-	for {
-		limitedReader := io.LimitReader(partReader, int64(chunkSize))
-
-		data, err := ioutil.ReadAll(limitedReader)
-		if err != nil {
-			return nil, nil, 0, err, nil
-		}
-		if chunkOffset == 0 && !isAppend(r) {
-			if len(data) < fs.option.SaveToFilerLimit || strings.HasPrefix(r.URL.Path, filer.DirectoryEtcRoot) && len(data) < 4*1024 {
-				smallContent = data
-				chunkOffset += int64(len(data))
-				break
-			}
-		}
-
-		dataReader := util.NewBytesReader(data)
-
-		// retry to assign a different file id
-		var fileId, urlLocation string
-		var auth security.EncodedJwt
-		var assignErr, uploadErr error
-		var uploadResult *operation.UploadResult
-		for i := 0; i < 3; i++ {
-			// assign one file id for one chunk
-			fileId, urlLocation, auth, assignErr = fs.assignNewFileInfo(so)
-			if assignErr != nil {
-				return nil, nil, 0, assignErr, nil
-			}
-
-			// upload the chunk to the volume server
-			uploadResult, uploadErr, _ = fs.doUpload(urlLocation, w, r, dataReader, fileName, contentType, nil, auth)
-			if uploadErr != nil {
-				time.Sleep(251 * time.Millisecond)
-				continue
-			}
-			break
-		}
-		if uploadErr != nil {
-			return nil, nil, 0, uploadErr, nil
-		}
-
-		// if last chunk exhausted the reader exactly at the border
-		if uploadResult.Size == 0 {
-			break
-		}
-
-		// Save to chunk manifest structure
-		fileChunks = append(fileChunks, uploadResult.ToPbFileChunk(fileId, chunkOffset))
-
-		glog.V(4).Infof("uploaded %s chunk %d to %s [%d,%d)", fileName, len(fileChunks), fileId, chunkOffset, chunkOffset+int64(uploadResult.Size))
-
-		// reset variables for the next chunk
-		chunkOffset = chunkOffset + int64(uploadResult.Size)
-
-		// if last chunk was not at full chunk size, but already exhausted the reader
-		if int64(uploadResult.Size) < int64(chunkSize) {
-			break
-		}
-	}
-
-	return fileChunks, md5Hash, chunkOffset, nil, smallContent
-}
-
-func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *http.Request, limitedReader io.Reader, fileName string, contentType string, pairMap map[string]string, auth security.EncodedJwt) (*operation.UploadResult, error, []byte) {
-
-	stats.FilerRequestCounter.WithLabelValues("chunkUpload").Inc()
-	start := time.Now()
-	defer func() {
-		stats.FilerRequestHistogram.WithLabelValues("chunkUpload").Observe(time.Since(start).Seconds())
-	}()
-
-	uploadResult, err, data := operation.Upload(urlLocation, fileName, fs.option.Cipher, limitedReader, false, contentType, pairMap, auth)
-	if uploadResult != nil && uploadResult.RetryCount > 0 {
-		stats.FilerRequestCounter.WithLabelValues("chunkUploadRetry").Add(float64(uploadResult.RetryCount))
-	}
-	return uploadResult, err, data
-}
 
 func (fs *FilerServer) saveAsChunk(so *operation.StorageOption) filer.SaveDataAsChunkFunctionType {
 	return func(reader io.Reader, name string, offset int64) (*filer_pb.FileChunk, string, string, error) {
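
Note: the comment kept in autoChunk above says the chunk size can be set at the command-line level or overridden by a query param. A standalone sketch of that override pattern; the parameter name maxMB and the constant defaultMaxMB are assumptions, since neither is shown in this diff:

package main

import (
	"fmt"
	"net/http/httptest"
	"strconv"
)

// defaultMaxMB stands in for the filer's command-line chunk-size option;
// in the real server the value lives in the FilerServer options.
const defaultMaxMB = 4

// chunkSizeFor lets a ?maxMB= query parameter override the command-line
// default, mirroring the behavior the autoChunk comment describes.
func chunkSizeFor(rawQueryMaxMB string) int32 {
	maxMB := int32(defaultMaxMB)
	if n, err := strconv.ParseInt(rawQueryMaxMB, 10, 32); err == nil && n > 0 {
		maxMB = int32(n)
	}
	return maxMB * 1024 * 1024
}

func main() {
	r := httptest.NewRequest("PUT", "/path/file?maxMB=16", nil)
	fmt.Println(chunkSizeFor(r.URL.Query().Get("maxMB"))) // 16777216
	fmt.Println(chunkSizeFor(""))                         // 4194304 (command-line default)
}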

weed/server/filer_server_handlers_write_upload.go (105 changed lines)

@@ -0,0 +1,105 @@
+package weed_server
+
+import (
+	"crypto/md5"
+	"hash"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/chrislusf/seaweedfs/weed/filer"
+	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/operation"
+	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+	"github.com/chrislusf/seaweedfs/weed/security"
+	"github.com/chrislusf/seaweedfs/weed/stats"
+	"github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Request, reader io.Reader, chunkSize int32, fileName, contentType string, contentLength int64, so *operation.StorageOption) ([]*filer_pb.FileChunk, hash.Hash, int64, error, []byte) {
+	var fileChunks []*filer_pb.FileChunk
+
+	md5Hash := md5.New()
+	var partReader = ioutil.NopCloser(io.TeeReader(reader, md5Hash))
+
+	chunkOffset := int64(0)
+	var smallContent []byte
+
+	for {
+		limitedReader := io.LimitReader(partReader, int64(chunkSize))
+
+		data, err := ioutil.ReadAll(limitedReader)
+		if err != nil {
+			return nil, nil, 0, err, nil
+		}
+		if chunkOffset == 0 && !isAppend(r) {
+			if len(data) < fs.option.SaveToFilerLimit || strings.HasPrefix(r.URL.Path, filer.DirectoryEtcRoot) && len(data) < 4*1024 {
+				smallContent = data
+				chunkOffset += int64(len(data))
+				break
+			}
+		}
+
+		dataReader := util.NewBytesReader(data)
+
+		// retry to assign a different file id
+		var fileId, urlLocation string
+		var auth security.EncodedJwt
+		var assignErr, uploadErr error
+		var uploadResult *operation.UploadResult
+		for i := 0; i < 3; i++ {
+			// assign one file id for one chunk
+			fileId, urlLocation, auth, assignErr = fs.assignNewFileInfo(so)
+			if assignErr != nil {
+				return nil, nil, 0, assignErr, nil
+			}
+
+			// upload the chunk to the volume server
+			uploadResult, uploadErr, _ = fs.doUpload(urlLocation, w, r, dataReader, fileName, contentType, nil, auth)
+			if uploadErr != nil {
+				time.Sleep(251 * time.Millisecond)
+				continue
+			}
+			break
+		}
+		if uploadErr != nil {
+			return nil, nil, 0, uploadErr, nil
+		}
+
+		// if last chunk exhausted the reader exactly at the border
+		if uploadResult.Size == 0 {
+			break
+		}
+
+		// Save to chunk manifest structure
+		fileChunks = append(fileChunks, uploadResult.ToPbFileChunk(fileId, chunkOffset))
+
+		glog.V(4).Infof("uploaded %s chunk %d to %s [%d,%d)", fileName, len(fileChunks), fileId, chunkOffset, chunkOffset+int64(uploadResult.Size))
+
+		// reset variables for the next chunk
+		chunkOffset = chunkOffset + int64(uploadResult.Size)
+
+		// if last chunk was not at full chunk size, but already exhausted the reader
+		if int64(uploadResult.Size) < int64(chunkSize) {
+			break
+		}
+	}
+
+	return fileChunks, md5Hash, chunkOffset, nil, smallContent
+}
+
+func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *http.Request, limitedReader io.Reader, fileName string, contentType string, pairMap map[string]string, auth security.EncodedJwt) (*operation.UploadResult, error, []byte) {
+
+	stats.FilerRequestCounter.WithLabelValues("chunkUpload").Inc()
+	start := time.Now()
+	defer func() {
+		stats.FilerRequestHistogram.WithLabelValues("chunkUpload").Observe(time.Since(start).Seconds())
+	}()
+
+	uploadResult, err, data := operation.Upload(urlLocation, fileName, fs.option.Cipher, limitedReader, false, contentType, pairMap, auth)
+	if uploadResult != nil && uploadResult.RetryCount > 0 {
+		stats.FilerRequestCounter.WithLabelValues("chunkUploadRetry").Add(float64(uploadResult.RetryCount))
+	}
+	return uploadResult, err, data
+}
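
Note: for reference, here is the core pattern of uploadReaderToChunks reduced to a runnable, self-contained sketch. The input stream is teed through an MD5 hash, sliced into fixed-size chunks with io.LimitReader, and a small first chunk is kept inline instead of being chunked. The upload step is replaced by a print, and chunkSize and saveToFilerLimit are illustrative stand-ins for the real options:

package main

import (
	"bytes"
	"crypto/md5"
	"fmt"
	"io"
	"io/ioutil"
)

func main() {
	const chunkSize = 8        // bytes per chunk; the real code derives this from options
	const saveToFilerLimit = 4 // stand-in for fs.option.SaveToFilerLimit

	reader := bytes.NewReader([]byte("hello, seaweedfs chunking"))
	md5Hash := md5.New()
	partReader := ioutil.NopCloser(io.TeeReader(reader, md5Hash))

	chunkOffset := int64(0)
	for {
		// each pass consumes at most one chunk from the teed stream
		data, err := ioutil.ReadAll(io.LimitReader(partReader, chunkSize))
		if err != nil {
			panic(err)
		}
		if chunkOffset == 0 && len(data) < saveToFilerLimit {
			// a small first chunk is stored inline in the filer entry
			fmt.Printf("inline content: %q\n", data)
			break
		}
		if len(data) == 0 {
			break // reader exhausted exactly on a chunk border
		}
		// the real code uploads data here via fs.doUpload
		fmt.Printf("chunk [%d,%d)\n", chunkOffset, chunkOffset+int64(len(data)))
		chunkOffset += int64(len(data))
		if int64(len(data)) < chunkSize {
			break // short read: the stream is done
		}
	}
	fmt.Printf("md5: %x, total: %d bytes\n", md5Hash.Sum(nil), chunkOffset)
}

In the real function, each chunk additionally goes through up to three file-id assignment and upload attempts, sleeping 251 ms between retries, before the loop gives up.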