
filer: set file size, streaming chunk file uploading

fix https://github.com/chrislusf/seaweedfs/issues/1193
Branch: fasthttp
Chris Lu committed 5 years ago
commit 382ff218d3

2 changed files:
  1. weed/server/filer_server_handlers_write.go (4 changes)
  2. weed/server/filer_server_handlers_write_autochunk.go (101 changes)
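
In short, the autochunk path used to copy the request body through a 1 MiB temp buffer into a per-chunk buffer and upload that buffer; it now wraps the multipart reader in an io.LimitReader and streams each chunk directly to the volume server, summing the uploaded chunk sizes to report the final file size. A minimal self-contained sketch of that pattern (not the commit's code; uploadChunk here is a hypothetical stand-in for fs.assignNewFileInfo plus fs.doUpload):

    package main

    import (
        "fmt"
        "io"
        "strings"
    )

    // uploadChunk stands in for assigning a chunk file id and streaming one
    // chunk to a volume server; it returns how many bytes the chunk held.
    func uploadChunk(chunk io.Reader) (int64, error) {
        return io.Copy(io.Discard, chunk) // the real code POSTs this reader
    }

    // chunkedUpload streams body in chunkSize pieces, returning the total size.
    func chunkedUpload(body io.Reader, contentLength, chunkSize int64) (int64, error) {
        var offset int64
        for offset < contentLength {
            uploaded, err := uploadChunk(io.LimitReader(body, chunkSize))
            if err != nil {
                return offset, err
            }
            if uploaded == 0 {
                break // body ended exactly on the previous chunk boundary
            }
            offset += uploaded // offset doubles as the running file size
            if uploaded < chunkSize {
                break // short chunk: the body is exhausted
            }
        }
        return offset, nil
    }

    func main() {
        body := strings.NewReader(strings.Repeat("x", 10))
        size, _ := chunkedUpload(body, 10, 4) // uploads chunks of 4, 4, 2 bytes
        fmt.Println(size)                     // 10
    }

At most one chunk's worth of data is in flight at a time, and nothing is buffered in the filer beyond what the HTTP client libraries hold.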

weed/server/filer_server_handlers_write.go (4 changes)
@@ -32,7 +32,7 @@ var (
 type FilerPostResult struct {
 	Name  string `json:"name,omitempty"`
-	Size  uint32 `json:"size,omitempty"`
+	Size  int64  `json:"size,omitempty"`
 	Error string `json:"error,omitempty"`
 	Fid   string `json:"fid,omitempty"`
 	Url   string `json:"url,omitempty"`
@@ -130,7 +130,7 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
 	// send back post result
 	reply := FilerPostResult{
 		Name:  ret.Name,
-		Size:  ret.Size,
+		Size:  int64(ret.Size),
 		Error: ret.Error,
 		Fid:   fileId,
 		Url:   urlLocation,
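
Why the type change matters (a hypothetical illustration, not from the commit): a uint32 size field wraps modulo 2^32 for files of 4 GiB or more, so the filer would report a bogus size for large uploads, while int64 holds any realistic file size:

    package main

    import "fmt"

    func main() {
        fiveGiB := int64(5) << 30    // 5368709120 bytes
        fmt.Println(uint32(fiveGiB)) // 1073741824: wrapped modulo 2^32
        fmt.Println(fiveGiB)         // 5368709120: intact as int64
    }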

weed/server/filer_server_handlers_write_autochunk.go (101 changes)
@@ -1,10 +1,8 @@
 package weed_server

 import (
-	"bytes"
 	"context"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"path"
 	"strconv"
@@ -92,66 +90,47 @@ func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r
 	var fileChunks []*filer_pb.FileChunk

-	totalBytesRead := int64(0)
-	tmpBufferSize := int32(1024 * 1024)
-	tmpBuffer := bytes.NewBuffer(make([]byte, 0, tmpBufferSize))
-	chunkBuf := make([]byte, chunkSize+tmpBufferSize, chunkSize+tmpBufferSize) // chunk size plus a little overflow
-	chunkBufOffset := int32(0)
 	chunkOffset := int64(0)
-	writtenChunks := 0
-
-	filerResult = &FilerPostResult{
-		Name: fileName,
-	}
-
-	for totalBytesRead < contentLength {
-		tmpBuffer.Reset()
-		bytesRead, readErr := io.CopyN(tmpBuffer, part1, int64(tmpBufferSize))
-		readFully := readErr != nil && readErr == io.EOF
-		tmpBuf := tmpBuffer.Bytes()
-		bytesToCopy := tmpBuf[0:int(bytesRead)]
-		copy(chunkBuf[chunkBufOffset:chunkBufOffset+int32(bytesRead)], bytesToCopy)
-		chunkBufOffset = chunkBufOffset + int32(bytesRead)
-
-		if chunkBufOffset >= chunkSize || readFully || (chunkBufOffset > 0 && bytesRead == 0) {
-			writtenChunks = writtenChunks + 1
-			fileId, urlLocation, auth, assignErr := fs.assignNewFileInfo(w, r, replication, collection, dataCenter)
-			if assignErr != nil {
-				return nil, assignErr
-			}
-
-			// upload the chunk to the volume server
-			chunkName := fileName + "_chunk_" + strconv.FormatInt(int64(len(fileChunks)+1), 10)
-			uploadErr := fs.doUpload(urlLocation, w, r, chunkBuf[0:chunkBufOffset], chunkName, "", fileId, auth)
-			if uploadErr != nil {
-				return nil, uploadErr
-			}
-
-			// Save to chunk manifest structure
-			fileChunks = append(fileChunks,
-				&filer_pb.FileChunk{
-					FileId: fileId,
-					Offset: chunkOffset,
-					Size:   uint64(chunkBufOffset),
-					Mtime:  time.Now().UnixNano(),
-				},
-			)
-
-			// reset variables for the next chunk
-			chunkBufOffset = 0
-			chunkOffset = totalBytesRead + int64(bytesRead)
-		}
-
-		totalBytesRead = totalBytesRead + int64(bytesRead)
-
-		if bytesRead == 0 || readFully {
-			break
-		}
-
-		if readErr != nil {
-			return nil, readErr
-		}
+
+	for chunkOffset < contentLength {
+		limitedReader := io.LimitReader(part1, int64(chunkSize))
+
+		// assign one file id for one chunk
+		fileId, urlLocation, auth, assignErr := fs.assignNewFileInfo(w, r, replication, collection, dataCenter)
+		if assignErr != nil {
+			return nil, assignErr
+		}
+
+		// upload the chunk to the volume server
+		chunkName := fileName + "_chunk_" + strconv.FormatInt(int64(len(fileChunks)+1), 10)
+		uploadedSize, uploadErr := fs.doUpload(urlLocation, w, r, limitedReader, chunkName, "", fileId, auth)
+		if uploadErr != nil {
+			return nil, uploadErr
+		}
+
+		// if last chunk exhausted the reader exactly at the border
+		if uploadedSize == 0 {
+			break
+		}
+
+		// Save to chunk manifest structure
+		fileChunks = append(fileChunks,
+			&filer_pb.FileChunk{
+				FileId: fileId,
+				Offset: chunkOffset,
+				Size:   uint64(uploadedSize),
+				Mtime:  time.Now().UnixNano(),
+			},
+		)
+
+		glog.V(4).Infof("uploaded %s chunk %d to %s [%d,%d) of %d", fileName, len(fileChunks), fileId, chunkOffset, chunkOffset+int64(uploadedSize), contentLength)
+
+		// if last chunk was not at full chunk size, but already exhausted the reader
+		if uploadedSize < int64(chunkSize) {
+			break
+		}
+
+		// reset variables for the next chunk
+		chunkOffset = chunkOffset + int64(uploadedSize)
 	}

 	path := r.URL.Path
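
The rewritten loop exits in two ways because io.LimitReader cannot signal EOF ahead of a read: a zero-byte upload means the body ended exactly on the previous chunk boundary, so the empty chunk is dropped, while a short chunk (uploadedSize < chunkSize) means the body is now exhausted, so the loop breaks after recording it. Either way chunkOffset ends up holding the total bytes uploaded, which is what the FilerPostResult built after the loop reports as the file size.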
@@ -176,6 +155,12 @@ func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r
 		},
 		Chunks: fileChunks,
 	}
+
+	filerResult = &FilerPostResult{
+		Name: fileName,
+		Size: chunkOffset,
+	}
+
 	if dbErr := fs.filer.CreateEntry(ctx, entry, false); dbErr != nil {
 		fs.filer.DeleteChunks(entry.Chunks)
 		replyerr = dbErr
@@ -188,7 +173,7 @@ func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r
 }

 func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *http.Request,
-	chunkBuf []byte, fileName string, contentType string, fileId string, auth security.EncodedJwt) (err error) {
+	limitedReader io.Reader, fileName string, contentType string, fileId string, auth security.EncodedJwt) (size int64, err error) {

 	stats.FilerRequestCounter.WithLabelValues("postAutoChunkUpload").Inc()
 	start := time.Now()
@@ -196,13 +181,9 @@ func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *ht
 		stats.FilerRequestHistogram.WithLabelValues("postAutoChunkUpload").Observe(time.Since(start).Seconds())
 	}()

-	ioReader := ioutil.NopCloser(bytes.NewBuffer(chunkBuf))
-	uploadResult, uploadError := operation.Upload(urlLocation, fileName, ioReader, false, contentType, nil, auth)
-	if uploadResult != nil {
-		glog.V(0).Infoln("Chunk upload result. Name:", uploadResult.Name, "Fid:", fileId, "Size:", uploadResult.Size)
-	}
+	uploadResult, uploadError := operation.Upload(urlLocation, fileName, limitedReader, false, contentType, nil, auth)
 	if uploadError != nil {
-		err = uploadError
+		return 0, uploadError
 	}
-	return
+	return int64(uploadResult.Size), nil
 }
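
With doUpload taking an io.Reader and returning the uploaded size, the extra copy through bytes.Buffer and the ioutil.NopCloser wrapper disappear: operation.Upload consumes the limited reader directly, and its result reports how many bytes the chunk actually contained. A quick standalone check of the io.LimitReader behavior this relies on (illustrative only, not from the commit):

    package main

    import (
        "fmt"
        "io"
        "strings"
    )

    func main() {
        src := strings.NewReader("hello world") // 11 bytes
        n, _ := io.Copy(io.Discard, io.LimitReader(src, 4))
        fmt.Println(n) // 4: a full "chunk"
        n, _ = io.Copy(io.Discard, io.LimitReader(src, 4))
        fmt.Println(n) // 4
        n, _ = io.Copy(io.Discard, io.LimitReader(src, 4))
        fmt.Println(n) // 3: short chunk, the source is exhausted
    }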