package weed_server

import (
	"bytes"
	"crypto/md5"
	"hash"
	"io"
	"io/ioutil"
	"net/http"
	"sort"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/operation"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/security"
	"github.com/chrislusf/seaweedfs/weed/stats"
	"github.com/chrislusf/seaweedfs/weed/util"
)

var bufPool = sync.Pool{
	New: func() interface{} {
		return new(bytes.Buffer)
	},
}
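
// uploadReaderToChunks reads from reader in pieces of up to chunkSize bytes, uploading
// each piece to a volume server in its own goroutine while computing the MD5 hash of the
// whole stream through the TeeReader. At most 4 chunk buffers may be in flight at once,
// enforced with bytesBufferLimitCond, to bound memory usage. For non-append writes, if
// the first chunk is smaller than fs.option.SaveToFilerLimit or the target path is under
// filer.DirectoryEtcRoot, its bytes are returned as smallContent instead of being uploaded.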
func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Request, reader io.Reader, chunkSize int32, fileName, contentType string, contentLength int64, so *operation.StorageOption) (fileChunks []*filer_pb.FileChunk, md5Hash hash.Hash, chunkOffset int64, uploadErr error, smallContent []byte) {

	md5Hash = md5.New()
	var partReader = ioutil.NopCloser(io.TeeReader(reader, md5Hash))

	var wg sync.WaitGroup
	var bytesBufferCounter int64
	bytesBufferLimitCond := sync.NewCond(new(sync.Mutex))
	var fileChunksLock sync.Mutex
	var uploadErrLock sync.Mutex
	for {

		// need to throttle used byte buffer
		bytesBufferLimitCond.L.Lock()
		for atomic.LoadInt64(&bytesBufferCounter) >= 4 {
			glog.V(4).Infof("waiting for byte buffer %d", bytesBufferCounter)
			bytesBufferLimitCond.Wait()
		}
		atomic.AddInt64(&bytesBufferCounter, 1)
		bytesBufferLimitCond.L.Unlock()

		bytesBuffer := bufPool.Get().(*bytes.Buffer)
		glog.V(4).Infof("received byte buffer %d", bytesBufferCounter)
		limitedReader := io.LimitReader(partReader, int64(chunkSize))

		bytesBuffer.Reset()

		dataSize, err := bytesBuffer.ReadFrom(limitedReader)

		// data, err := ioutil.ReadAll(limitedReader)
		if err != nil || dataSize == 0 {
			bufPool.Put(bytesBuffer)
			atomic.AddInt64(&bytesBufferCounter, -1)
			bytesBufferLimitCond.Signal()
			break
		}
		if chunkOffset == 0 && !isAppend(r) {
			if dataSize < fs.option.SaveToFilerLimit || strings.HasPrefix(r.URL.Path, filer.DirectoryEtcRoot) {
				chunkOffset += dataSize
				smallContent = make([]byte, dataSize)
				bytesBuffer.Read(smallContent)
				bufPool.Put(bytesBuffer)
				atomic.AddInt64(&bytesBufferCounter, -1)
				bytesBufferLimitCond.Signal()
				break
			}
		}

		wg.Add(1)
		go func(offset int64) {
			defer func() {
				bufPool.Put(bytesBuffer)
				atomic.AddInt64(&bytesBufferCounter, -1)
				bytesBufferLimitCond.Signal()
				wg.Done()
			}()

			chunk, toChunkErr := fs.dataToChunk(fileName, contentType, bytesBuffer.Bytes(), offset, so)
			if toChunkErr != nil {
				// several chunk goroutines may fail concurrently; guard the shared error
				uploadErrLock.Lock()
				if uploadErr == nil {
					uploadErr = toChunkErr
				}
				uploadErrLock.Unlock()
			}
			if chunk != nil {
				fileChunksLock.Lock()
				fileChunks = append(fileChunks, chunk)
				fileChunksLock.Unlock()
				glog.V(4).Infof("uploaded %s chunk %d to %s [%d,%d)", fileName, len(fileChunks), chunk.FileId, offset, offset+int64(chunk.Size))
			}
		}(chunkOffset)

		// reset variables for the next chunk
		chunkOffset = chunkOffset + dataSize

		// if last chunk was not at full chunk size, but already exhausted the reader
		if dataSize < int64(chunkSize) {
			break
		}
	}

	wg.Wait()

	if uploadErr != nil {
		return nil, md5Hash, 0, uploadErr, nil
	}

	sort.Slice(fileChunks, func(i, j int) bool {
		return fileChunks[i].Offset < fileChunks[j].Offset
	})

	return fileChunks, md5Hash, chunkOffset, nil, smallContent
}
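
// doUpload sends one chunk to the volume server at urlLocation and records the
// chunkUpload request counter, latency histogram, and per-result retry metrics.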
func (fs *FilerServer) doUpload(urlLocation string, limitedReader io.Reader, fileName string, contentType string, pairMap map[string]string, auth security.EncodedJwt) (*operation.UploadResult, error, []byte) {

	stats.FilerRequestCounter.WithLabelValues("chunkUpload").Inc()
	start := time.Now()
	defer func() {
		stats.FilerRequestHistogram.WithLabelValues("chunkUpload").Observe(time.Since(start).Seconds())
	}()

	uploadOption := &operation.UploadOption{
		UploadUrl:         urlLocation,
		Filename:          fileName,
		Cipher:            fs.option.Cipher,
		IsInputCompressed: false,
		MimeType:          contentType,
		PairMap:           pairMap,
		Jwt:               auth,
	}
	uploadResult, err, data := operation.Upload(limitedReader, uploadOption)
	if uploadResult != nil && uploadResult.RetryCount > 0 {
		stats.FilerRequestCounter.WithLabelValues("chunkUploadRetry").Add(float64(uploadResult.RetryCount))
	}
	return uploadResult, err, data
}
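
// dataToChunk assigns a new file id for one chunk of data and uploads it, retrying up
// to 3 times with an increasing backoff on assign or upload errors. It returns a nil
// chunk and nil error when the upload carried no bytes, i.e. the reader ended exactly
// on a chunk boundary.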
func (fs *FilerServer) dataToChunk(fileName, contentType string, data []byte, chunkOffset int64, so *operation.StorageOption) (*filer_pb.FileChunk, error) {
	dataReader := util.NewBytesReader(data)

	// retry to assign a different file id
	var fileId, urlLocation string
	var auth security.EncodedJwt
	var uploadErr error
	var uploadResult *operation.UploadResult
	for i := 0; i < 3; i++ {
		// assign one file id for one chunk
		fileId, urlLocation, auth, uploadErr = fs.assignNewFileInfo(so)
		if uploadErr != nil {
			glog.V(4).Infof("retry later due to assign error: %v", uploadErr)
			time.Sleep(time.Duration(i+1) * 251 * time.Millisecond)
			continue
		}

		// upload the chunk to the volume server
		uploadResult, uploadErr, _ = fs.doUpload(urlLocation, dataReader, fileName, contentType, nil, auth)
		if uploadErr != nil {
			glog.V(4).Infof("retry later due to upload error: %v", uploadErr)
			time.Sleep(time.Duration(i+1) * 251 * time.Millisecond)
			continue
		}
		break
	}
	if uploadErr != nil {
		glog.Errorf("upload error: %v", uploadErr)
		return nil, uploadErr
	}

	// if last chunk exhausted the reader exactly at the border
	if uploadResult.Size == 0 {
		return nil, nil
	}
	return uploadResult.ToPbFileChunk(fileId, chunkOffset), nil
}