package operation

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"mime"
	"mime/multipart"
	"net"
	"net/http"
	"net/textproto"
	"path/filepath"
	"strings"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
	"github.com/seaweedfs/seaweedfs/weed/security"
	"github.com/seaweedfs/seaweedfs/weed/stats"
	"github.com/seaweedfs/seaweedfs/weed/util"
)

type UploadOption struct {
	UploadUrl         string
	Filename          string
	Cipher            bool
	IsInputCompressed bool
	MimeType          string
	PairMap           map[string]string
	Jwt               security.EncodedJwt
	RetryForever      bool
	Md5               string
}

type UploadResult struct {
	Name       string `json:"name,omitempty"`
	Size       uint32 `json:"size,omitempty"`
	Error      string `json:"error,omitempty"`
	ETag       string `json:"eTag,omitempty"`
	CipherKey  []byte `json:"cipherKey,omitempty"`
	Mime       string `json:"mime,omitempty"`
	Gzip       uint32 `json:"gzip,omitempty"`
	ContentMd5 string `json:"contentMd5,omitempty"`
	RetryCount int    `json:"-"`
}

func (uploadResult *UploadResult) ToPbFileChunk(fileId string, offset int64, tsNs int64) *filer_pb.FileChunk {
	fid, _ := filer_pb.ToFileIdObject(fileId)
	return &filer_pb.FileChunk{
		FileId:       fileId,
		Offset:       offset,
		Size:         uint64(uploadResult.Size),
		ModifiedTsNs: tsNs,
		ETag:         uploadResult.ContentMd5,
		CipherKey:    uploadResult.CipherKey,
		IsCompressed: uploadResult.Gzip > 0,
		Fid:          fid,
	}
}

// HTTPClient interface for testing
type HTTPClient interface {
	Do(req *http.Request) (*http.Response, error)
}

var (
	HttpClient HTTPClient
)

func init() {
	HttpClient = &http.Client{Transport: &http.Transport{
		DialContext: (&net.Dialer{
			Timeout:   10 * time.Second,
			KeepAlive: 10 * time.Second,
		}).DialContext,
		MaxIdleConns:        1024,
		MaxIdleConnsPerHost: 1024,
	}}
}
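// Illustrative sketch, not part of the original file: because all uploads go
// through the package-level HttpClient interface above, tests can swap in a
// stub. recordingClient is a hypothetical test double that captures the
// request URL and answers 204 No Content, so upload_content returns early
// with only the ETag filled in.
type recordingClient struct {
	lastURL string
}

func (c *recordingClient) Do(req *http.Request) (*http.Response, error) {
	c.lastURL = req.URL.String()
	// getEtag strips the surrounding quotes, so the caller sees "abc123"
	return &http.Response{
		StatusCode: http.StatusNoContent,
		Header:     http.Header{"ETag": []string{`"abc123"`}},
		Body:       io.NopCloser(bytes.NewReader(nil)),
	}, nil
}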
// UploadWithRetry retries both the volume assignment request and the content upload.
// The option parameter does not need to specify UploadUrl and Jwt; both are filled in
// from the volume assignment response.
func UploadWithRetry(filerClient filer_pb.FilerClient, assignRequest *filer_pb.AssignVolumeRequest, uploadOption *UploadOption, genFileUrlFn func(host, fileId string) string, reader io.Reader) (fileId string, uploadResult *UploadResult, err error, data []byte) {
	doUploadFunc := func() error {

		var host string
		var auth security.EncodedJwt

		// grpc assign volume
		if grpcAssignErr := filerClient.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
			resp, assignErr := client.AssignVolume(context.Background(), assignRequest)
			if assignErr != nil {
				glog.V(0).Infof("assign volume failure %v: %v", assignRequest, assignErr)
				return assignErr
			}
			if resp.Error != "" {
				return fmt.Errorf("assign volume failure %v: %v", assignRequest, resp.Error)
			}

			fileId, auth = resp.FileId, security.EncodedJwt(resp.Auth)
			loc := resp.Location
			host = filerClient.AdjustedUrl(loc)

			return nil
		}); grpcAssignErr != nil {
			return fmt.Errorf("filerGrpcAddress assign volume: %v", grpcAssignErr)
		}

		uploadOption.UploadUrl = genFileUrlFn(host, fileId)
		uploadOption.Jwt = auth

		var uploadErr error
		uploadResult, uploadErr, data = doUpload(reader, uploadOption)
		return uploadErr
	}
	if uploadOption.RetryForever {
		util.RetryForever("uploadWithRetryForever", doUploadFunc, func(err error) (shouldContinue bool) {
			glog.V(0).Infof("upload content: %v", err)
			return true
		})
	} else {
		err = util.Retry("uploadWithRetry", doUploadFunc)
	}

	return
}
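// Illustrative sketch, not part of the original file: how a caller might drive
// UploadWithRetry. The replication and collection values, the URL format in
// genFileUrlFn, and the surrounding function itself are assumptions for
// illustration; a real caller supplies its own filer_pb.FilerClient
// implementation and assignment settings.
func uploadWithRetrySketch(filerClient filer_pb.FilerClient, content io.Reader) error {
	assignRequest := &filer_pb.AssignVolumeRequest{
		Count:       1,
		Replication: "000",     // hypothetical replication setting
		Collection:  "example", // hypothetical collection name
	}
	uploadOption := &UploadOption{
		Filename:     "example.txt",
		MimeType:     "text/plain",
		RetryForever: false, // give up after util.Retry is exhausted instead of blocking forever
	}
	// genFileUrlFn turns the assigned host and file id into the upload URL
	genFileUrlFn := func(host, fileId string) string {
		return fmt.Sprintf("http://%s/%s", host, fileId)
	}
	fileId, result, err, _ := UploadWithRetry(filerClient, assignRequest, uploadOption, genFileUrlFn, content)
	if err != nil {
		return err
	}
	glog.V(0).Infof("uploaded as %s (%d bytes)", fileId, result.Size)
	return nil
}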
var fileNameEscaper = strings.NewReplacer(`\`, `\\`, `"`, `\"`, "\n", "")

// UploadData sends a POST request to a volume server to upload the content with adjustable compression level
func UploadData(data []byte, option *UploadOption) (uploadResult *UploadResult, err error) {
	uploadResult, err = retriedUploadData(data, option)
	return
}

// Upload sends a POST request to a volume server to upload the content with fast compression
func Upload(reader io.Reader, option *UploadOption) (uploadResult *UploadResult, err error, data []byte) {
	uploadResult, err, data = doUpload(reader, option)
	return
}

func doUpload(reader io.Reader, option *UploadOption) (uploadResult *UploadResult, err error, data []byte) {
	bytesReader, ok := reader.(*util.BytesReader)
	if ok {
		data = bytesReader.Bytes
	} else {
		data, err = io.ReadAll(reader)
		if err != nil {
			err = fmt.Errorf("read input: %v", err)
			return
		}
	}
	uploadResult, uploadErr := retriedUploadData(data, option)
	return uploadResult, uploadErr, data
}

func retriedUploadData(data []byte, option *UploadOption) (uploadResult *UploadResult, err error) {
	for i := 0; i < 3; i++ {
		if i > 0 {
			time.Sleep(time.Millisecond * time.Duration(237*(i+1)))
		}
		uploadResult, err = doUploadData(data, option)
		if err == nil {
			uploadResult.RetryCount = i
			return
		}
		glog.Warningf("uploading %d to %s: %v", i, option.UploadUrl, err)
	}
	return
}

func doUploadData(data []byte, option *UploadOption) (uploadResult *UploadResult, err error) {
	contentIsGzipped := option.IsInputCompressed
	shouldGzipNow := false
	if !option.IsInputCompressed {
		if option.MimeType == "" {
			option.MimeType = http.DetectContentType(data)
			// println("detect1 mimetype to", MimeType)
			if option.MimeType == "application/octet-stream" {
				option.MimeType = ""
			}
		}
		if shouldBeCompressed, iAmSure := util.IsCompressableFileType(filepath.Base(option.Filename), option.MimeType); iAmSure && shouldBeCompressed {
			shouldGzipNow = true
		} else if !iAmSure && option.MimeType == "" && len(data) > 16*1024 {
			var compressed []byte
			compressed, err = util.GzipData(data[0:128])
			// only compress when the 128-byte sample shrinks below 90% of its size
			shouldGzipNow = len(compressed)*10 < 128*9
		}
	}

	var clearDataLen int

	// gzip if possible
	// this could be double copying
	clearDataLen = len(data)
	clearData := data
	if shouldGzipNow && !option.Cipher {
		compressed, compressErr := util.GzipData(data)
		// fmt.Printf("data is compressed from %d ==> %d\n", len(data), len(compressed))
		if compressErr == nil {
			data = compressed
			contentIsGzipped = true
		}
	} else if option.IsInputCompressed {
		// just to get the clear data length
		clearData, err = util.DecompressData(data)
		if err == nil {
			clearDataLen = len(clearData)
		}
	}

	if option.Cipher {
		// encrypt(gzip(data))

		// encrypt
		cipherKey := util.GenCipherKey()
		encryptedData, encryptionErr := util.Encrypt(clearData, cipherKey)
		if encryptionErr != nil {
			err = fmt.Errorf("encrypt input: %v", encryptionErr)
			return
		}

		// upload data
		uploadResult, err = upload_content(func(w io.Writer) (err error) {
			_, err = w.Write(encryptedData)
			return
		}, len(encryptedData), &UploadOption{
			UploadUrl:         option.UploadUrl,
			Filename:          "",
			Cipher:            false,
			IsInputCompressed: false,
			MimeType:          "",
			PairMap:           nil,
			Jwt:               option.Jwt,
		})
		if uploadResult == nil {
			return
		}
		uploadResult.Name = option.Filename
		uploadResult.Mime = option.MimeType
		uploadResult.CipherKey = cipherKey
		uploadResult.Size = uint32(clearDataLen)
	} else {
		// upload data
		uploadResult, err = upload_content(func(w io.Writer) (err error) {
			_, err = w.Write(data)
			return
		}, len(data), &UploadOption{
			UploadUrl:         option.UploadUrl,
			Filename:          option.Filename,
			Cipher:            false,
			IsInputCompressed: contentIsGzipped,
			MimeType:          option.MimeType,
			PairMap:           option.PairMap,
			Jwt:               option.Jwt,
			Md5:               option.Md5,
		})
		if uploadResult == nil {
			return
		}
		uploadResult.Size = uint32(clearDataLen)
		if contentIsGzipped {
			uploadResult.Gzip = 1
		}
	}

	return uploadResult, err
}
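// Illustrative sketch, not part of the original file: the sampling heuristic
// doUploadData applies when the file type is unknown, restated on its own. It
// gzips only the first 128 bytes and commits to compressing the whole payload
// when the sample shrinks below 90% of its size; len(compressed)*10 < 128*9
// is the integer-arithmetic form of len(compressed)/128 < 0.9.
func sampleSuggestsGzip(data []byte) bool {
	if len(data) < 128 {
		// too small to sample; doUploadData also requires len(data) > 16*1024
		return false
	}
	compressed, err := util.GzipData(data[:128])
	if err != nil {
		return false
	}
	return len(compressed)*10 < 128*9
}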
func upload_content(fillBufferFunction func(w io.Writer) error, originalDataSize int, option *UploadOption) (*UploadResult, error) {
	buf := GetBuffer()
	defer PutBuffer(buf)
	body_writer := multipart.NewWriter(buf)
	h := make(textproto.MIMEHeader)
	filename := fileNameEscaper.Replace(option.Filename)
	h.Set("Content-Disposition", fmt.Sprintf(`form-data; name="file"; filename="%s"`, filename))
	h.Set("Idempotency-Key", option.UploadUrl)
	if option.MimeType == "" {
		option.MimeType = mime.TypeByExtension(strings.ToLower(filepath.Ext(option.Filename)))
	}
	if option.MimeType != "" {
		h.Set("Content-Type", option.MimeType)
	}
	if option.IsInputCompressed {
		h.Set("Content-Encoding", "gzip")
	}
	if option.Md5 != "" {
		h.Set("Content-MD5", option.Md5)
	}

	file_writer, cp_err := body_writer.CreatePart(h)
	if cp_err != nil {
		glog.V(0).Infoln("error creating form file", cp_err.Error())
		return nil, cp_err
	}
	if err := fillBufferFunction(file_writer); err != nil {
		glog.V(0).Infoln("error copying data", err)
		return nil, err
	}
	content_type := body_writer.FormDataContentType()
	if err := body_writer.Close(); err != nil {
		glog.V(0).Infoln("error closing body", err)
		return nil, err
	}

	req, postErr := http.NewRequest("POST", option.UploadUrl, bytes.NewReader(buf.Bytes()))
	if postErr != nil {
		glog.V(1).Infof("create upload request %s: %v", option.UploadUrl, postErr)
		return nil, fmt.Errorf("create upload request %s: %v", option.UploadUrl, postErr)
	}
	req.Header.Set("Content-Type", content_type)
	for k, v := range option.PairMap {
		req.Header.Set(k, v)
	}
	if option.Jwt != "" {
		req.Header.Set("Authorization", "BEARER "+string(option.Jwt))
	}

	// print("+")
	resp, post_err := HttpClient.Do(req)
	defer util.CloseResponse(resp)
	if post_err != nil {
		if strings.Contains(post_err.Error(), "connection reset by peer") ||
			strings.Contains(post_err.Error(), "use of closed network connection") {
			glog.V(1).Infof("repeat error upload request %s: %v", option.UploadUrl, post_err)
			stats.FilerRequestCounter.WithLabelValues(stats.RepeatErrorUploadContent).Inc()
			resp, post_err = HttpClient.Do(req)
			defer util.CloseResponse(resp)
		}
	}
	if post_err != nil {
		return nil, fmt.Errorf("upload %s %d bytes to %v: %v", option.Filename, originalDataSize, option.UploadUrl, post_err)
	}
	// print("-")

	var ret UploadResult
	etag := getEtag(resp)
	if resp.StatusCode == http.StatusNoContent {
		ret.ETag = etag
		return &ret, nil
	}

	resp_body, ra_err := io.ReadAll(resp.Body)
	if ra_err != nil {
		return nil, fmt.Errorf("read response body %v: %v", option.UploadUrl, ra_err)
	}

	unmarshal_err := json.Unmarshal(resp_body, &ret)
	if unmarshal_err != nil {
		glog.Errorf("unmarshal %s: %v", option.UploadUrl, string(resp_body))
		return nil, fmt.Errorf("unmarshal %v: %v", option.UploadUrl, unmarshal_err)
	}
	if ret.Error != "" {
		return nil, fmt.Errorf("unmarshalled error %v: %v", option.UploadUrl, ret.Error)
	}
	ret.ETag = etag
	ret.ContentMd5 = resp.Header.Get("Content-MD5")
	return &ret, nil
}

func getEtag(r *http.Response) (etag string) {
	etag = r.Header.Get("ETag")
	if strings.HasPrefix(etag, "\"") && strings.HasSuffix(etag, "\"") {
		etag = etag[1 : len(etag)-1]
	}
	return
}
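// Illustrative sketch, not part of the original file: uploading a small
// in-memory payload with UploadData and converting the result to a filer
// chunk. The upload URL and file id are hypothetical placeholders; a real
// caller obtains both from a volume assignment (see UploadWithRetry above).
func uploadDataSketch() {
	data := []byte("hello, seaweedfs")
	result, err := UploadData(data, &UploadOption{
		UploadUrl: "http://127.0.0.1:8080/3,01637037d6", // hypothetical fid URL
		Filename:  "hello.txt",
		MimeType:  "text/plain",
	})
	if err != nil {
		glog.Errorf("upload failed: %v", err)
		return
	}
	// convert to a filer file chunk at offset 0, stamped with the current time
	chunk := result.ToPbFileChunk("3,01637037d6", 0, time.Now().UnixNano())
	glog.V(0).Infof("uploaded %s: size=%d etag=%s gzip=%d, chunk fid=%s",
		result.Name, result.Size, result.ETag, result.Gzip, chunk.FileId)
}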