package filersink

import (
	"fmt"
	"os"
	"path/filepath"
	"sync"

	"github.com/schollz/progressbar/v3"
	"google.golang.org/grpc"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/operation"
	"github.com/seaweedfs/seaweedfs/weed/pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
	"github.com/seaweedfs/seaweedfs/weed/util"
)
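
// replicateChunks copies every chunk in sourceChunks to the destination sink,
// fanning the uploads out through fs.executor and waiting for all of them to
// finish. When more than one chunk is involved, a progress bar named after the
// file is drawn on stderr.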
func (fs *FilerSink) replicateChunks(sourceChunks []*filer_pb.FileChunk, path string) (replicatedChunks []*filer_pb.FileChunk, err error) {
	if len(sourceChunks) == 0 {
		return
	}

	// a simple progress bar. Not ideal. Fix me.
	var bar *progressbar.ProgressBar
	if len(sourceChunks) > 1 {
		name := filepath.Base(path)
		bar = progressbar.NewOptions64(int64(len(sourceChunks)),
			progressbar.OptionClearOnFinish(),
			progressbar.OptionOnCompletion(func() {
				fmt.Fprint(os.Stderr, "\n")
			}),
			progressbar.OptionFullWidth(),
			progressbar.OptionSetDescription(name),
		)
	}

	replicatedChunks = make([]*filer_pb.FileChunk, len(sourceChunks))

	var wg sync.WaitGroup
	for chunkIndex, sourceChunk := range sourceChunks {
		wg.Add(1)
		index, source := chunkIndex, sourceChunk
		fs.executor.Execute(func() {
			defer wg.Done()
			util.Retry("replicate chunks", func() error {
				replicatedChunk, e := fs.replicateOneChunk(source, path)
				if e != nil {
					err = e
					return e
				}
				replicatedChunks[index] = replicatedChunk
				if bar != nil {
					bar.Add(1)
				}
				err = nil
				return nil
			})
		})
	}
	wg.Wait()

	return
}
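
// replicateOneChunk fetches a single chunk from the source and writes it to
// the destination, returning a new FileChunk that points at the newly assigned
// file id while preserving the source chunk's offset, size, and metadata.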
func (fs *FilerSink) replicateOneChunk(sourceChunk *filer_pb.FileChunk, path string) (*filer_pb.FileChunk, error) {

	fileId, err := fs.fetchAndWrite(sourceChunk, path)
	if err != nil {
		return nil, fmt.Errorf("copy %s: %v", sourceChunk.GetFileIdString(), err)
	}

	return &filer_pb.FileChunk{
		FileId:       fileId,
		Offset:       sourceChunk.Offset,
		Size:         sourceChunk.Size,
		ModifiedTsNs: sourceChunk.ModifiedTsNs,
		ETag:         sourceChunk.ETag,
		SourceFileId: sourceChunk.GetFileIdString(),
		CipherKey:    sourceChunk.CipherKey,
		IsCompressed: sourceChunk.IsCompressed,
	}, nil
}
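
// fetchAndWrite streams one chunk from the source filer and uploads it to the
// destination: a volume is assigned on the destination side, and the data is
// written either directly to the volume server or proxied through the filer
// when writeChunkByFiler is set. It returns the file id the data was written to.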
func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk, path string) (fileId string, err error) {

	filename, header, resp, err := fs.filerSource.ReadPart(sourceChunk.GetFileIdString())
	if err != nil {
		return "", fmt.Errorf("read part %s: %v", sourceChunk.GetFileIdString(), err)
	}
	defer util.CloseResponse(resp)

	fileId, uploadResult, err, _ := operation.UploadWithRetry(
		fs,
		&filer_pb.AssignVolumeRequest{
			Count:       1,
			Replication: fs.replication,
			Collection:  fs.collection,
			TtlSec:      fs.ttlSec,
			DataCenter:  fs.dataCenter,
			DiskType:    fs.diskType,
			Path:        path,
		},
		&operation.UploadOption{
			Filename:          filename,
			Cipher:            false,
			IsInputCompressed: "gzip" == header.Get("Content-Encoding"),
			MimeType:          header.Get("Content-Type"),
			PairMap:           nil,
		},
		func(host, fileId string) string {
			fileUrl := fmt.Sprintf("http://%s/%s", host, fileId)
			if fs.writeChunkByFiler {
				fileUrl = fmt.Sprintf("http://%s/?proxyChunkId=%s", fs.address, fileId)
			}
			glog.V(4).Infof("replicating %s to %s header:%+v", filename, fileUrl, header)
			return fileUrl
		},
		resp.Body,
	)
	if err != nil {
		glog.V(0).Infof("upload source data %v: %v", sourceChunk.GetFileIdString(), err)
		return "", fmt.Errorf("upload data: %v", err)
	}
	if uploadResult.Error != "" {
		glog.V(0).Infof("upload failure %v: %v", filename, uploadResult.Error)
		return "", fmt.Errorf("upload result: %v", uploadResult.Error)
	}

	return
}
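
// Compile-time check that FilerSink satisfies the filer_pb.FilerClient interface.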
var _ = filer_pb.FilerClient(&FilerSink{})
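
// WithFilerClient dials the destination filer over gRPC and runs fn against
// the resulting SeaweedFilerClient.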
func (fs *FilerSink) WithFilerClient(streamingMode bool, fn func(filer_pb.SeaweedFilerClient) error) error {

	return pb.WithGrpcClient(streamingMode, fs.signature, func(grpcConnection *grpc.ClientConn) error {
		client := filer_pb.NewSeaweedFilerClient(grpcConnection)
		return fn(client)
	}, fs.grpcAddress, false, fs.grpcDialOption)

}
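
// AdjustedUrl returns the location URL unchanged; the sink does no address rewriting.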
func (fs *FilerSink) AdjustedUrl(location *filer_pb.Location) string {
	return location.Url
}
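
// GetDataCenter reports the data center configured for this sink.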
func (fs *FilerSink) GetDataCenter() string {
	return fs.dataCenter
}