You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

178 lines
5.8 KiB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
  1. package shell
  2. import (
  3. "context"
  4. "flag"
  5. "fmt"
  6. "github.com/seaweedfs/seaweedfs/weed/pb"
  7. "golang.org/x/sync/errgroup"
  8. "io"
  9. "time"
  10. "google.golang.org/grpc"
  11. "github.com/seaweedfs/seaweedfs/weed/operation"
  12. "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
  13. "github.com/seaweedfs/seaweedfs/weed/storage/needle"
  14. )
// init registers the volume.tier.upload command with the shell's command list.
func init() {
	Commands = append(Commands, &commandVolumeTierUpload{})
}
// commandVolumeTierUpload uploads the .dat file of a volume to a remote
// storage tier, keeping the index local for O(1) lookups.
type commandVolumeTierUpload struct {
}
// Name returns the shell command name.
func (c *commandVolumeTierUpload) Name() string {
	return "volume.tier.upload"
}
  23. func (c *commandVolumeTierUpload) Help() string {
  24. return `upload the dat file of a volume to a remote tier
  25. volume.tier.upload [-collection=""] [-fullPercent=95] [-quietFor=1h]
  26. volume.tier.upload [-collection=""] -volumeId=<volume_id> -dest=<storage_backend> [-keepLocalDatFile]
  27. e.g.:
  28. volume.tier.upload -volumeId=7 -dest=s3
  29. volume.tier.upload -volumeId=7 -dest=s3.default
  30. The <storage_backend> is defined in master.toml.
  31. For example, "s3.default" in [storage.backend.s3.default]
  32. This command will move the dat file of a volume to a remote tier.
  33. SeaweedFS enables scalable and fast local access to lots of files,
  34. and the cloud storage is slower by cost efficient. How to combine them together?
  35. Usually the data follows 80/20 rule: only 20% of data is frequently accessed.
  36. We can offload the old volumes to the cloud.
  37. With this, SeaweedFS can be both fast and scalable, and infinite storage space.
  38. Just add more local SeaweedFS volume servers to increase the throughput.
  39. The index file is still local, and the same O(1) disk read is applied to the remote file.
  40. `
  41. }
  42. func (c *commandVolumeTierUpload) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
  43. tierCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
  44. volumeId := tierCommand.Int("volumeId", 0, "the volume id")
  45. collection := tierCommand.String("collection", "", "the collection name")
  46. fullPercentage := tierCommand.Float64("fullPercent", 95, "the volume reaches the percentage of max volume size")
  47. quietPeriod := tierCommand.Duration("quietFor", 24*time.Hour, "select volumes without no writes for this period")
  48. dest := tierCommand.String("dest", "", "the target tier name")
  49. keepLocalDatFile := tierCommand.Bool("keepLocalDatFile", false, "whether keep local dat file")
  50. concurrency := tierCommand.Int("concurrency", 1, "concurrency to use when uploading")
  51. if err = tierCommand.Parse(args); err != nil {
  52. return nil
  53. }
  54. if err = commandEnv.confirmIsLocked(args); err != nil {
  55. return
  56. }
  57. vid := needle.VolumeId(*volumeId)
  58. // volumeId is provided
  59. if vid != 0 {
  60. return doVolumeTierUpload(commandEnv, writer, *collection, vid, *dest, *keepLocalDatFile)
  61. }
  62. // apply to all volumes in the collection
  63. // reusing collectVolumeIdsForEcEncode for now
  64. volumeIds, err := collectVolumeIdsForEcEncode(commandEnv, *collection, *fullPercentage, *quietPeriod)
  65. if err != nil {
  66. return err
  67. }
  68. fmt.Printf("tier upload volumes: %v\n", volumeIds)
  69. eg, ctx := errgroup.WithContext(context.Background())
  70. eg.SetLimit(*concurrency)
  71. for _, _vid := range volumeIds {
  72. vid := _vid // capture the loop variable
  73. eg.Go(func() error {
  74. if ctx.Err() != nil {
  75. return nil
  76. }
  77. if err = doVolumeTierUpload(commandEnv, writer, *collection, vid, *dest, *keepLocalDatFile); err != nil {
  78. fmt.Fprintf(writer, "tier upload volume %v error: %v\n", vid, err)
  79. return err
  80. }
  81. return nil
  82. })
  83. }
  84. return eg.Wait()
  85. }
  86. func doVolumeTierUpload(commandEnv *CommandEnv, writer io.Writer, collection string, vid needle.VolumeId, dest string, keepLocalDatFile bool) (err error) {
  87. // find volume location
  88. existingLocations, found := commandEnv.MasterClient.GetLocationsClone(uint32(vid))
  89. if !found {
  90. return fmt.Errorf("volume %d not found", vid)
  91. }
  92. err = markVolumeReplicasWritable(commandEnv.option.GrpcDialOption, vid, existingLocations, false)
  93. if err != nil {
  94. return fmt.Errorf("mark volume %d as readonly on %s: %v", vid, existingLocations[0].Url, err)
  95. }
  96. // copy the .dat file to remote tier
  97. err = uploadDatToRemoteTier(commandEnv.option.GrpcDialOption, writer, vid, collection, existingLocations[0].ServerAddress(), dest, keepLocalDatFile)
  98. if err != nil {
  99. return fmt.Errorf("copy dat file for volume %d on %s to %s: %v", vid, existingLocations[0].Url, dest, err)
  100. }
  101. // now the first replica has the .idx and .vif files.
  102. // ask replicas on other volume server to delete its own local copy
  103. for i, location := range existingLocations {
  104. if i == 0 {
  105. break
  106. }
  107. fmt.Printf("delete volume %d from %s\n", vid, location.Url)
  108. err = deleteVolume(commandEnv.option.GrpcDialOption, vid, location.ServerAddress(), false)
  109. if err != nil {
  110. return fmt.Errorf("deleteVolume %s volume %d: %v", location.Url, vid, err)
  111. }
  112. }
  113. return nil
  114. }
  115. func uploadDatToRemoteTier(grpcDialOption grpc.DialOption, writer io.Writer, volumeId needle.VolumeId, collection string, sourceVolumeServer pb.ServerAddress, dest string, keepLocalDatFile bool) error {
  116. err := operation.WithVolumeServerClient(true, sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
  117. stream, copyErr := volumeServerClient.VolumeTierMoveDatToRemote(context.Background(), &volume_server_pb.VolumeTierMoveDatToRemoteRequest{
  118. VolumeId: uint32(volumeId),
  119. Collection: collection,
  120. DestinationBackendName: dest,
  121. KeepLocalDatFile: keepLocalDatFile,
  122. })
  123. var lastProcessed int64
  124. for {
  125. resp, recvErr := stream.Recv()
  126. if recvErr != nil {
  127. if recvErr == io.EOF {
  128. break
  129. } else {
  130. return recvErr
  131. }
  132. }
  133. processingSpeed := float64(resp.Processed-lastProcessed) / 1024.0 / 1024.0
  134. fmt.Fprintf(writer, "copied %.2f%%, %d bytes, %.2fMB/s\n", resp.ProcessedPercentage, resp.Processed, processingSpeed)
  135. lastProcessed = resp.Processed
  136. }
  137. return copyErr
  138. })
  139. return err
  140. }