package command

import (
    "context"
    "fmt"
    "github.com/chrislusf/seaweedfs/weed/glog"
    "github.com/chrislusf/seaweedfs/weed/pb"
    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
    "github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
    "github.com/chrislusf/seaweedfs/weed/replication/source"
    "github.com/chrislusf/seaweedfs/weed/security"
    "github.com/chrislusf/seaweedfs/weed/util"
    "google.golang.org/grpc"
    "time"
)
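
// RemoteSyncOptions holds the command-line flag values and runtime state
// for the filer.remote.sync command.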
type RemoteSyncOptions struct {
    filerAddress       *string
    grpcDialOption     grpc.DialOption
    readChunkFromFiler *bool
    debug              *bool
    timeAgo            *time.Duration
    dir                *string
    createBucketAt     *string
    mappings           *remote_pb.RemoteStorageMapping
    remoteConfs        map[string]*remote_pb.RemoteConf
    bucketsDir         string
}
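
// RemoteSyncKeyPrefix is the key prefix used for tracking remote sync progress.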
const (
    RemoteSyncKeyPrefix = "remote.sync."
)
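
// compile-time assertion that *RemoteSyncOptions satisfies the filer_pb.FilerClient interface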
var _ = filer_pb.FilerClient(&RemoteSyncOptions{})
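
// WithFilerClient connects to the configured filer over gRPC and runs fn with the client.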
func (option *RemoteSyncOptions) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
    return pb.WithFilerClient(*option.filerAddress, option.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
        return fn(client)
    })
}
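
// AdjustedUrl returns the volume location URL as-is.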
func (option *RemoteSyncOptions) AdjustedUrl(location *filer_pb.Location) string {
    return location.Url
}
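
// remoteSyncOptions holds the option values populated from the command-line flags.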
var (
    remoteSyncOptions RemoteSyncOptions
)
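
// init wires up the Run function and registers the command-line flags.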
func init() {
    cmdFilerRemoteSynchronize.Run = runFilerRemoteSynchronize // break init cycle
    remoteSyncOptions.filerAddress = cmdFilerRemoteSynchronize.Flag.String("filer", "localhost:8888", "filer of the SeaweedFS cluster")
    remoteSyncOptions.dir = cmdFilerRemoteSynchronize.Flag.String("dir", "/", "a mounted directory on filer")
    remoteSyncOptions.createBucketAt = cmdFilerRemoteSynchronize.Flag.String("createBucketAt", "", "one remote storage name to create new buckets in")
    remoteSyncOptions.readChunkFromFiler = cmdFilerRemoteSynchronize.Flag.Bool("filerProxy", false, "read file chunks from filer instead of volume servers")
    remoteSyncOptions.debug = cmdFilerRemoteSynchronize.Flag.Bool("debug", false, "debug mode to print out filer updated remote files")
    remoteSyncOptions.timeAgo = cmdFilerRemoteSynchronize.Flag.Duration("timeAgo", 0, "start time before now. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\"")
}
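
// cmdFilerRemoteSynchronize defines the weed filer.remote.sync command.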
var cmdFilerRemoteSynchronize = &Command{
    UsageLine: "filer.remote.sync -dir=/mount/s3_on_cloud or -createBucketAt=cloud1",
    Short:     "resumable continuously write back updates to remote storage",
    Long: `resumable continuously write back updates to remote storage

    filer.remote.sync listens on filer update events.
    If any mounted remote file is updated, it will fetch the updated content,
    and write to the remote storage.

    There are two modes:

    1) Write back one mounted folder to remote storage

        weed filer.remote.sync -dir=/mount/s3_on_cloud

    2) Watch the /buckets folder and write back all changes.
       Any new buckets will be created in this remote storage.

        weed filer.remote.sync -createBucketAt=cloud1

`,
}
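
// runFilerRemoteSynchronize loads the security configuration, initializes the
// filer source, and then continuously writes filer updates back to the remote
// storage in one of the two modes described above.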
func runFilerRemoteSynchronize(cmd *Command, args []string) bool {
    util.LoadConfiguration("security", false)
    grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
    remoteSyncOptions.grpcDialOption = grpcDialOption

    dir := *remoteSyncOptions.dir
    filerAddress := *remoteSyncOptions.filerAddress

    filerSource := &source.FilerSource{}
    filerSource.DoInitialize(
        filerAddress,
        pb.ServerToGrpcAddress(filerAddress),
        "/", // does not matter
        *remoteSyncOptions.readChunkFromFiler,
    )

    if dir != "" {
        // mode 1: write back one mounted folder to its remote storage
        fmt.Printf("synchronize %s to remote storage...\n", dir)
        util.RetryForever("filer.remote.sync "+dir, func() error {
            return followUpdatesAndUploadToRemote(&remoteSyncOptions, filerSource, dir)
        }, func(err error) bool {
            if err != nil {
                glog.Errorf("synchronize %s: %v", dir, err)
            }
            return true
        })
    }
    remoteSyncOptions.bucketsDir = "/buckets"
    // query the filer for its configured buckets directory
    remoteSyncOptions.WithFilerClient(func(filerClient filer_pb.SeaweedFilerClient) error {
        resp, err := filerClient.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
        if err != nil {
            return err
        }
        remoteSyncOptions.bucketsDir = resp.DirBuckets
        return nil
    })

    storageName := *remoteSyncOptions.createBucketAt

    if storageName != "" {
        // mode 2: watch the buckets directory and write back all changes,
        // creating any new buckets in the configured remote storage
        fmt.Printf("synchronize %s, default new bucket creation in %s ...\n", remoteSyncOptions.bucketsDir, storageName)
        util.RetryForever("filer.remote.sync buckets "+storageName, func() error {
            return remoteSyncOptions.followBucketUpdatesAndUploadToRemote(filerSource)
        }, func(err error) bool {
            if err != nil {
                glog.Errorf("synchronize %s to %s: %v", remoteSyncOptions.bucketsDir, storageName, err)
            }
            return true
        })
    }

    return true
}