package command

import (
	"fmt"
	"regexp"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/pb"
	"github.com/seaweedfs/seaweedfs/weed/replication/source"
	"github.com/seaweedfs/seaweedfs/weed/security"
	"github.com/seaweedfs/seaweedfs/weed/util"
	"google.golang.org/grpc"
)
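
// FilerBackupOptions holds the command-line flags for the filer.backup command.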
type FilerBackupOptions struct {
	isActivePassive *bool
	filer           *string
	path            *string
	excludePaths    *string
	excludeFileName *string
	debug           *bool
	proxyByFiler    *bool
	timeAgo         *time.Duration
	retentionDays   *int
}

var (
	filerBackupOptions FilerBackupOptions
)
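
// init registers the filer.backup command-line flags on cmdFilerBackup.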
func init() {
	cmdFilerBackup.Run = runFilerBackup // break init cycle
	filerBackupOptions.filer = cmdFilerBackup.Flag.String("filer", "localhost:8888", "filer of one SeaweedFS cluster")
	filerBackupOptions.path = cmdFilerBackup.Flag.String("filerPath", "/", "directory to sync on filer")
	filerBackupOptions.excludePaths = cmdFilerBackup.Flag.String("filerExcludePaths", "", "exclude directories to sync on filer")
	filerBackupOptions.excludeFileName = cmdFilerBackup.Flag.String("filerExcludeFileName", "", "exclude file names that match the regexp to sync on filer")
	filerBackupOptions.proxyByFiler = cmdFilerBackup.Flag.Bool("filerProxy", false, "read and write file chunks by filer instead of volume servers")
	filerBackupOptions.debug = cmdFilerBackup.Flag.Bool("debug", false, "debug mode to print out received files")
	filerBackupOptions.timeAgo = cmdFilerBackup.Flag.Duration("timeAgo", 0, "start time before now. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\"")
	filerBackupOptions.retentionDays = cmdFilerBackup.Flag.Int("retentionDays", 0, "incremental backup retention days")
}
var cmdFilerBackup = &Command{
	UsageLine: "filer.backup -filer=<filerHost>:<filerPort> ",
	Short:     "resume-able continuously replicate files from a SeaweedFS cluster to another location defined in replication.toml",
	Long: `resume-able continuously replicate files from a SeaweedFS cluster to another location defined in replication.toml

	filer.backup listens on filer notifications. If any file is updated, it will fetch the updated content,
	and write to the destination. This is to replace filer.replicate command since additional message queue is not needed.

	If restarted and "-timeAgo" is not set, the synchronization will resume from the previous checkpoints, persisted every minute.
	A fresh sync will start from the earliest metadata logs. To reset the checkpoints, just set "-timeAgo" to a high value.

`,
}
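
// runFilerBackup loads the security and replication configurations, then retries
// doFilerBackup in an endless loop; each attempt increments clientEpoch so the
// filer can tell reconnecting subscriptions apart, and failures sleep briefly
// before the next retry.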
func runFilerBackup(cmd *Command, args []string) bool {

	util.LoadConfiguration("security", false)
	util.LoadConfiguration("replication", true)

	grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")

	clientId := util.RandomInt32()
	var clientEpoch int32

	for {
		clientEpoch++
		err := doFilerBackup(grpcDialOption, &filerBackupOptions, clientId, clientEpoch)
		if err != nil {
			glog.Errorf("backup from %s: %v", *filerBackupOptions.filer, err)
			time.Sleep(1747 * time.Millisecond)
		}
	}

	return true
}
const (
	BackupKeyPrefix = "backup."
)
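
// doFilerBackup subscribes to the source filer's metadata events and replays them
// into the data sink configured in replication.toml, periodically persisting the
// last processed timestamp under BackupKeyPrefix so a restart can resume from the
// previous checkpoint.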
func doFilerBackup(grpcDialOption grpc.DialOption, backupOption *FilerBackupOptions, clientId int32, clientEpoch int32) error {

	// find data sink
	config := util.GetViper()
	dataSink := findSink(config)
	if dataSink == nil {
		return fmt.Errorf("no data sink configured in replication.toml")
	}

	sourceFiler := pb.ServerAddress(*backupOption.filer)
	sourcePath := *backupOption.path
	excludePaths := util.StringSplit(*backupOption.excludePaths, ",")
	var reExcludeFileName *regexp.Regexp
	if *backupOption.excludeFileName != "" {
		var err error
		if reExcludeFileName, err = regexp.Compile(*backupOption.excludeFileName); err != nil {
			return fmt.Errorf("error compile regexp %v for exclude file name: %+v", *backupOption.excludeFileName, err)
		}
	}
	timeAgo := *backupOption.timeAgo
	targetPath := dataSink.GetSinkToDirectory()
	debug := *backupOption.debug

	// get start time for the data sink
	startFrom := time.Unix(0, 0)
	sinkId := util.HashStringToLong(dataSink.GetName() + dataSink.GetSinkToDirectory())
	if timeAgo.Milliseconds() == 0 {
		lastOffsetTsNs, err := getOffset(grpcDialOption, sourceFiler, BackupKeyPrefix, int32(sinkId))
		if err != nil {
			glog.V(0).Infof("starting from %v", startFrom)
		} else {
			startFrom = time.Unix(0, lastOffsetTsNs)
			glog.V(0).Infof("resuming from %v", startFrom)
		}
	} else {
		startFrom = time.Now().Add(-timeAgo)
		glog.V(0).Infof("start time is set to %v", startFrom)
	}

	// create filer sink
	filerSource := &source.FilerSource{}
	filerSource.DoInitialize(
		sourceFiler.ToHttpAddress(),
		sourceFiler.ToGrpcAddress(),
		sourcePath,
		*backupOption.proxyByFiler)
	dataSink.SetSourceFiler(filerSource)

	processEventFn := genProcessFunction(sourcePath, targetPath, excludePaths, reExcludeFileName, dataSink, debug)

	processEventFnWithOffset := pb.AddOffsetFunc(processEventFn, 3*time.Second, func(counter int64, lastTsNs int64) error {
		glog.V(0).Infof("backup %s progressed to %v %0.2f/sec", sourceFiler, time.Unix(0, lastTsNs), float64(counter)/float64(3))
		return setOffset(grpcDialOption, sourceFiler, BackupKeyPrefix, int32(sinkId), lastTsNs)
	})
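
	// For incremental sinks with -retentionDays set, periodically delete the
	// dated backup directory that has aged past the retention window.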
	if dataSink.IsIncremental() && *filerBackupOptions.retentionDays > 0 {
		go func() {
			for {
				now := time.Now()
				time.Sleep(time.Hour * 24)
				key := util.Join(targetPath, now.Add(-1*time.Hour*24*time.Duration(*filerBackupOptions.retentionDays)).Format("2006-01-02"))
				_ = dataSink.DeleteEntry(util.Join(targetPath, key), true, true, nil)
				glog.V(0).Infof("incremental backup delete directory:%s", key)
			}
		}()
	}
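
	// Follow metadata changes under sourcePath starting at startFrom, streaming
	// every event through the offset-tracking process function.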
	metadataFollowOption := &pb.MetadataFollowOption{
		ClientName:             "backup_" + dataSink.GetName(),
		ClientId:               clientId,
		ClientEpoch:            clientEpoch,
		SelfSignature:          0,
		PathPrefix:             sourcePath,
		AdditionalPathPrefixes: nil,
		DirectoriesToWatch:     nil,
		StartTsNs:              startFrom.UnixNano(),
		StopTsNs:               0,
		EventErrorType:         pb.TrivialOnError,
	}

	return pb.FollowMetadata(sourceFiler, grpcDialOption, metadataFollowOption, processEventFnWithOffset)

}