package command

import (
	"context"
	"fmt"
	"os"
	"strings"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/filer"
	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/remote_pb"
	"github.com/seaweedfs/seaweedfs/weed/remote_storage"
	"github.com/seaweedfs/seaweedfs/weed/replication/source"
	"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
	"github.com/seaweedfs/seaweedfs/weed/util"

	"google.golang.org/grpc"
	"google.golang.org/protobuf/proto"
)
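// followUpdatesAndUploadToRemote subscribes to filer metadata events under mountedDir
// and replicates creates, updates, and deletes to the mounted remote storage,
// periodically persisting the processed timestamp as the sync offset.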
func followUpdatesAndUploadToRemote(option *RemoteSyncOptions, filerSource *source.FilerSource, mountedDir string) error {

	// read filer remote storage mount mappings
	_, _, remoteStorageMountLocation, remoteStorage, detectErr := filer.DetectMountInfo(option.grpcDialOption, pb.ServerAddress(*option.filerAddress), mountedDir)
	if detectErr != nil {
		return fmt.Errorf("read mount info: %v", detectErr)
	}

	eachEntryFunc, err := option.makeEventProcessor(remoteStorage, mountedDir, remoteStorageMountLocation, filerSource)
	if err != nil {
		return err
	}

	lastOffsetTs := collectLastSyncOffset(option, option.grpcDialOption, pb.ServerAddress(*option.filerAddress), mountedDir, *option.timeAgo)
	processor := NewMetadataProcessor(eachEntryFunc, 128, lastOffsetTs.UnixNano())

	var lastLogTsNs = time.Now().UnixNano()
	processEventFnWithOffset := pb.AddOffsetFunc(func(resp *filer_pb.SubscribeMetadataResponse) error {
		// apply or clear the S3 storage class override on new entries
		if resp.EventNotification.NewEntry != nil {
			if *option.storageClass == "" {
				if _, ok := resp.EventNotification.NewEntry.Extended[s3_constants.AmzStorageClass]; ok {
					delete(resp.EventNotification.NewEntry.Extended, s3_constants.AmzStorageClass)
				}
			} else {
				resp.EventNotification.NewEntry.Extended[s3_constants.AmzStorageClass] = []byte(*option.storageClass)
			}
		}

		processor.AddSyncJob(resp)
		return nil
	}, 3*time.Second, func(counter int64, lastTsNs int64) error {
		offsetTsNs := processor.processedTsWatermark.Load()
		if offsetTsNs == 0 {
			return nil
		}
		// use processor.processedTsWatermark instead of the lastTsNs from the most recent job
		now := time.Now().UnixNano()
		glog.V(0).Infof("remote sync %s progressed to %v %0.2f/sec", *option.filerAddress, time.Unix(0, offsetTsNs), float64(counter)/(float64(now-lastLogTsNs)/1e9))
		lastLogTsNs = now
		return remote_storage.SetSyncOffset(option.grpcDialOption, pb.ServerAddress(*option.filerAddress), mountedDir, offsetTsNs)
	})

	option.clientEpoch++

	metadataFollowOption := &pb.MetadataFollowOption{
		ClientName:             "filer.remote.sync",
		ClientId:               option.clientId,
		ClientEpoch:            option.clientEpoch,
		SelfSignature:          0,
		PathPrefix:             mountedDir,
		AdditionalPathPrefixes: []string{filer.DirectoryEtcRemote},
		DirectoriesToWatch:     nil,
		StartTsNs:              lastOffsetTs.UnixNano(),
		StopTsNs:               0,
		EventErrorType:         pb.RetryForeverOnError,
	}
	return pb.FollowMetadata(pb.ServerAddress(*option.filerAddress), option.grpcDialOption, metadataFollowOption, processEventFnWithOffset)
}
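// makeEventProcessor returns the per-event handler that mirrors filer metadata
// changes (create, delete, rename/update) to the remote storage client, and reacts
// to remote-configuration changes under filer.DirectoryEtcRemote.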
func (option *RemoteSyncOptions) makeEventProcessor(remoteStorage *remote_pb.RemoteConf, mountedDir string, remoteStorageMountLocation *remote_pb.RemoteStorageLocation, filerSource *source.FilerSource) (pb.ProcessMetadataFunc, error) {
	client, err := remote_storage.GetRemoteStorage(remoteStorage)
	if err != nil {
		return nil, err
	}

	handleEtcRemoteChanges := func(resp *filer_pb.SubscribeMetadataResponse) error {
		message := resp.EventNotification
		if message.NewEntry == nil {
			return nil
		}
		// check that this directory is still mounted to the same remote location
		if message.NewEntry.Name == filer.REMOTE_STORAGE_MOUNT_FILE {
			mappings, readErr := filer.UnmarshalRemoteStorageMappings(message.NewEntry.Content)
			if readErr != nil {
				return fmt.Errorf("unmarshal mappings: %v", readErr)
			}
			if remoteLoc, found := mappings.Mappings[mountedDir]; found {
				if remoteStorageMountLocation.Bucket != remoteLoc.Bucket || remoteStorageMountLocation.Path != remoteLoc.Path {
					glog.Fatalf("Unexpected mount changes %+v => %+v", remoteStorageMountLocation, remoteLoc)
				}
			} else {
				glog.V(0).Infof("unmounted %s exiting ...", mountedDir)
				os.Exit(0)
			}
		}
		// reload the remote storage configuration and rebuild the client
		if message.NewEntry.Name == remoteStorage.Name+filer.REMOTE_STORAGE_CONF_SUFFIX {
			conf := &remote_pb.RemoteConf{}
			if err := proto.Unmarshal(message.NewEntry.Content, conf); err != nil {
				return fmt.Errorf("unmarshal %s/%s: %v", filer.DirectoryEtcRemote, message.NewEntry.Name, err)
			}
			remoteStorage = conf
			if newClient, err := remote_storage.GetRemoteStorage(remoteStorage); err == nil {
				client = newClient
			} else {
				return err
			}
		}

		return nil
	}

	eachEntryFunc := func(resp *filer_pb.SubscribeMetadataResponse) error {
		message := resp.EventNotification
		if strings.HasPrefix(resp.Directory, filer.DirectoryEtcRemote) {
			return handleEtcRemoteChanges(resp)
		}

		if filer_pb.IsEmpty(resp) {
			return nil
		}
		if filer_pb.IsCreate(resp) {
			if isMultipartUploadFile(message.NewParentPath, message.NewEntry.Name) {
				return nil
			}
			if !filer.HasData(message.NewEntry) {
				return nil
			}
			glog.V(2).Infof("create: %+v", resp)
			if !shouldSendToRemote(message.NewEntry) {
				glog.V(2).Infof("skipping creating: %+v", resp)
				return nil
			}
			dest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(message.NewParentPath, message.NewEntry.Name), remoteStorageMountLocation)
			if message.NewEntry.IsDirectory {
				glog.V(0).Infof("mkdir %s", remote_storage.FormatLocation(dest))
				return client.WriteDirectory(dest, message.NewEntry)
			}
			glog.V(0).Infof("create %s", remote_storage.FormatLocation(dest))
			remoteEntry, writeErr := retriedWriteFile(client, filerSource, message.NewEntry, dest)
			if writeErr != nil {
				return writeErr
			}
			return updateLocalEntry(option, message.NewParentPath, message.NewEntry, remoteEntry)
		}
		if filer_pb.IsDelete(resp) {
			glog.V(2).Infof("delete: %+v", resp)
			dest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(resp.Directory, message.OldEntry.Name), remoteStorageMountLocation)
			if message.OldEntry.IsDirectory {
				glog.V(0).Infof("rmdir %s", remote_storage.FormatLocation(dest))
				return client.RemoveDirectory(dest)
			}
			glog.V(0).Infof("delete %s", remote_storage.FormatLocation(dest))
			return client.DeleteFile(dest)
		}
		if message.OldEntry != nil && message.NewEntry != nil {
			if isMultipartUploadFile(message.NewParentPath, message.NewEntry.Name) {
				return nil
			}
			oldDest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(resp.Directory, message.OldEntry.Name), remoteStorageMountLocation)
			dest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(message.NewParentPath, message.NewEntry.Name), remoteStorageMountLocation)
			if !shouldSendToRemote(message.NewEntry) {
				glog.V(2).Infof("skipping updating: %+v", resp)
				return nil
			}
			if message.NewEntry.IsDirectory {
				return client.WriteDirectory(dest, message.NewEntry)
			}
			// same path and same content: only the metadata changed
			if resp.Directory == message.NewParentPath && message.OldEntry.Name == message.NewEntry.Name {
				if filer.IsSameData(message.OldEntry, message.NewEntry) {
					glog.V(2).Infof("update meta: %+v", resp)
					return client.UpdateFileMetadata(dest, message.OldEntry, message.NewEntry)
				}
			}
			// otherwise treat the update as a delete of the old location plus a re-upload
			glog.V(2).Infof("update: %+v", resp)
			glog.V(0).Infof("delete %s", remote_storage.FormatLocation(oldDest))
			if err := client.DeleteFile(oldDest); err != nil {
				if isMultipartUploadFile(resp.Directory, message.OldEntry.Name) {
					return nil
				}
			}
			remoteEntry, writeErr := retriedWriteFile(client, filerSource, message.NewEntry, dest)
			if writeErr != nil {
				return writeErr
			}
			return updateLocalEntry(option, message.NewParentPath, message.NewEntry, remoteEntry)
		}

		return nil
	}
	return eachEntryFunc, nil
}
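// retriedWriteFile streams the entry content from the filer source and writes it
// to the remote location, retrying the write via util.Retry.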
func retriedWriteFile(client remote_storage.RemoteStorageClient, filerSource *source.FilerSource, newEntry *filer_pb.Entry, dest *remote_pb.RemoteStorageLocation) (remoteEntry *filer_pb.RemoteEntry, err error) {
	var writeErr error
	err = util.Retry("writeFile", func() error {
		reader := filer.NewFileReader(filerSource, newEntry)
		glog.V(0).Infof("create %s", remote_storage.FormatLocation(dest))
		remoteEntry, writeErr = client.WriteFile(dest, newEntry, reader)
		if writeErr != nil {
			return writeErr
		}
		return nil
	})
	if err != nil {
		glog.Errorf("write to %s: %v", dest, err)
	}
	return
}
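// collectLastSyncOffset decides where to resume syncing from, in order of priority:
// an explicitly specified timeAgo, the last stored sync offset for this directory,
// or the directory's creation time.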
func collectLastSyncOffset(filerClient filer_pb.FilerClient, grpcDialOption grpc.DialOption, filerAddress pb.ServerAddress, mountedDir string, timeAgo time.Duration) time.Time {
	// 1. specified by timeAgo
	// 2. last offset timestamp for this directory
	// 3. directory creation time
	var lastOffsetTs time.Time
	if timeAgo == 0 {
		mountedDirEntry, err := filer_pb.GetEntry(filerClient, util.FullPath(mountedDir))
		if err != nil {
			glog.V(0).Infof("get mounted directory %s: %v", mountedDir, err)
			return time.Now()
		}

		lastOffsetTsNs, err := remote_storage.GetSyncOffset(grpcDialOption, filerAddress, mountedDir)
		if mountedDirEntry != nil {
			// lastOffsetTsNs is in nanoseconds while Crtime is in Unix seconds, so
			// convert before comparing; resume from the stored offset only if it is
			// newer than the directory creation time
			if err == nil && mountedDirEntry.Attributes.Crtime < lastOffsetTsNs/1000000000 {
				lastOffsetTs = time.Unix(0, lastOffsetTsNs)
				glog.V(0).Infof("resume from %v", lastOffsetTs)
			} else {
				lastOffsetTs = time.Unix(mountedDirEntry.Attributes.Crtime, 0)
			}
		} else {
			lastOffsetTs = time.Now()
		}
	} else {
		lastOffsetTs = time.Now().Add(-timeAgo)
	}
	return lastOffsetTs
}
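// toRemoteStorageLocation maps a path under the local mount directory to the
// corresponding bucket and path on the remote storage. For example (illustrative
// values), with mountDir "/buckets/photos" and a remote mount path "/backup",
// "/buckets/photos/2021/a.jpg" maps to "/backup/2021/a.jpg" in the mounted bucket.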
func toRemoteStorageLocation(mountDir, sourcePath util.FullPath, remoteMountLocation *remote_pb.RemoteStorageLocation) *remote_pb.RemoteStorageLocation {
	source := string(sourcePath[len(mountDir):])
	dest := util.FullPath(remoteMountLocation.Path).Child(source)
	return &remote_pb.RemoteStorageLocation{
		Name:   remoteMountLocation.Name,
		Bucket: remoteMountLocation.Bucket,
		Path:   string(dest),
	}
}
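// shouldSendToRemote reports whether the local entry is newer than what the
// remote side already has and therefore needs to be uploaded.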
func shouldSendToRemote(entry *filer_pb.Entry) bool {
	if entry.RemoteEntry == nil {
		return true
	}
	if entry.RemoteEntry.RemoteMtime < entry.Attributes.Mtime {
		return true
	}
	return false
}
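// updateLocalEntry records the remote entry (with the last local sync time) back
// onto the local filer entry so later events can tell what has already been synced.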
func updateLocalEntry(filerClient filer_pb.FilerClient, dir string, entry *filer_pb.Entry, remoteEntry *filer_pb.RemoteEntry) error {
	remoteEntry.LastLocalSyncTsNs = time.Now().UnixNano()
	entry.RemoteEntry = remoteEntry
	return filerClient.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
		_, err := client.UpdateEntry(context.Background(), &filer_pb.UpdateEntryRequest{
			Directory: dir,
			Entry:     entry,
		})
		return err
	})
}
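// isMultipartUploadFile reports whether the entry is a temporary ".part" file of an
// in-progress S3 multipart upload, which is skipped by the sync handlers above.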
func isMultipartUploadFile(dir string, name string) bool {
	return isMultipartUploadDir(dir) && strings.HasSuffix(name, ".part")
}
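// isMultipartUploadDir reports whether the directory is inside a bucket's
// multipart uploads folder.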
func isMultipartUploadDir(dir string) bool {
	return strings.HasPrefix(dir, "/buckets/") &&
		strings.Contains(dir, "/"+s3_constants.MultipartUploadsFolder+"/")
}