You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

254 lines
7.7 KiB

6 years ago
6 years ago
3 years ago
4 years ago
4 years ago
6 years ago
4 years ago
5 years ago
5 years ago
5 years ago
3 years ago
5 years ago
  1. package filersink
  2. import (
  3. "context"
  4. "fmt"
  5. "github.com/seaweedfs/seaweedfs/weed/pb"
  6. "github.com/seaweedfs/seaweedfs/weed/wdclient"
  7. "math"
  8. "google.golang.org/grpc"
  9. "github.com/seaweedfs/seaweedfs/weed/security"
  10. "github.com/seaweedfs/seaweedfs/weed/filer"
  11. "github.com/seaweedfs/seaweedfs/weed/glog"
  12. "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
  13. "github.com/seaweedfs/seaweedfs/weed/replication/sink"
  14. "github.com/seaweedfs/seaweedfs/weed/replication/source"
  15. "github.com/seaweedfs/seaweedfs/weed/util"
  16. )
// FilerSink replicates file system entries into a destination filer cluster.
// It is registered as a replication sink (see init) and configured either via
// Initialize (from replication configuration) or DoInitialize (directly).
type FilerSink struct {
	filerSource       *source.FilerSource // source filer to read chunk data from; set via SetSourceFiler
	grpcAddress       string              // gRPC address of the destination filer
	dir               string              // destination directory that replicated entries are written under
	replication       string              // replication setting for the destination data
	collection        string              // collection name for the destination data
	ttlSec            int32               // TTL in seconds applied to the destination data
	diskType          string              // disk type hint for the destination data
	dataCenter        string              // preferred data center (not set in the visible init paths — TODO confirm where it is populated)
	grpcDialOption    grpc.DialOption     // dial option (TLS, etc.) for gRPC connections
	address           string              // server address of the destination filer; derived from grpcAddress when empty
	writeChunkByFiler bool                // presumably: route chunk uploads through the filer rather than volume servers — verify in replicateChunks
	isIncremental     bool                // whether this sink receives only incremental updates (see IsIncremental)
}
  31. func init() {
  32. sink.Sinks = append(sink.Sinks, &FilerSink{})
  33. }
  34. func (fs *FilerSink) GetName() string {
  35. return "filer"
  36. }
// GetSinkToDirectory returns the destination directory that replicated
// entries are written under.
func (fs *FilerSink) GetSinkToDirectory() string {
	return fs.dir
}
// IsIncremental reports whether this sink is configured to receive only
// incremental updates (the "is_incremental" configuration key).
func (fs *FilerSink) IsIncremental() bool {
	return fs.isIncremental
}
  43. func (fs *FilerSink) Initialize(configuration util.Configuration, prefix string) error {
  44. fs.isIncremental = configuration.GetBool(prefix + "is_incremental")
  45. return fs.DoInitialize(
  46. "",
  47. configuration.GetString(prefix+"grpcAddress"),
  48. configuration.GetString(prefix+"directory"),
  49. configuration.GetString(prefix+"replication"),
  50. configuration.GetString(prefix+"collection"),
  51. configuration.GetInt(prefix+"ttlSec"),
  52. configuration.GetString(prefix+"disk"),
  53. security.LoadClientTLS(util.GetViper(), "grpc.client"),
  54. false)
  55. }
// SetSourceFiler wires in the source filer that chunk data is read from
// during replication.
func (fs *FilerSink) SetSourceFiler(s *source.FilerSource) {
	fs.filerSource = s
}
  59. func (fs *FilerSink) DoInitialize(address, grpcAddress string, dir string,
  60. replication string, collection string, ttlSec int, diskType string, grpcDialOption grpc.DialOption, writeChunkByFiler bool) (err error) {
  61. fs.address = address
  62. if fs.address == "" {
  63. fs.address = pb.GrpcAddressToServerAddress(grpcAddress)
  64. }
  65. fs.grpcAddress = grpcAddress
  66. fs.dir = dir
  67. fs.replication = replication
  68. fs.collection = collection
  69. fs.ttlSec = int32(ttlSec)
  70. fs.diskType = diskType
  71. fs.grpcDialOption = grpcDialOption
  72. fs.writeChunkByFiler = writeChunkByFiler
  73. return nil
  74. }
  75. func (fs *FilerSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool, signatures []int32) error {
  76. dir, name := util.FullPath(key).DirAndName()
  77. glog.V(4).Infof("delete entry: %v", key)
  78. err := filer_pb.Remove(fs, dir, name, deleteIncludeChunks, true, true, true, signatures)
  79. if err != nil {
  80. glog.V(0).Infof("delete entry %s: %v", key, err)
  81. return fmt.Errorf("delete entry %s: %v", key, err)
  82. }
  83. return nil
  84. }
  85. func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry, signatures []int32) error {
  86. return fs.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
  87. dir, name := util.FullPath(key).DirAndName()
  88. // look up existing entry
  89. lookupRequest := &filer_pb.LookupDirectoryEntryRequest{
  90. Directory: dir,
  91. Name: name,
  92. }
  93. glog.V(1).Infof("lookup: %v", lookupRequest)
  94. if resp, err := filer_pb.LookupEntry(client, lookupRequest); err == nil {
  95. if filer.ETag(resp.Entry) == filer.ETag(entry) {
  96. glog.V(3).Infof("already replicated %s", key)
  97. return nil
  98. }
  99. }
  100. replicatedChunks, err := fs.replicateChunks(entry.Chunks, key)
  101. if err != nil {
  102. // only warning here since the source chunk may have been deleted already
  103. glog.Warningf("replicate entry chunks %s: %v", key, err)
  104. }
  105. glog.V(4).Infof("replicated %s %+v ===> %+v", key, entry.Chunks, replicatedChunks)
  106. request := &filer_pb.CreateEntryRequest{
  107. Directory: dir,
  108. Entry: &filer_pb.Entry{
  109. Name: name,
  110. IsDirectory: entry.IsDirectory,
  111. Attributes: entry.Attributes,
  112. Chunks: replicatedChunks,
  113. Content: entry.Content,
  114. RemoteEntry: entry.RemoteEntry,
  115. },
  116. IsFromOtherCluster: true,
  117. Signatures: signatures,
  118. }
  119. glog.V(3).Infof("create: %v", request)
  120. if err := filer_pb.CreateEntry(client, request); err != nil {
  121. glog.V(0).Infof("create entry %s: %v", key, err)
  122. return fmt.Errorf("create entry %s: %v", key, err)
  123. }
  124. return nil
  125. })
  126. }
  127. func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool, signatures []int32) (foundExistingEntry bool, err error) {
  128. dir, name := util.FullPath(key).DirAndName()
  129. // read existing entry
  130. var existingEntry *filer_pb.Entry
  131. err = fs.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
  132. request := &filer_pb.LookupDirectoryEntryRequest{
  133. Directory: dir,
  134. Name: name,
  135. }
  136. glog.V(4).Infof("lookup entry: %v", request)
  137. resp, err := filer_pb.LookupEntry(client, request)
  138. if err != nil {
  139. glog.V(0).Infof("lookup %s: %v", key, err)
  140. return err
  141. }
  142. existingEntry = resp.Entry
  143. return nil
  144. })
  145. if err != nil {
  146. return false, fmt.Errorf("lookup %s: %v", key, err)
  147. }
  148. glog.V(4).Infof("oldEntry %+v, newEntry %+v, existingEntry: %+v", oldEntry, newEntry, existingEntry)
  149. if existingEntry.Attributes.Mtime > newEntry.Attributes.Mtime {
  150. // skip if already changed
  151. // this usually happens when the messages are not ordered
  152. glog.V(2).Infof("late updates %s", key)
  153. } else if filer.ETag(newEntry) == filer.ETag(existingEntry) {
  154. // skip if no change
  155. // this usually happens when retrying the replication
  156. glog.V(3).Infof("already replicated %s", key)
  157. } else {
  158. // find out what changed
  159. deletedChunks, newChunks, err := compareChunks(filer.LookupFn(fs), oldEntry, newEntry)
  160. if err != nil {
  161. return true, fmt.Errorf("replicte %s compare chunks error: %v", key, err)
  162. }
  163. // delete the chunks that are deleted from the source
  164. if deleteIncludeChunks {
  165. // remove the deleted chunks. Actual data deletion happens in filer UpdateEntry FindUnusedFileChunks
  166. existingEntry.Chunks = filer.DoMinusChunksBySourceFileId(existingEntry.Chunks, deletedChunks)
  167. }
  168. // replicate the chunks that are new in the source
  169. replicatedChunks, err := fs.replicateChunks(newChunks, key)
  170. if err != nil {
  171. return true, fmt.Errorf("replicte %s chunks error: %v", key, err)
  172. }
  173. existingEntry.Chunks = append(existingEntry.Chunks, replicatedChunks...)
  174. existingEntry.Attributes = newEntry.Attributes
  175. existingEntry.Extended = newEntry.Extended
  176. existingEntry.HardLinkId = newEntry.HardLinkId
  177. existingEntry.HardLinkCounter = newEntry.HardLinkCounter
  178. existingEntry.Content = newEntry.Content
  179. existingEntry.RemoteEntry = newEntry.RemoteEntry
  180. }
  181. // save updated meta data
  182. return true, fs.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
  183. request := &filer_pb.UpdateEntryRequest{
  184. Directory: newParentPath,
  185. Entry: existingEntry,
  186. IsFromOtherCluster: true,
  187. Signatures: signatures,
  188. }
  189. if _, err := client.UpdateEntry(context.Background(), request); err != nil {
  190. return fmt.Errorf("update existingEntry %s: %v", key, err)
  191. }
  192. return nil
  193. })
  194. }
  195. func compareChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, oldEntry, newEntry *filer_pb.Entry) (deletedChunks, newChunks []*filer_pb.FileChunk, err error) {
  196. aData, aMeta, aErr := filer.ResolveChunkManifest(lookupFileIdFn, oldEntry.Chunks, 0, math.MaxInt64)
  197. if aErr != nil {
  198. return nil, nil, aErr
  199. }
  200. bData, bMeta, bErr := filer.ResolveChunkManifest(lookupFileIdFn, newEntry.Chunks, 0, math.MaxInt64)
  201. if bErr != nil {
  202. return nil, nil, bErr
  203. }
  204. deletedChunks = append(deletedChunks, filer.DoMinusChunks(aData, bData)...)
  205. deletedChunks = append(deletedChunks, filer.DoMinusChunks(aMeta, bMeta)...)
  206. newChunks = append(newChunks, filer.DoMinusChunks(bData, aData)...)
  207. newChunks = append(newChunks, filer.DoMinusChunks(bMeta, aMeta)...)
  208. return
  209. }