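// Package filersink implements a replication sink that mirrors filer entries
// from a source SeaweedFS cluster to another SeaweedFS filer over gRPC.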
package filersink

import (
	"context"
	"fmt"

	"google.golang.org/grpc"

	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/replication/sink"
	"github.com/chrislusf/seaweedfs/weed/replication/source"
	"github.com/chrislusf/seaweedfs/weed/security"
	"github.com/chrislusf/seaweedfs/weed/util"
)

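// FilerSink holds the destination filer address and the write parameters
// (directory, replication, collection, TTL) used when copying entries over.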
type FilerSink struct {
	filerSource    *source.FilerSource
	grpcAddress    string
	dir            string
	replication    string
	collection     string
	ttlSec         int32
	dataCenter     string
	grpcDialOption grpc.DialOption
}

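// init registers this sink so the replication framework can select it by name.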
func init() {
	sink.Sinks = append(sink.Sinks, &FilerSink{})
}

func (fs *FilerSink) GetName() string {
	return "filer"
}

func (fs *FilerSink) GetSinkToDirectory() string {
	return fs.dir
}

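// Initialize reads the sink settings from the replication configuration, using
// prefix (for example "sink.filer.") to locate its keys. A sketch of what the
// corresponding TOML section might look like, derived from the keys read below
// (the section name, the enabled flag, and the example values are assumptions,
// not taken from this file):
//
//	[sink.filer]
//	enabled = true
//	grpcAddress = "localhost:18888"
//	directory = "/backup"
//	replication = ""
//	collection = ""
//	ttlSec = 0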
func (fs *FilerSink) Initialize(configuration util.Configuration, prefix string) error {
	return fs.DoInitialize(
		configuration.GetString(prefix+"grpcAddress"),
		configuration.GetString(prefix+"directory"),
		configuration.GetString(prefix+"replication"),
		configuration.GetString(prefix+"collection"),
		configuration.GetInt(prefix+"ttlSec"),
		security.LoadClientTLS(util.GetViper(), "grpc.client"))
}

func (fs *FilerSink) SetSourceFiler(s *source.FilerSource) {
	fs.filerSource = s
}

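// DoInitialize stores the connection and write parameters on the sink.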
func (fs *FilerSink) DoInitialize(grpcAddress string, dir string,
	replication string, collection string, ttlSec int, grpcDialOption grpc.DialOption) (err error) {
	fs.grpcAddress = grpcAddress
	fs.dir = dir
	fs.replication = replication
	fs.collection = collection
	fs.ttlSec = int32(ttlSec)
	fs.grpcDialOption = grpcDialOption
	return nil
}

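// DeleteEntry removes the replicated entry on the destination filer;
// deleteIncludeChunks controls whether the chunk data is removed as well.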
func (fs *FilerSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool, signatures []int32) error {
	dir, name := util.FullPath(key).DirAndName()

	glog.V(4).Infof("delete entry: %v", key)
	err := filer_pb.Remove(fs, dir, name, deleteIncludeChunks, true, true, true, signatures)
	if err != nil {
		glog.V(0).Infof("delete entry %s: %v", key, err)
		return fmt.Errorf("delete entry %s: %v", key, err)
	}
	return nil
}

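// CreateEntry replicates one entry to the destination filer: it skips the write
// if an entry with the same ETag already exists, copies the source chunks first,
// and then creates the entry pointing at the replicated chunks.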
func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry, signatures []int32) error {
	return fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {

		dir, name := util.FullPath(key).DirAndName()

		// look up existing entry
		lookupRequest := &filer_pb.LookupDirectoryEntryRequest{
			Directory: dir,
			Name:      name,
		}
		glog.V(1).Infof("lookup: %v", lookupRequest)
		if resp, err := filer_pb.LookupEntry(client, lookupRequest); err == nil {
			if filer.ETag(resp.Entry) == filer.ETag(entry) {
				glog.V(3).Infof("already replicated %s", key)
				return nil
			}
		}

		replicatedChunks, err := fs.replicateChunks(entry.Chunks, key)
		if err != nil {
			// only warning here since the source chunk may have been deleted already
			glog.Warningf("replicate entry chunks %s: %v", key, err)
		}

		glog.V(4).Infof("replicated %s %+v ===> %+v", key, entry.Chunks, replicatedChunks)

		request := &filer_pb.CreateEntryRequest{
			Directory: dir,
			Entry: &filer_pb.Entry{
				Name:        name,
				IsDirectory: entry.IsDirectory,
				Attributes:  entry.Attributes,
				Chunks:      replicatedChunks,
				Content:     entry.Content,
			},
			IsFromOtherCluster: true,
			Signatures:         signatures,
		}

		glog.V(3).Infof("create: %v", request)
		if err := filer_pb.CreateEntry(client, request); err != nil {
			glog.V(0).Infof("create entry %s: %v", key, err)
			return fmt.Errorf("create entry %s: %v", key, err)
		}

		return nil
	})
}

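// UpdateEntry applies an entry change to the destination filer. It looks up the
// existing entry, skips stale or identical updates, replicates only the chunks
// that changed, and then saves the patched entry.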
func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool, signatures []int32) (foundExistingEntry bool, err error) {

	dir, name := util.FullPath(key).DirAndName()

	// read existing entry
	var existingEntry *filer_pb.Entry
	err = fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {

		request := &filer_pb.LookupDirectoryEntryRequest{
			Directory: dir,
			Name:      name,
		}

		glog.V(4).Infof("lookup entry: %v", request)
		resp, err := filer_pb.LookupEntry(client, request)
		if err != nil {
			glog.V(0).Infof("lookup %s: %v", key, err)
			return err
		}

		existingEntry = resp.Entry

		return nil
	})

	if err != nil {
		return false, fmt.Errorf("lookup %s: %v", key, err)
	}

	glog.V(4).Infof("oldEntry %+v, newEntry %+v, existingEntry: %+v", oldEntry, newEntry, existingEntry)

	if existingEntry.Attributes.Mtime > newEntry.Attributes.Mtime {
		// skip if already changed
		// this usually happens when the messages are not ordered
		glog.V(2).Infof("late updates %s", key)
	} else if filer.ETag(newEntry) == filer.ETag(existingEntry) {
		// skip if no change
		// this usually happens when retrying the replication
		glog.V(3).Infof("already replicated %s", key)
	} else {
		// find out what changed
		deletedChunks, newChunks, err := compareChunks(filer.LookupFn(fs), oldEntry, newEntry)
		if err != nil {
			return true, fmt.Errorf("replicate %s compare chunks error: %v", key, err)
		}

		// delete the chunks that are deleted from the source
		if deleteIncludeChunks {
			// remove the deleted chunks; actual data deletion happens in filer UpdateEntry FindUnusedFileChunks
			existingEntry.Chunks = filer.DoMinusChunks(existingEntry.Chunks, deletedChunks)
		}

		// replicate the chunks that are new in the source
		replicatedChunks, err := fs.replicateChunks(newChunks, key)
		if err != nil {
			return true, fmt.Errorf("replicate %s chunks error: %v", key, err)
		}
		existingEntry.Chunks = append(existingEntry.Chunks, replicatedChunks...)
	}

	// save the updated metadata
	return true, fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {

		request := &filer_pb.UpdateEntryRequest{
			Directory:          newParentPath,
			Entry:              existingEntry,
			IsFromOtherCluster: true,
			Signatures:         signatures,
		}

		if _, err := client.UpdateEntry(context.Background(), request); err != nil {
			return fmt.Errorf("update existingEntry %s: %v", key, err)
		}

		return nil
	})
}

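// compareChunks resolves the chunk manifests of both entries and returns the
// chunks present only in the old entry (deleted) and only in the new entry (added).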
func compareChunks(lookupFileIdFn filer.LookupFileIdFunctionType, oldEntry, newEntry *filer_pb.Entry) (deletedChunks, newChunks []*filer_pb.FileChunk, err error) {
	aData, aMeta, aErr := filer.ResolveChunkManifest(lookupFileIdFn, oldEntry.Chunks)
	if aErr != nil {
		return nil, nil, aErr
	}
	bData, bMeta, bErr := filer.ResolveChunkManifest(lookupFileIdFn, newEntry.Chunks)
	if bErr != nil {
		return nil, nil, bErr
	}

	deletedChunks = append(deletedChunks, filer.DoMinusChunks(aData, bData)...)
	deletedChunks = append(deletedChunks, filer.DoMinusChunks(aMeta, bMeta)...)

	newChunks = append(newChunks, filer.DoMinusChunks(bData, aData)...)
	newChunks = append(newChunks, filer.DoMinusChunks(bMeta, aMeta)...)

	return
}