You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

237 lines
7.0 KiB

6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
  1. package filersink
  2. import (
  3. "context"
  4. "fmt"
  5. "github.com/chrislusf/seaweedfs/weed/pb"
  6. "github.com/chrislusf/seaweedfs/weed/wdclient"
  7. "google.golang.org/grpc"
  8. "github.com/chrislusf/seaweedfs/weed/security"
  9. "github.com/chrislusf/seaweedfs/weed/filer"
  10. "github.com/chrislusf/seaweedfs/weed/glog"
  11. "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
  12. "github.com/chrislusf/seaweedfs/weed/replication/sink"
  13. "github.com/chrislusf/seaweedfs/weed/replication/source"
  14. "github.com/chrislusf/seaweedfs/weed/util"
  15. )
// FilerSink replicates filer entries to another (remote) SeaweedFS filer.
type FilerSink struct {
	filerSource       *source.FilerSource // source filer that chunk data is read back from during replication
	grpcAddress       string              // gRPC address of the target filer
	dir               string              // directory on the target filer that replicated entries are written under
	replication       string              // replication setting passed along for newly written chunks
	collection        string              // collection for newly written chunks
	ttlSec            int32               // TTL in seconds for newly written chunks
	dataCenter        string              // preferred data center for volume assignment — NOTE(review): not set in the visible code; presumably set elsewhere
	grpcDialOption    grpc.DialOption     // dial option (TLS etc.) used for gRPC connections to the target filer
	address           string              // server address of the target filer; derived from grpcAddress when left empty (see DoInitialize)
	writeChunkByFiler bool                // when true, chunk data is uploaded through the filer rather than directly to volume servers
}
// init registers FilerSink in the global sink registry so the replication
// framework can discover it by its GetName() identifier.
func init() {
	sink.Sinks = append(sink.Sinks, &FilerSink{})
}
  31. func (fs *FilerSink) GetName() string {
  32. return "filer"
  33. }
  34. func (fs *FilerSink) GetSinkToDirectory() string {
  35. return fs.dir
  36. }
  37. func (fs *FilerSink) Initialize(configuration util.Configuration, prefix string) error {
  38. return fs.DoInitialize(
  39. "",
  40. configuration.GetString(prefix+"grpcAddress"),
  41. configuration.GetString(prefix+"directory"),
  42. configuration.GetString(prefix+"replication"),
  43. configuration.GetString(prefix+"collection"),
  44. configuration.GetInt(prefix+"ttlSec"),
  45. security.LoadClientTLS(util.GetViper(), "grpc.client"),
  46. false)
  47. }
// SetSourceFiler wires in the source filer that replicated chunk data
// will be read from.
func (fs *FilerSink) SetSourceFiler(s *source.FilerSource) {
	fs.filerSource = s
}
  51. func (fs *FilerSink) DoInitialize(address, grpcAddress string, dir string,
  52. replication string, collection string, ttlSec int, grpcDialOption grpc.DialOption, writeChunkByFiler bool) (err error) {
  53. fs.address = address
  54. if fs.address == "" {
  55. fs.address = pb.GrpcAddressToServerAddress(grpcAddress)
  56. }
  57. fs.grpcAddress = grpcAddress
  58. fs.dir = dir
  59. fs.replication = replication
  60. fs.collection = collection
  61. fs.ttlSec = int32(ttlSec)
  62. fs.grpcDialOption = grpcDialOption
  63. fs.writeChunkByFiler = writeChunkByFiler
  64. return nil
  65. }
  66. func (fs *FilerSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool, signatures []int32) error {
  67. dir, name := util.FullPath(key).DirAndName()
  68. glog.V(4).Infof("delete entry: %v", key)
  69. err := filer_pb.Remove(fs, dir, name, deleteIncludeChunks, true, true, true, signatures)
  70. if err != nil {
  71. glog.V(0).Infof("delete entry %s: %v", key, err)
  72. return fmt.Errorf("delete entry %s: %v", key, err)
  73. }
  74. return nil
  75. }
// CreateEntry replicates the given entry to the target filer under key.
// It skips the write when an identical entry (by ETag) already exists,
// otherwise replicates the entry's chunks from the source filer and then
// creates the entry on the target. signatures are forwarded so the target
// can avoid re-replicating this change back (IsFromOtherCluster is set).
func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry, signatures []int32) error {

	return fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {

		dir, name := util.FullPath(key).DirAndName()

		// look up existing entry; a lookup error is deliberately ignored —
		// it most commonly means "not found", in which case we proceed to create
		lookupRequest := &filer_pb.LookupDirectoryEntryRequest{
			Directory: dir,
			Name:      name,
		}
		glog.V(1).Infof("lookup: %v", lookupRequest)
		if resp, err := filer_pb.LookupEntry(client, lookupRequest); err == nil {
			if filer.ETag(resp.Entry) == filer.ETag(entry) {
				// content is already identical on the target; nothing to do
				glog.V(3).Infof("already replicated %s", key)
				return nil
			}
		}

		replicatedChunks, err := fs.replicateChunks(entry.Chunks, key)

		if err != nil {
			// only warning here since the source chunk may have been deleted already
			glog.Warningf("replicate entry chunks %s: %v", key, err)
		}

		glog.V(4).Infof("replicated %s %+v ===> %+v", key, entry.Chunks, replicatedChunks)

		// create the entry on the target with the replicated chunk list
		request := &filer_pb.CreateEntryRequest{
			Directory: dir,
			Entry: &filer_pb.Entry{
				Name:        name,
				IsDirectory: entry.IsDirectory,
				Attributes:  entry.Attributes,
				Chunks:      replicatedChunks,
				Content:     entry.Content,
			},
			IsFromOtherCluster: true,
			Signatures:         signatures,
		}

		glog.V(3).Infof("create: %v", request)
		if err := filer_pb.CreateEntry(client, request); err != nil {
			glog.V(0).Infof("create entry %s: %v", key, err)
			return fmt.Errorf("create entry %s: %v", key, err)
		}

		return nil
	})
}
  117. func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool, signatures []int32) (foundExistingEntry bool, err error) {
  118. dir, name := util.FullPath(key).DirAndName()
  119. // read existing entry
  120. var existingEntry *filer_pb.Entry
  121. err = fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
  122. request := &filer_pb.LookupDirectoryEntryRequest{
  123. Directory: dir,
  124. Name: name,
  125. }
  126. glog.V(4).Infof("lookup entry: %v", request)
  127. resp, err := filer_pb.LookupEntry(client, request)
  128. if err != nil {
  129. glog.V(0).Infof("lookup %s: %v", key, err)
  130. return err
  131. }
  132. existingEntry = resp.Entry
  133. return nil
  134. })
  135. if err != nil {
  136. return false, fmt.Errorf("lookup %s: %v", key, err)
  137. }
  138. glog.V(4).Infof("oldEntry %+v, newEntry %+v, existingEntry: %+v", oldEntry, newEntry, existingEntry)
  139. if existingEntry.Attributes.Mtime > newEntry.Attributes.Mtime {
  140. // skip if already changed
  141. // this usually happens when the messages are not ordered
  142. glog.V(2).Infof("late updates %s", key)
  143. } else if filer.ETag(newEntry) == filer.ETag(existingEntry) {
  144. // skip if no change
  145. // this usually happens when retrying the replication
  146. glog.V(3).Infof("already replicated %s", key)
  147. } else {
  148. // find out what changed
  149. deletedChunks, newChunks, err := compareChunks(filer.LookupFn(fs), oldEntry, newEntry)
  150. if err != nil {
  151. return true, fmt.Errorf("replicte %s compare chunks error: %v", key, err)
  152. }
  153. // delete the chunks that are deleted from the source
  154. if deleteIncludeChunks {
  155. // remove the deleted chunks. Actual data deletion happens in filer UpdateEntry FindUnusedFileChunks
  156. existingEntry.Chunks = filer.DoMinusChunks(existingEntry.Chunks, deletedChunks)
  157. }
  158. // replicate the chunks that are new in the source
  159. replicatedChunks, err := fs.replicateChunks(newChunks, key)
  160. if err != nil {
  161. return true, fmt.Errorf("replicte %s chunks error: %v", key, err)
  162. }
  163. existingEntry.Chunks = append(existingEntry.Chunks, replicatedChunks...)
  164. }
  165. // save updated meta data
  166. return true, fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
  167. request := &filer_pb.UpdateEntryRequest{
  168. Directory: newParentPath,
  169. Entry: existingEntry,
  170. IsFromOtherCluster: true,
  171. Signatures: signatures,
  172. }
  173. if _, err := client.UpdateEntry(context.Background(), request); err != nil {
  174. return fmt.Errorf("update existingEntry %s: %v", key, err)
  175. }
  176. return nil
  177. })
  178. }
  179. func compareChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, oldEntry, newEntry *filer_pb.Entry) (deletedChunks, newChunks []*filer_pb.FileChunk, err error) {
  180. aData, aMeta, aErr := filer.ResolveChunkManifest(lookupFileIdFn, oldEntry.Chunks)
  181. if aErr != nil {
  182. return nil, nil, aErr
  183. }
  184. bData, bMeta, bErr := filer.ResolveChunkManifest(lookupFileIdFn, newEntry.Chunks)
  185. if bErr != nil {
  186. return nil, nil, bErr
  187. }
  188. deletedChunks = append(deletedChunks, filer.DoMinusChunks(aData, bData)...)
  189. deletedChunks = append(deletedChunks, filer.DoMinusChunks(aMeta, bMeta)...)
  190. newChunks = append(newChunks, filer.DoMinusChunks(bData, aData)...)
  191. newChunks = append(newChunks, filer.DoMinusChunks(bMeta, aMeta)...)
  192. return
  193. }