You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

247 lines
7.3 KiB

6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
4 years ago
4 years ago
6 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
  1. package filersink
  2. import (
  3. "context"
  4. "fmt"
  5. "github.com/chrislusf/seaweedfs/weed/pb"
  6. "github.com/chrislusf/seaweedfs/weed/wdclient"
  7. "google.golang.org/grpc"
  8. "github.com/chrislusf/seaweedfs/weed/security"
  9. "github.com/chrislusf/seaweedfs/weed/filer"
  10. "github.com/chrislusf/seaweedfs/weed/glog"
  11. "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
  12. "github.com/chrislusf/seaweedfs/weed/replication/sink"
  13. "github.com/chrislusf/seaweedfs/weed/replication/source"
  14. "github.com/chrislusf/seaweedfs/weed/util"
  15. )
// FilerSink replicates filer entries to a destination filer. It is registered
// as a replication sink in init() and configured via Initialize/DoInitialize.
type FilerSink struct {
	filerSource       *source.FilerSource // source filer to read chunk data from; set by SetSourceFiler
	grpcAddress       string              // gRPC address of the destination filer
	dir               string              // destination directory (see GetSinkToDirectory)
	replication       string
	collection        string
	ttlSec            int32
	diskType          string
	dataCenter        string // NOTE(review): never assigned in this file — presumably set elsewhere; confirm
	grpcDialOption    grpc.DialOption
	address           string // destination server address; derived from grpcAddress when empty (DoInitialize)
	writeChunkByFiler bool
	isIncremental     bool // reported by IsIncremental()
}
  30. func init() {
  31. sink.Sinks = append(sink.Sinks, &FilerSink{})
  32. }
// GetName returns the sink type identifier used in replication configuration.
func (fs *FilerSink) GetName() string {
	return "filer"
}
// GetSinkToDirectory returns the destination directory entries are replicated into.
func (fs *FilerSink) GetSinkToDirectory() string {
	return fs.dir
}
// IsIncremental reports whether this sink only receives incremental changes.
func (fs *FilerSink) IsIncremental() bool {
	return fs.isIncremental
}
  42. func (fs *FilerSink) Initialize(configuration util.Configuration, prefix string) error {
  43. fs.isIncremental = configuration.GetBool(prefix + "is_incremental")
  44. return fs.DoInitialize(
  45. "",
  46. configuration.GetString(prefix+"grpcAddress"),
  47. configuration.GetString(prefix+"directory"),
  48. configuration.GetString(prefix+"replication"),
  49. configuration.GetString(prefix+"collection"),
  50. configuration.GetInt(prefix+"ttlSec"),
  51. configuration.GetString(prefix+"disk"),
  52. security.LoadClientTLS(util.GetViper(), "grpc.client"),
  53. false)
  54. }
// SetSourceFiler sets the source filer that replicated chunk data is read from.
func (fs *FilerSink) SetSourceFiler(s *source.FilerSource) {
	fs.filerSource = s
}
  58. func (fs *FilerSink) DoInitialize(address, grpcAddress string, dir string,
  59. replication string, collection string, ttlSec int, diskType string, grpcDialOption grpc.DialOption, writeChunkByFiler bool) (err error) {
  60. fs.address = address
  61. if fs.address == "" {
  62. fs.address = pb.GrpcAddressToServerAddress(grpcAddress)
  63. }
  64. fs.grpcAddress = grpcAddress
  65. fs.dir = dir
  66. fs.replication = replication
  67. fs.collection = collection
  68. fs.ttlSec = int32(ttlSec)
  69. fs.diskType = diskType
  70. fs.grpcDialOption = grpcDialOption
  71. fs.writeChunkByFiler = writeChunkByFiler
  72. return nil
  73. }
  74. func (fs *FilerSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool, signatures []int32) error {
  75. dir, name := util.FullPath(key).DirAndName()
  76. glog.V(4).Infof("delete entry: %v", key)
  77. err := filer_pb.Remove(fs, dir, name, deleteIncludeChunks, true, true, true, signatures)
  78. if err != nil {
  79. glog.V(0).Infof("delete entry %s: %v", key, err)
  80. return fmt.Errorf("delete entry %s: %v", key, err)
  81. }
  82. return nil
  83. }
  84. func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry, signatures []int32) error {
  85. return fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
  86. dir, name := util.FullPath(key).DirAndName()
  87. // look up existing entry
  88. lookupRequest := &filer_pb.LookupDirectoryEntryRequest{
  89. Directory: dir,
  90. Name: name,
  91. }
  92. glog.V(1).Infof("lookup: %v", lookupRequest)
  93. if resp, err := filer_pb.LookupEntry(client, lookupRequest); err == nil {
  94. if filer.ETag(resp.Entry) == filer.ETag(entry) {
  95. glog.V(3).Infof("already replicated %s", key)
  96. return nil
  97. }
  98. }
  99. replicatedChunks, err := fs.replicateChunks(entry.Chunks, key)
  100. if err != nil {
  101. // only warning here since the source chunk may have been deleted already
  102. glog.Warningf("replicate entry chunks %s: %v", key, err)
  103. }
  104. glog.V(4).Infof("replicated %s %+v ===> %+v", key, entry.Chunks, replicatedChunks)
  105. request := &filer_pb.CreateEntryRequest{
  106. Directory: dir,
  107. Entry: &filer_pb.Entry{
  108. Name: name,
  109. IsDirectory: entry.IsDirectory,
  110. Attributes: entry.Attributes,
  111. Chunks: replicatedChunks,
  112. Content: entry.Content,
  113. Remote: entry.Remote,
  114. },
  115. IsFromOtherCluster: true,
  116. Signatures: signatures,
  117. }
  118. glog.V(3).Infof("create: %v", request)
  119. if err := filer_pb.CreateEntry(client, request); err != nil {
  120. glog.V(0).Infof("create entry %s: %v", key, err)
  121. return fmt.Errorf("create entry %s: %v", key, err)
  122. }
  123. return nil
  124. })
  125. }
  126. func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool, signatures []int32) (foundExistingEntry bool, err error) {
  127. dir, name := util.FullPath(key).DirAndName()
  128. // read existing entry
  129. var existingEntry *filer_pb.Entry
  130. err = fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
  131. request := &filer_pb.LookupDirectoryEntryRequest{
  132. Directory: dir,
  133. Name: name,
  134. }
  135. glog.V(4).Infof("lookup entry: %v", request)
  136. resp, err := filer_pb.LookupEntry(client, request)
  137. if err != nil {
  138. glog.V(0).Infof("lookup %s: %v", key, err)
  139. return err
  140. }
  141. existingEntry = resp.Entry
  142. return nil
  143. })
  144. if err != nil {
  145. return false, fmt.Errorf("lookup %s: %v", key, err)
  146. }
  147. glog.V(4).Infof("oldEntry %+v, newEntry %+v, existingEntry: %+v", oldEntry, newEntry, existingEntry)
  148. if existingEntry.Attributes.Mtime > newEntry.Attributes.Mtime {
  149. // skip if already changed
  150. // this usually happens when the messages are not ordered
  151. glog.V(2).Infof("late updates %s", key)
  152. } else if filer.ETag(newEntry) == filer.ETag(existingEntry) {
  153. // skip if no change
  154. // this usually happens when retrying the replication
  155. glog.V(3).Infof("already replicated %s", key)
  156. } else {
  157. // find out what changed
  158. deletedChunks, newChunks, err := compareChunks(filer.LookupFn(fs), oldEntry, newEntry)
  159. if err != nil {
  160. return true, fmt.Errorf("replicte %s compare chunks error: %v", key, err)
  161. }
  162. // delete the chunks that are deleted from the source
  163. if deleteIncludeChunks {
  164. // remove the deleted chunks. Actual data deletion happens in filer UpdateEntry FindUnusedFileChunks
  165. existingEntry.Chunks = filer.DoMinusChunks(existingEntry.Chunks, deletedChunks)
  166. }
  167. // replicate the chunks that are new in the source
  168. replicatedChunks, err := fs.replicateChunks(newChunks, key)
  169. if err != nil {
  170. return true, fmt.Errorf("replicte %s chunks error: %v", key, err)
  171. }
  172. existingEntry.Chunks = append(existingEntry.Chunks, replicatedChunks...)
  173. }
  174. // save updated meta data
  175. return true, fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
  176. request := &filer_pb.UpdateEntryRequest{
  177. Directory: newParentPath,
  178. Entry: existingEntry,
  179. IsFromOtherCluster: true,
  180. Signatures: signatures,
  181. }
  182. if _, err := client.UpdateEntry(context.Background(), request); err != nil {
  183. return fmt.Errorf("update existingEntry %s: %v", key, err)
  184. }
  185. return nil
  186. })
  187. }
  188. func compareChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, oldEntry, newEntry *filer_pb.Entry) (deletedChunks, newChunks []*filer_pb.FileChunk, err error) {
  189. aData, aMeta, aErr := filer.ResolveChunkManifest(lookupFileIdFn, oldEntry.Chunks)
  190. if aErr != nil {
  191. return nil, nil, aErr
  192. }
  193. bData, bMeta, bErr := filer.ResolveChunkManifest(lookupFileIdFn, newEntry.Chunks)
  194. if bErr != nil {
  195. return nil, nil, bErr
  196. }
  197. deletedChunks = append(deletedChunks, filer.DoMinusChunks(aData, bData)...)
  198. deletedChunks = append(deletedChunks, filer.DoMinusChunks(aMeta, bMeta)...)
  199. newChunks = append(newChunks, filer.DoMinusChunks(bData, aData)...)
  200. newChunks = append(newChunks, filer.DoMinusChunks(bMeta, aMeta)...)
  201. return
  202. }