You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

240 lines
7.1 KiB

6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
4 years ago
6 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
  1. package filersink
  2. import (
  3. "context"
  4. "fmt"
  5. "github.com/chrislusf/seaweedfs/weed/pb"
  6. "github.com/chrislusf/seaweedfs/weed/wdclient"
  7. "google.golang.org/grpc"
  8. "github.com/chrislusf/seaweedfs/weed/security"
  9. "github.com/chrislusf/seaweedfs/weed/filer"
  10. "github.com/chrislusf/seaweedfs/weed/glog"
  11. "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
  12. "github.com/chrislusf/seaweedfs/weed/replication/sink"
  13. "github.com/chrislusf/seaweedfs/weed/replication/source"
  14. "github.com/chrislusf/seaweedfs/weed/util"
  15. )
// FilerSink replicates filer entries into a destination filer directory.
// It is registered as one of the available replication sinks (see init below
// in this file) and configured via Initialize/DoInitialize.
type FilerSink struct {
	filerSource       *source.FilerSource // source filer that replicated chunk data is read from
	grpcAddress       string              // gRPC address of the destination filer
	dir               string              // destination directory that entries are replicated into
	replication       string              // replication setting applied to written data
	collection        string              // collection assigned to written data
	ttlSec            int32               // TTL in seconds for written data; 0 presumably means no TTL — confirm with assign logic
	diskType          string              // disk type used for volume assignment
	dataCenter        string              // preferred data center; NOTE(review): never set in this file — populated elsewhere or unused
	grpcDialOption    grpc.DialOption     // TLS/credential option for gRPC connections
	address           string              // server address of the destination filer (derived from grpcAddress when empty)
	writeChunkByFiler bool                // NOTE(review): presumably routes chunk uploads through the filer instead of volume servers — confirm in replicateChunks
}
// init registers FilerSink in the global list of available replication sinks.
func init() {
	sink.Sinks = append(sink.Sinks, &FilerSink{})
}
  32. func (fs *FilerSink) GetName() string {
  33. return "filer"
  34. }
  35. func (fs *FilerSink) GetSinkToDirectory() string {
  36. return fs.dir
  37. }
  38. func (fs *FilerSink) Initialize(configuration util.Configuration, prefix string) error {
  39. return fs.DoInitialize(
  40. "",
  41. configuration.GetString(prefix+"grpcAddress"),
  42. configuration.GetString(prefix+"directory"),
  43. configuration.GetString(prefix+"replication"),
  44. configuration.GetString(prefix+"collection"),
  45. configuration.GetInt(prefix+"ttlSec"),
  46. configuration.GetString(prefix+"disk"),
  47. security.LoadClientTLS(util.GetViper(), "grpc.client"),
  48. false)
  49. }
  50. func (fs *FilerSink) SetSourceFiler(s *source.FilerSource) {
  51. fs.filerSource = s
  52. }
  53. func (fs *FilerSink) DoInitialize(address, grpcAddress string, dir string,
  54. replication string, collection string, ttlSec int, diskType string, grpcDialOption grpc.DialOption, writeChunkByFiler bool) (err error) {
  55. fs.address = address
  56. if fs.address == "" {
  57. fs.address = pb.GrpcAddressToServerAddress(grpcAddress)
  58. }
  59. fs.grpcAddress = grpcAddress
  60. fs.dir = dir
  61. fs.replication = replication
  62. fs.collection = collection
  63. fs.ttlSec = int32(ttlSec)
  64. fs.diskType = diskType
  65. fs.grpcDialOption = grpcDialOption
  66. fs.writeChunkByFiler = writeChunkByFiler
  67. return nil
  68. }
  69. func (fs *FilerSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool, signatures []int32) error {
  70. dir, name := util.FullPath(key).DirAndName()
  71. glog.V(4).Infof("delete entry: %v", key)
  72. err := filer_pb.Remove(fs, dir, name, deleteIncludeChunks, true, true, true, signatures)
  73. if err != nil {
  74. glog.V(0).Infof("delete entry %s: %v", key, err)
  75. return fmt.Errorf("delete entry %s: %v", key, err)
  76. }
  77. return nil
  78. }
  79. func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry, signatures []int32) error {
  80. return fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
  81. dir, name := util.FullPath(key).DirAndName()
  82. // look up existing entry
  83. lookupRequest := &filer_pb.LookupDirectoryEntryRequest{
  84. Directory: dir,
  85. Name: name,
  86. }
  87. glog.V(1).Infof("lookup: %v", lookupRequest)
  88. if resp, err := filer_pb.LookupEntry(client, lookupRequest); err == nil {
  89. if filer.ETag(resp.Entry) == filer.ETag(entry) {
  90. glog.V(3).Infof("already replicated %s", key)
  91. return nil
  92. }
  93. }
  94. replicatedChunks, err := fs.replicateChunks(entry.Chunks, key)
  95. if err != nil {
  96. // only warning here since the source chunk may have been deleted already
  97. glog.Warningf("replicate entry chunks %s: %v", key, err)
  98. }
  99. glog.V(4).Infof("replicated %s %+v ===> %+v", key, entry.Chunks, replicatedChunks)
  100. request := &filer_pb.CreateEntryRequest{
  101. Directory: dir,
  102. Entry: &filer_pb.Entry{
  103. Name: name,
  104. IsDirectory: entry.IsDirectory,
  105. Attributes: entry.Attributes,
  106. Chunks: replicatedChunks,
  107. Content: entry.Content,
  108. },
  109. IsFromOtherCluster: true,
  110. Signatures: signatures,
  111. }
  112. glog.V(3).Infof("create: %v", request)
  113. if err := filer_pb.CreateEntry(client, request); err != nil {
  114. glog.V(0).Infof("create entry %s: %v", key, err)
  115. return fmt.Errorf("create entry %s: %v", key, err)
  116. }
  117. return nil
  118. })
  119. }
  120. func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool, signatures []int32) (foundExistingEntry bool, err error) {
  121. dir, name := util.FullPath(key).DirAndName()
  122. // read existing entry
  123. var existingEntry *filer_pb.Entry
  124. err = fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
  125. request := &filer_pb.LookupDirectoryEntryRequest{
  126. Directory: dir,
  127. Name: name,
  128. }
  129. glog.V(4).Infof("lookup entry: %v", request)
  130. resp, err := filer_pb.LookupEntry(client, request)
  131. if err != nil {
  132. glog.V(0).Infof("lookup %s: %v", key, err)
  133. return err
  134. }
  135. existingEntry = resp.Entry
  136. return nil
  137. })
  138. if err != nil {
  139. return false, fmt.Errorf("lookup %s: %v", key, err)
  140. }
  141. glog.V(4).Infof("oldEntry %+v, newEntry %+v, existingEntry: %+v", oldEntry, newEntry, existingEntry)
  142. if existingEntry.Attributes.Mtime > newEntry.Attributes.Mtime {
  143. // skip if already changed
  144. // this usually happens when the messages are not ordered
  145. glog.V(2).Infof("late updates %s", key)
  146. } else if filer.ETag(newEntry) == filer.ETag(existingEntry) {
  147. // skip if no change
  148. // this usually happens when retrying the replication
  149. glog.V(3).Infof("already replicated %s", key)
  150. } else {
  151. // find out what changed
  152. deletedChunks, newChunks, err := compareChunks(filer.LookupFn(fs), oldEntry, newEntry)
  153. if err != nil {
  154. return true, fmt.Errorf("replicte %s compare chunks error: %v", key, err)
  155. }
  156. // delete the chunks that are deleted from the source
  157. if deleteIncludeChunks {
  158. // remove the deleted chunks. Actual data deletion happens in filer UpdateEntry FindUnusedFileChunks
  159. existingEntry.Chunks = filer.DoMinusChunks(existingEntry.Chunks, deletedChunks)
  160. }
  161. // replicate the chunks that are new in the source
  162. replicatedChunks, err := fs.replicateChunks(newChunks, key)
  163. if err != nil {
  164. return true, fmt.Errorf("replicte %s chunks error: %v", key, err)
  165. }
  166. existingEntry.Chunks = append(existingEntry.Chunks, replicatedChunks...)
  167. }
  168. // save updated meta data
  169. return true, fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
  170. request := &filer_pb.UpdateEntryRequest{
  171. Directory: newParentPath,
  172. Entry: existingEntry,
  173. IsFromOtherCluster: true,
  174. Signatures: signatures,
  175. }
  176. if _, err := client.UpdateEntry(context.Background(), request); err != nil {
  177. return fmt.Errorf("update existingEntry %s: %v", key, err)
  178. }
  179. return nil
  180. })
  181. }
  182. func compareChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, oldEntry, newEntry *filer_pb.Entry) (deletedChunks, newChunks []*filer_pb.FileChunk, err error) {
  183. aData, aMeta, aErr := filer.ResolveChunkManifest(lookupFileIdFn, oldEntry.Chunks)
  184. if aErr != nil {
  185. return nil, nil, aErr
  186. }
  187. bData, bMeta, bErr := filer.ResolveChunkManifest(lookupFileIdFn, newEntry.Chunks)
  188. if bErr != nil {
  189. return nil, nil, bErr
  190. }
  191. deletedChunks = append(deletedChunks, filer.DoMinusChunks(aData, bData)...)
  192. deletedChunks = append(deletedChunks, filer.DoMinusChunks(aMeta, bMeta)...)
  193. newChunks = append(newChunks, filer.DoMinusChunks(bData, aData)...)
  194. newChunks = append(newChunks, filer.DoMinusChunks(bMeta, aMeta)...)
  195. return
  196. }