package filersink

import (
	"context"
	"fmt"

	"google.golang.org/grpc"

	"github.com/chrislusf/seaweedfs/weed/filer2"
	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/replication/sink"
	"github.com/chrislusf/seaweedfs/weed/replication/source"
	"github.com/chrislusf/seaweedfs/weed/security"
	"github.com/chrislusf/seaweedfs/weed/util"
)
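
// FilerSink replicates filer entries to another SeaweedFS filer over gRPC.
// It registers itself in init() as one of the available replication sinks.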
type FilerSink struct {
	filerSource    *source.FilerSource
	grpcAddress    string
	dir            string
	replication    string
	collection     string
	ttlSec         int32
	dataCenter     string
	grpcDialOption grpc.DialOption
}

func init() {
	sink.Sinks = append(sink.Sinks, &FilerSink{})
}

func (fs *FilerSink) GetName() string {
	return "filer"
}

func (fs *FilerSink) GetSinkToDirectory() string {
	return fs.dir
}
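
// Initialize reads the sink settings (grpcAddress, directory, replication,
// collection, ttlSec) from the configuration, using the given key prefix.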
func (fs *FilerSink) Initialize(configuration util.Configuration, prefix string) error {
	return fs.initialize(
		configuration.GetString(prefix+"grpcAddress"),
		configuration.GetString(prefix+"directory"),
		configuration.GetString(prefix+"replication"),
		configuration.GetString(prefix+"collection"),
		configuration.GetInt(prefix+"ttlSec"),
	)
}

func (fs *FilerSink) SetSourceFiler(s *source.FilerSource) {
	fs.filerSource = s
}
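
// initialize stores the sink settings and loads the gRPC client TLS
// options from the "grpc.client" configuration section.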
func (fs *FilerSink) initialize(grpcAddress string, dir string,
	replication string, collection string, ttlSec int) (err error) {
	fs.grpcAddress = grpcAddress
	fs.dir = dir
	fs.replication = replication
	fs.collection = collection
	fs.ttlSec = int32(ttlSec)
	fs.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
	return nil
}
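
// DeleteEntry removes the entry identified by key from the destination
// filer; deleteIncludeChunks is passed through so the referenced chunks
// can be deleted as well.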
func (fs *FilerSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error {
	dir, name := util.FullPath(key).DirAndName()

	glog.V(1).Infof("delete entry: %v", key)
	err := filer_pb.Remove(fs, dir, name, deleteIncludeChunks, false, false)
	if err != nil {
		glog.V(0).Infof("delete entry %s: %v", key, err)
		return fmt.Errorf("delete entry %s: %v", key, err)
	}
	return nil
}
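
// CreateEntry replicates one entry to the destination filer. If an entry
// with the same ETag already exists, the call is a no-op; otherwise the
// source chunks are replicated first and the metadata is created afterwards.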
func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry) error {
	return fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {

		dir, name := util.FullPath(key).DirAndName()

		// look up existing entry
		lookupRequest := &filer_pb.LookupDirectoryEntryRequest{
			Directory: dir,
			Name:      name,
		}
		glog.V(1).Infof("lookup: %v", lookupRequest)
		if resp, err := filer_pb.LookupEntry(client, lookupRequest); err == nil {
			if filer2.ETag(resp.Entry) == filer2.ETag(entry) {
				glog.V(0).Infof("already replicated %s", key)
				return nil
			}
		}

		replicatedChunks, err := fs.replicateChunks(entry.Chunks, dir)
		if err != nil {
			glog.V(0).Infof("replicate entry chunks %s: %v", key, err)
			return fmt.Errorf("replicate entry chunks %s: %v", key, err)
		}

		glog.V(0).Infof("replicated %s %+v ===> %+v", key, entry.Chunks, replicatedChunks)

		request := &filer_pb.CreateEntryRequest{
			Directory: dir,
			Entry: &filer_pb.Entry{
				Name:        name,
				IsDirectory: entry.IsDirectory,
				Attributes:  entry.Attributes,
				Chunks:      replicatedChunks,
			},
		}

		glog.V(1).Infof("create: %v", request)
		if err := filer_pb.CreateEntry(client, request); err != nil {
			glog.V(0).Infof("create entry %s: %v", key, err)
			return fmt.Errorf("create entry %s: %v", key, err)
		}

		return nil
	})
}
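
// UpdateEntry reconciles an updated source entry with the existing
// destination entry: it skips stale or unchanged updates, drops chunks
// that were deleted at the source, replicates newly added chunks, and
// saves the merged metadata.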
func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {

	dir, name := util.FullPath(key).DirAndName()

	// read existing entry
	var existingEntry *filer_pb.Entry
	err = fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {

		request := &filer_pb.LookupDirectoryEntryRequest{
			Directory: dir,
			Name:      name,
		}

		glog.V(4).Infof("lookup entry: %v", request)
		resp, err := filer_pb.LookupEntry(client, request)
		if err != nil {
			glog.V(0).Infof("lookup %s: %v", key, err)
			return err
		}

		existingEntry = resp.Entry

		return nil
	})

	if err != nil {
		return false, fmt.Errorf("lookup %s: %v", key, err)
	}

	glog.V(0).Infof("oldEntry %+v, newEntry %+v, existingEntry: %+v", oldEntry, newEntry, existingEntry)

	if existingEntry.Attributes.Mtime > newEntry.Attributes.Mtime {
		// skip if already changed
		// this usually happens when the messages are not ordered
		glog.V(0).Infof("late updates %s", key)
	} else if filer2.ETag(newEntry) == filer2.ETag(existingEntry) {
		// skip if no change
		// this usually happens when retrying the replication
		glog.V(0).Infof("already replicated %s", key)
	} else {
		// find out what changed
		deletedChunks, newChunks := compareChunks(oldEntry, newEntry)

		// delete the chunks that are deleted from the source
		if deleteIncludeChunks {
			// remove the deleted chunks; actual data deletion happens in filer UpdateEntry FindUnusedFileChunks
			existingEntry.Chunks = filer2.MinusChunks(existingEntry.Chunks, deletedChunks)
		}

		// replicate the chunks that are new in the source
		replicatedChunks, err := fs.replicateChunks(newChunks, newParentPath)
		if err != nil {
			return true, fmt.Errorf("replicate %s chunks error: %v", key, err)
		}
		existingEntry.Chunks = append(existingEntry.Chunks, replicatedChunks...)
	}

	// save the updated metadata
	return true, fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {

		request := &filer_pb.UpdateEntryRequest{
			Directory: newParentPath,
			Entry:     existingEntry,
		}

		if _, err := client.UpdateEntry(context.Background(), request); err != nil {
			return fmt.Errorf("update existingEntry %s: %v", key, err)
		}

		return nil
	})
}
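
// compareChunks returns the chunks that were removed from and added to
// newEntry relative to oldEntry.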
func compareChunks(oldEntry, newEntry *filer_pb.Entry) (deletedChunks, newChunks []*filer_pb.FileChunk) {
	deletedChunks = filer2.MinusChunks(oldEntry.Chunks, newEntry.Chunks)
	newChunks = filer2.MinusChunks(newEntry.Chunks, oldEntry.Chunks)
	return
}