package filer

import (
	"context"
	"fmt"
	"io"
	"math"
	"regexp"
	"strings"
	"time"

	"google.golang.org/protobuf/proto"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/notification"
	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
	"github.com/seaweedfs/seaweedfs/weed/util"
	"github.com/seaweedfs/seaweedfs/weed/util/log_buffer"
)
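
// NotifyUpdateEvent publishes a metadata change for the given old/new entry pair:
// it sends the event to the configured notification queue (if any) and appends it
// to the local metadata log. Changes under SystemLogDir are skipped to avoid
// recursive notifications, and the filer's own signature is added to the event so
// peers can detect and break replication loops.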
func (f *Filer) NotifyUpdateEvent(ctx context.Context, oldEntry, newEntry *Entry, deleteChunks, isFromOtherCluster bool, signatures []int32) {
	var fullpath string
	if oldEntry != nil {
		fullpath = string(oldEntry.FullPath)
	} else if newEntry != nil {
		fullpath = string(newEntry.FullPath)
	} else {
		return
	}

	// println("fullpath:", fullpath)

	if strings.HasPrefix(fullpath, SystemLogDir) {
		return
	}
	foundSelf := false
	for _, sig := range signatures {
		if sig == f.Signature {
			foundSelf = true
		}
	}
	if !foundSelf {
		signatures = append(signatures, f.Signature)
	}

	newParentPath := ""
	if newEntry != nil {
		newParentPath, _ = newEntry.FullPath.DirAndName()
	}
	eventNotification := &filer_pb.EventNotification{
		OldEntry:           oldEntry.ToProtoEntry(),
		NewEntry:           newEntry.ToProtoEntry(),
		DeleteChunks:       deleteChunks,
		NewParentPath:      newParentPath,
		IsFromOtherCluster: isFromOtherCluster,
		Signatures:         signatures,
	}

	if notification.Queue != nil {
		glog.V(3).Infof("notifying entry update %v", fullpath)
		if err := notification.Queue.SendMessage(fullpath, eventNotification); err != nil {
			// the message is dropped; just log the error
			glog.Error(err)
		}
	}

	f.logMetaEvent(ctx, fullpath, eventNotification)
}
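
// logMetaEvent wraps the event in a filer_pb.SubscribeMetadataResponse, stamps it
// with the current time, and adds it to the in-memory metadata log buffer keyed by
// the entry's parent directory.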
func (f *Filer) logMetaEvent(ctx context.Context, fullpath string, eventNotification *filer_pb.EventNotification) {
	dir, _ := util.FullPath(fullpath).DirAndName()

	event := &filer_pb.SubscribeMetadataResponse{
		Directory:         dir,
		EventNotification: eventNotification,
		TsNs:              time.Now().UnixNano(),
	}
	data, err := proto.Marshal(event)
	if err != nil {
		glog.Errorf("failed to marshal filer_pb.SubscribeMetadataResponse %+v: %v", event, err)
		return
	}

	f.LocalMetaLogBuffer.AddDataToBuffer([]byte(dir), data, event.TsNs)
}
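
// logFlushFunc persists a flushed metadata log buffer to a file under SystemLogDir,
// named by the flush start time (YYYY-MM-DD/HH-MM.<unique filer id>). The append is
// retried until it succeeds so buffered metadata is not lost on transient failures.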
func (f *Filer) logFlushFunc(logBuffer *log_buffer.LogBuffer, startTime, stopTime time.Time, buf []byte) {
	if len(buf) == 0 {
		return
	}

	startTime, stopTime = startTime.UTC(), stopTime.UTC()

	targetFile := fmt.Sprintf("%s/%04d-%02d-%02d/%02d-%02d.%08x", SystemLogDir,
		startTime.Year(), startTime.Month(), startTime.Day(), startTime.Hour(), startTime.Minute(), f.UniqueFilerId,
		// startTime.Second(), startTime.Nanosecond(),
	)

	for {
		if err := f.appendToFile(targetFile, buf); err != nil {
			glog.V(0).Infof("metadata log write failed %s: %v", targetFile, err)
			time.Sleep(737 * time.Millisecond)
		} else {
			break
		}
	}
}
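
// VolumeNotFoundPattern matches read errors caused by missing volumes, so that
// unreadable log files can be skipped instead of aborting the whole log scan.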
var (
	VolumeNotFoundPattern = regexp.MustCompile(`volume \d+? not found`)
)
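
// ReadPersistedLogBuffer replays metadata log files persisted under SystemLogDir,
// walking the per-day and per-hour-minute entries that overlap the requested time
// range and invoking eachLogEntryFn for every log entry newer than startPosition
// (and not newer than stopTsNs, when stopTsNs is non-zero).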
func (f *Filer) ReadPersistedLogBuffer(startPosition log_buffer.MessagePosition, stopTsNs int64, eachLogEntryFn log_buffer.EachLogEntryFuncType) (lastTsNs int64, isDone bool, err error) {

	startDate := fmt.Sprintf("%04d-%02d-%02d", startPosition.Year(), startPosition.Month(), startPosition.Day())
	startHourMinute := fmt.Sprintf("%02d-%02d", startPosition.Hour(), startPosition.Minute())
	var stopDate, stopHourMinute string
	if stopTsNs != 0 {
		stopTime := time.Unix(0, stopTsNs+24*60*60*int64(time.Nanosecond)).UTC()
		stopDate = fmt.Sprintf("%04d-%02d-%02d", stopTime.Year(), stopTime.Month(), stopTime.Day())
		stopHourMinute = fmt.Sprintf("%02d-%02d", stopTime.Hour(), stopTime.Minute())
	}

	sizeBuf := make([]byte, 4)
	startTsNs := startPosition.UnixNano()

	dayEntries, _, listDayErr := f.ListDirectoryEntries(context.Background(), SystemLogDir, startDate, true, math.MaxInt32, "", "", "")
	if listDayErr != nil {
		return lastTsNs, isDone, fmt.Errorf("failed to list log by day: %v", listDayErr)
	}
	for _, dayEntry := range dayEntries {
		if stopDate != "" {
			if strings.Compare(dayEntry.Name(), stopDate) > 0 {
				break
			}
		}
		// println("checking day", dayEntry.FullPath)
		hourMinuteEntries, _, listHourMinuteErr := f.ListDirectoryEntries(context.Background(), util.NewFullPath(SystemLogDir, dayEntry.Name()), "", false, math.MaxInt32, "", "", "")
		if listHourMinuteErr != nil {
			return lastTsNs, isDone, fmt.Errorf("failed to list log for day %s: %v", dayEntry.Name(), listHourMinuteErr)
		}
		for _, hourMinuteEntry := range hourMinuteEntries {
			// println("checking hh-mm", hourMinuteEntry.FullPath)
			if dayEntry.Name() == startDate {
				hourMinute := util.FileNameBase(hourMinuteEntry.Name())
				if strings.Compare(hourMinute, startHourMinute) < 0 {
					continue
				}
			}
			if dayEntry.Name() == stopDate {
				hourMinute := util.FileNameBase(hourMinuteEntry.Name())
				if strings.Compare(hourMinute, stopHourMinute) > 0 {
					break
				}
			}
			// println("processing", hourMinuteEntry.FullPath)
			chunkedFileReader := NewChunkStreamReaderFromFiler(f.MasterClient, hourMinuteEntry.GetChunks())
			if lastTsNs, err = ReadEachLogEntry(chunkedFileReader, sizeBuf, startTsNs, stopTsNs, eachLogEntryFn); err != nil {
				chunkedFileReader.Close()
				if err == io.EOF {
					continue
				}
				if VolumeNotFoundPattern.MatchString(err.Error()) {
					glog.Warningf("skipping reading %s: %v", hourMinuteEntry.FullPath, err)
					continue
				}
				return lastTsNs, isDone, fmt.Errorf("reading %s: %v", hourMinuteEntry.FullPath, err)
			}
			chunkedFileReader.Close()
		}
	}

	return lastTsNs, isDone, nil
}
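
// ReadEachLogEntry decodes length-prefixed filer_pb.LogEntry records from r (a
// 4-byte size prefix followed by the marshaled entry) and calls eachLogEntryFn for
// entries newer than startTsNs, stopping once an entry is newer than a non-zero
// stopTsNs. It returns the timestamp of the last entry processed; reaching the end
// of the stream surfaces as io.EOF.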
func ReadEachLogEntry(r io.Reader, sizeBuf []byte, startTsNs, stopTsNs int64, eachLogEntryFn log_buffer.EachLogEntryFuncType) (lastTsNs int64, err error) {
	for {
		n, err := r.Read(sizeBuf)
		if err != nil {
			return lastTsNs, err
		}
		if n != 4 {
			return lastTsNs, fmt.Errorf("size %d bytes, expected 4 bytes", n)
		}
		size := util.BytesToUint32(sizeBuf)
		// println("entry size", size)
		entryData := make([]byte, size)
		n, err = r.Read(entryData)
		if err != nil {
			return lastTsNs, err
		}
		if n != int(size) {
			return lastTsNs, fmt.Errorf("entry data %d bytes, expected %d bytes", n, size)
		}
		logEntry := &filer_pb.LogEntry{}
		if err = proto.Unmarshal(entryData, logEntry); err != nil {
			return lastTsNs, err
		}
		if logEntry.TsNs <= startTsNs {
			continue
		}
		if stopTsNs != 0 && logEntry.TsNs > stopTsNs {
			return lastTsNs, err
		}
		// println("each log: ", logEntry.TsNs)
		if _, err := eachLogEntryFn(logEntry); err != nil {
			return lastTsNs, err
		} else {
			lastTsNs = logEntry.TsNs
		}
	}
}