package storage

import (
    "fmt"
    "io"
    "time"

    "github.com/seaweedfs/seaweedfs/weed/glog"
    "github.com/seaweedfs/seaweedfs/weed/stats"
    "github.com/seaweedfs/seaweedfs/weed/storage/backend"
    "github.com/seaweedfs/seaweedfs/weed/storage/needle"
    "github.com/seaweedfs/seaweedfs/weed/storage/super_block"
    . "github.com/seaweedfs/seaweedfs/weed/storage/types"
    "github.com/seaweedfs/seaweedfs/weed/util/mem"
)

const PagedReadLimit = 1024 * 1024
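
// Note: readNeedle honors readOption.AttemptMetaOnly only for needles larger than
// PagedReadLimit (1 MiB); such reads may return metadata only, presumably so the body
// can be streamed in pages afterwards (see readNeedleDataInto).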

// readNeedle fills in Needle content by looking up n.Id from NeedleMapper
func (v *Volume) readNeedle(n *needle.Needle, readOption *ReadOption, onReadSizeFn func(size Size)) (count int, err error) {
    v.dataFileAccessLock.RLock()
    defer v.dataFileAccessLock.RUnlock()

    nv, ok := v.nm.Get(n.Id)
    if !ok || nv.Offset.IsZero() {
        return -1, ErrorNotFound
    }
    readSize := nv.Size
    if readSize.IsDeleted() {
        if readOption != nil && readOption.ReadDeleted && readSize != TombstoneFileSize {
            glog.V(3).Infof("reading deleted %s", n.String())
            readSize = -readSize
        } else {
            return -1, ErrorDeleted
        }
    }
    if readSize == 0 {
        return 0, nil
    }
    if onReadSizeFn != nil {
        onReadSizeFn(readSize)
    }
    if readOption != nil && readOption.AttemptMetaOnly && readSize > PagedReadLimit {
        readOption.VolumeRevision = v.SuperBlock.CompactionRevision
        err = n.ReadNeedleMeta(v.DataBackend, nv.Offset.ToActualOffset(), readSize, v.Version())
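        // On volumes with 4-byte offsets the stored offset may have wrapped past
        // MaxPossibleVolumeSize; a size mismatch is treated as that case and the
        // read is retried at the shifted offset.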
        if err == needle.ErrorSizeMismatch && OffsetSize == 4 {
            readOption.IsOutOfRange = true
            err = n.ReadNeedleMeta(v.DataBackend, nv.Offset.ToActualOffset()+int64(MaxPossibleVolumeSize), readSize, v.Version())
        }
        if err != nil {
            return 0, err
        }
        if !n.IsCompressed() && !n.IsChunkedManifest() {
            readOption.IsMetaOnly = true
        }
    }
    if readOption == nil || !readOption.IsMetaOnly {
        err = n.ReadData(v.DataBackend, nv.Offset.ToActualOffset(), readSize, v.Version())
        v.checkReadWriteError(err)
        if err != nil {
            return 0, err
        }
    }
    count = int(n.DataSize)
    // a needle whose TTL has expired is reported as not found
    if !n.HasTtl() {
        return
    }
    ttlMinutes := n.Ttl.Minutes()
    if ttlMinutes == 0 {
        return
    }
    if !n.HasLastModifiedDate() {
        return
    }
    if time.Now().Before(time.Unix(0, int64(n.AppendAtNs)).Add(time.Duration(ttlMinutes) * time.Minute)) {
        return
    }
    return -1, ErrorNotFound
}

// readNeedleMetaAt reads needle metadata from the data backend at a specific offset
func (v *Volume) readNeedleMetaAt(n *needle.Needle, offset int64, size int32) (err error) {
    v.dataFileAccessLock.RLock()
    defer v.dataFileAccessLock.RUnlock()
    // a negative size marks a deleted needle; read its metadata with size 0
    if size < 0 {
        size = 0
    }
    err = n.ReadNeedleMeta(v.DataBackend, offset, Size(size), v.Version())
    if err == needle.ErrorSizeMismatch && OffsetSize == 4 {
        err = n.ReadNeedleMeta(v.DataBackend, offset+int64(MaxPossibleVolumeSize), Size(size), v.Version())
    }
    if err != nil {
        return err
    }
    return nil
}

// readNeedleDataInto streams the needle data range [offset, offset+size) into the writer,
// looking up n.Id from NeedleMapper
func (v *Volume) readNeedleDataInto(n *needle.Needle, readOption *ReadOption, writer io.Writer, offset int64, size int64) (err error) {
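    // With readOption.HasSlowRead the read lock is not held for the whole call; it is
    // re-acquired around the index lookup and around each buffered read below, so a slow
    // client does not hold the lock for the duration of the stream.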
    if !readOption.HasSlowRead {
        v.dataFileAccessLock.RLock()
        defer v.dataFileAccessLock.RUnlock()
    }
    if readOption.HasSlowRead {
        v.dataFileAccessLock.RLock()
    }
    nv, ok := v.nm.Get(n.Id)
    if readOption.HasSlowRead {
        v.dataFileAccessLock.RUnlock()
    }
    if !ok || nv.Offset.IsZero() {
        return ErrorNotFound
    }
    readSize := nv.Size
    if readSize.IsDeleted() {
        if readOption != nil && readOption.ReadDeleted && readSize != TombstoneFileSize {
            glog.V(3).Infof("reading deleted %s", n.String())
            readSize = -readSize
        } else {
            return ErrorDeleted
        }
    }
    if readSize == 0 {
        return nil
    }
    actualOffset := nv.Offset.ToActualOffset()
    if readOption.IsOutOfRange {
        actualOffset += int64(MaxPossibleVolumeSize)
    }

    buf := mem.Allocate(min(readOption.ReadBufferSize, int(size)))
    defer mem.Free(buf)

    // read needle data in buffer-sized chunks, verifying the CRC along the way
    crc := needle.CRC(0)
    for x := offset; x < offset+size; x += int64(len(buf)) {
        if readOption.HasSlowRead {
            v.dataFileAccessLock.RLock()
        }
        // possibly re-read the needle offset if the volume has been compacted
        if readOption.VolumeRevision != v.SuperBlock.CompactionRevision {
            // the volume is compacted
            nv, ok = v.nm.Get(n.Id)
            if !ok || nv.Offset.IsZero() {
                if readOption.HasSlowRead {
                    v.dataFileAccessLock.RUnlock()
                }
                return ErrorNotFound
            }
            actualOffset = nv.Offset.ToActualOffset()
            readOption.VolumeRevision = v.SuperBlock.CompactionRevision
        }
        count, err := n.ReadNeedleData(v.DataBackend, actualOffset, buf, x)
        if readOption.HasSlowRead {
            v.dataFileAccessLock.RUnlock()
        }
        toWrite := min(count, int(offset+size-x))
        if toWrite > 0 {
            crc = crc.Update(buf[0:toWrite])
            if offset == 0 && size == int64(n.DataSize) && int64(count) == size && (n.Checksum != crc) {
                // This check works only if the buffer is big enough to hold the whole needle data
                // and we ask for all of the needle data.
                // Otherwise we cannot check the validity of partially acquired data.
                stats.VolumeServerHandlerCounter.WithLabelValues(stats.ErrorCRC).Inc()
                return fmt.Errorf("ReadNeedleData checksum %v expected %v for Needle: %v,%v", crc, n.Checksum, v.Id, n)
            }
            if _, err = writer.Write(buf[0:toWrite]); err != nil {
                return fmt.Errorf("ReadNeedleData write: %v", err)
            }
        }
        if err != nil {
            if err == io.EOF {
                err = nil
                break
            }
            return fmt.Errorf("ReadNeedleData: %v", err)
        }
        if count <= 0 {
            break
        }
    }
    if offset == 0 && size == int64(n.DataSize) && (n.Checksum != crc && uint32(n.Checksum) != crc.Value()) {
        // the crc.Value() function is to be deprecated; this double check is kept for backward compatibility
        stats.VolumeServerHandlerCounter.WithLabelValues(stats.ErrorCRC).Inc()
        return fmt.Errorf("ReadNeedleData checksum %v expected %v for Needle: %v,%v", crc, n.Checksum, v.Id, n)
    }
    return nil
}

func min(x, y int) int {
    if x < y {
        return x
    }
    return y
}

// ReadNeedleBlob reads the raw needle blob of the given size at the given offset in the data file
func (v *Volume) ReadNeedleBlob(offset int64, size Size) ([]byte, error) {
    v.dataFileAccessLock.RLock()
    defer v.dataFileAccessLock.RUnlock()

    return needle.ReadNeedleBlob(v.DataBackend, offset, size, v.Version())
}

// VolumeFileScanner is the visitor interface used by ScanVolumeFile to walk a volume data file
type VolumeFileScanner interface {
    VisitSuperBlock(super_block.SuperBlock) error
    ReadNeedleBody() bool
    VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error
}
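
// As an illustration only (not part of this file), a minimal scanner that just counts
// needles could look roughly like this; the name needleCounter is hypothetical:
//
//    type needleCounter struct{ count int }
//
//    func (c *needleCounter) VisitSuperBlock(super_block.SuperBlock) error { return nil }
//    func (c *needleCounter) ReadNeedleBody() bool                         { return false }
//    func (c *needleCounter) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error {
//        c.count++
//        return nil
//    }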

// ScanVolumeFile loads the volume without its index and scans every needle in its data file.
func ScanVolumeFile(dirname string, collection string, id needle.VolumeId,
    needleMapKind NeedleMapKind,
    volumeFileScanner VolumeFileScanner) (err error) {
    var v *Volume
    if v, err = loadVolumeWithoutIndex(dirname, collection, id, needleMapKind); err != nil {
        return fmt.Errorf("failed to load volume %d: %v", id, err)
    }
    if err = volumeFileScanner.VisitSuperBlock(v.SuperBlock); err != nil {
        return fmt.Errorf("failed to process volume %d super block: %v", id, err)
    }
    defer v.Close()

    version := v.Version()

    offset := int64(v.SuperBlock.BlockSize())

    return ScanVolumeFileFrom(version, v.DataBackend, offset, volumeFileScanner)
}

// ScanVolumeFileFrom scans the data file starting at the given offset, invoking the scanner for each needle.
func ScanVolumeFileFrom(version needle.Version, datBackend backend.BackendStorageFile, offset int64, volumeFileScanner VolumeFileScanner) (err error) {
    n, nh, rest, e := needle.ReadNeedleHeader(datBackend, version, offset)
    if e != nil {
        if e == io.EOF {
            return nil
        }
        return fmt.Errorf("cannot read %s at offset %d: %v", datBackend.Name(), offset, e)
    }
    for n != nil {
        var needleBody []byte
        if volumeFileScanner.ReadNeedleBody() {
            // println("needle", n.Id.String(), "offset", offset, "size", n.Size, "rest", rest)
            if needleBody, err = n.ReadNeedleBody(datBackend, version, offset+NeedleHeaderSize, rest); err != nil {
                glog.V(0).Infof("cannot read needle head [%d, %d) body [%d, %d) body length %d: %v", offset, offset+NeedleHeaderSize, offset+NeedleHeaderSize, offset+NeedleHeaderSize+rest, rest, err)
                // body read errors are logged but tolerated here
                // err = fmt.Errorf("cannot read needle body: %v", err)
                // return
            }
        }
        err := volumeFileScanner.VisitNeedle(n, offset, nh, needleBody)
        if err == io.EOF {
            return nil
        }
        if err != nil {
            glog.V(0).Infof("visit needle error: %v", err)
            return fmt.Errorf("visit needle error: %v", err)
        }
        offset += NeedleHeaderSize + rest
        glog.V(4).Infof("==> new entry offset %d", offset)
        if n, nh, rest, err = needle.ReadNeedleHeader(datBackend, version, offset); err != nil {
            if err == io.EOF {
                return nil
            }
            return fmt.Errorf("cannot read needle header at offset %d: %v", offset, err)
        }
        glog.V(4).Infof("new entry needle size:%d rest:%d", n.Size, rest)
    }
    return nil
}