You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

198 lines
5.7 KiB

4 years ago
5 years ago
  1. package chunk_cache
  2. import (
  3. "fmt"
  4. "os"
  5. "time"
  6. "github.com/syndtr/goleveldb/leveldb/opt"
  7. "github.com/seaweedfs/seaweedfs/weed/glog"
  8. "github.com/seaweedfs/seaweedfs/weed/storage"
  9. "github.com/seaweedfs/seaweedfs/weed/storage/backend"
  10. "github.com/seaweedfs/seaweedfs/weed/storage/types"
  11. "github.com/seaweedfs/seaweedfs/weed/util"
  12. )
// ChunkCacheVolume implements an on disk cache.
// The entries are an FIFO with a size limit.
type ChunkCacheVolume struct {
	DataBackend backend.BackendStorageFile // backing .dat file that holds the needle data
	nm          storage.NeedleMapper       // needle map (leveldb-backed) from needle id to (offset, size)
	fileName    string                     // path prefix; the volume uses fileName+".dat"/".idx"/".ldb"
	smallBuffer []byte                     // zero-filled scratch of NeedlePaddingSize bytes, used to pad writes
	sizeLimit   int64                      // preallocate size passed at creation time
	lastModTime time.Time                  // .dat mod time at load, or time.Now() when freshly created
	fileSize    int64                      // current .dat size; also the offset for the next appended needle
}
  24. func LoadOrCreateChunkCacheVolume(fileName string, preallocate int64) (*ChunkCacheVolume, error) {
  25. v := &ChunkCacheVolume{
  26. smallBuffer: make([]byte, types.NeedlePaddingSize),
  27. fileName: fileName,
  28. sizeLimit: preallocate,
  29. }
  30. var err error
  31. if exists, canRead, canWrite, modTime, fileSize := util.CheckFile(v.fileName + ".dat"); exists {
  32. if !canRead {
  33. return nil, fmt.Errorf("cannot read cache file %s.dat", v.fileName)
  34. }
  35. if !canWrite {
  36. return nil, fmt.Errorf("cannot write cache file %s.dat", v.fileName)
  37. }
  38. if dataFile, err := os.OpenFile(v.fileName+".dat", os.O_RDWR|os.O_CREATE, 0644); err != nil {
  39. return nil, fmt.Errorf("cannot create cache file %s.dat: %v", v.fileName, err)
  40. } else {
  41. v.DataBackend = backend.NewDiskFile(dataFile)
  42. v.lastModTime = modTime
  43. v.fileSize = fileSize
  44. }
  45. } else {
  46. if v.DataBackend, err = backend.CreateVolumeFile(v.fileName+".dat", preallocate, 0); err != nil {
  47. return nil, fmt.Errorf("cannot create cache file %s.dat: %v", v.fileName, err)
  48. }
  49. v.lastModTime = time.Now()
  50. }
  51. var indexFile *os.File
  52. if indexFile, err = os.OpenFile(v.fileName+".idx", os.O_RDWR|os.O_CREATE, 0644); err != nil {
  53. return nil, fmt.Errorf("cannot write cache index %s.idx: %v", v.fileName, err)
  54. }
  55. glog.V(1).Infoln("loading leveldb", v.fileName+".ldb")
  56. opts := &opt.Options{
  57. BlockCacheCapacity: 2 * 1024 * 1024, // default value is 8MiB
  58. WriteBuffer: 1 * 1024 * 1024, // default value is 4MiB
  59. CompactionTableSizeMultiplier: 10, // default value is 1
  60. }
  61. if v.nm, err = storage.NewLevelDbNeedleMap(v.fileName+".ldb", indexFile, opts, 0); err != nil {
  62. return nil, fmt.Errorf("loading leveldb %s error: %v", v.fileName+".ldb", err)
  63. }
  64. return v, nil
  65. }
  66. func (v *ChunkCacheVolume) Shutdown() {
  67. if v.DataBackend != nil {
  68. v.DataBackend.Close()
  69. v.DataBackend = nil
  70. }
  71. if v.nm != nil {
  72. v.nm.Close()
  73. v.nm = nil
  74. }
  75. }
  76. func (v *ChunkCacheVolume) doReset() {
  77. v.Shutdown()
  78. os.Truncate(v.fileName+".dat", 0)
  79. os.Truncate(v.fileName+".idx", 0)
  80. glog.V(4).Infof("cache removeAll %s ...", v.fileName+".ldb")
  81. os.RemoveAll(v.fileName + ".ldb")
  82. glog.V(4).Infof("cache removed %s", v.fileName+".ldb")
  83. }
  84. func (v *ChunkCacheVolume) Reset() (*ChunkCacheVolume, error) {
  85. v.doReset()
  86. return LoadOrCreateChunkCacheVolume(v.fileName, v.sizeLimit)
  87. }
  88. func (v *ChunkCacheVolume) GetNeedle(key types.NeedleId) ([]byte, error) {
  89. nv, ok := v.nm.Get(key)
  90. if !ok {
  91. return nil, storage.ErrorNotFound
  92. }
  93. data := make([]byte, nv.Size)
  94. if readSize, readErr := v.DataBackend.ReadAt(data, nv.Offset.ToActualOffset()); readErr != nil {
  95. if readSize != int(nv.Size) {
  96. return nil, fmt.Errorf("read %s.dat [%d,%d): %v",
  97. v.fileName, nv.Offset.ToActualOffset(), nv.Offset.ToActualOffset()+int64(nv.Size), readErr)
  98. }
  99. } else {
  100. if readSize != int(nv.Size) {
  101. return nil, fmt.Errorf("read %d, expected %d", readSize, nv.Size)
  102. }
  103. }
  104. return data, nil
  105. }
  106. func (v *ChunkCacheVolume) getNeedleSlice(key types.NeedleId, offset, length uint64) ([]byte, error) {
  107. nv, ok := v.nm.Get(key)
  108. if !ok {
  109. return nil, storage.ErrorNotFound
  110. }
  111. wanted := min(int(length), int(nv.Size)-int(offset))
  112. if wanted < 0 {
  113. // should never happen, but better than panicking
  114. return nil, ErrorOutOfBounds
  115. }
  116. data := make([]byte, wanted)
  117. if readSize, readErr := v.DataBackend.ReadAt(data, nv.Offset.ToActualOffset()+int64(offset)); readErr != nil {
  118. if readSize != wanted {
  119. return nil, fmt.Errorf("read %s.dat [%d,%d): %v",
  120. v.fileName, nv.Offset.ToActualOffset()+int64(offset), int(nv.Offset.ToActualOffset())+int(offset)+wanted, readErr)
  121. }
  122. } else {
  123. if readSize != wanted {
  124. return nil, fmt.Errorf("read %d, expected %d", readSize, wanted)
  125. }
  126. }
  127. return data, nil
  128. }
// readNeedleSliceAt reads part of the cached needle identified by key into
// data, starting at the given byte offset within the needle's content.
// It returns the number of bytes read, or storage.ErrorNotFound when the key
// has no entry in the needle map.
func (v *ChunkCacheVolume) readNeedleSliceAt(data []byte, key types.NeedleId, offset uint64) (n int, err error) {
	nv, ok := v.nm.Get(key)
	if !ok {
		return 0, storage.ErrorNotFound
	}
	// cap the expected read size at what remains of the needle past offset
	wanted := min(len(data), int(nv.Size)-int(offset))
	if wanted < 0 {
		// should never happen, but better than panicking
		return 0, ErrorOutOfBounds
	}
	// NOTE(review): ReadAt is handed all of data, not data[:wanted]; when
	// len(data) exceeds the remaining needle bytes, this reads past the needle
	// (into padding or a following needle) and then fails the n != wanted
	// check below — confirm callers always size data within the needle.
	if n, err = v.DataBackend.ReadAt(data, nv.Offset.ToActualOffset()+int64(offset)); err != nil {
		if n != wanted {
			return n, fmt.Errorf("read %s.dat [%d,%d): %v",
				v.fileName, nv.Offset.ToActualOffset()+int64(offset), int(nv.Offset.ToActualOffset())+int(offset)+wanted, err)
		}
	} else {
		if n != wanted {
			return n, fmt.Errorf("read %d, expected %d", n, wanted)
		}
	}
	// a read error is tolerated when exactly the wanted bytes were returned
	return n, nil
}
  151. func (v *ChunkCacheVolume) WriteNeedle(key types.NeedleId, data []byte) error {
  152. offset := v.fileSize
  153. written, err := v.DataBackend.WriteAt(data, offset)
  154. if err != nil {
  155. return err
  156. } else if written != len(data) {
  157. return fmt.Errorf("partial written %d, expected %d", written, len(data))
  158. }
  159. v.fileSize += int64(written)
  160. extraSize := written % types.NeedlePaddingSize
  161. if extraSize != 0 {
  162. v.DataBackend.WriteAt(v.smallBuffer[:types.NeedlePaddingSize-extraSize], offset+int64(written))
  163. v.fileSize += int64(types.NeedlePaddingSize - extraSize)
  164. }
  165. if err := v.nm.Put(key, types.ToOffset(offset), types.Size(len(data))); err != nil {
  166. return err
  167. }
  168. return nil
  169. }