package storage

import (
	"fmt"
	"path"
	"strconv"
	"sync"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
	"github.com/seaweedfs/seaweedfs/weed/stats"
	"github.com/seaweedfs/seaweedfs/weed/storage/backend"
	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
	"github.com/seaweedfs/seaweedfs/weed/storage/super_block"
	"github.com/seaweedfs/seaweedfs/weed/storage/types"
)
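
// Volume is one volume on a DiskLocation: a data backend (local .dat file or a
// remote backend file) paired with a needle map index (nm), plus the embedded
// SuperBlock that carries replica placement, TTL and version information.
// It also tracks read-only state, compaction state and the access locks.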
type Volume struct {
	Id            needle.VolumeId
	dir           string
	dirIdx        string
	Collection    string
	DataBackend   backend.BackendStorageFile
	nm            NeedleMapper
	needleMapKind NeedleMapKind

	noWriteOrDelete  bool // if readonly, either noWriteOrDelete or noWriteCanDelete
	noWriteCanDelete bool // if readonly, either noWriteOrDelete or noWriteCanDelete
	noWriteLock      sync.RWMutex

	hasRemoteFile      bool // if the volume has a remote file
	MemoryMapMaxSizeMb uint32

	super_block.SuperBlock

	dataFileAccessLock sync.RWMutex
	asyncRequestsChan  chan *needle.AsyncRequest

	lastModifiedTsSeconds  uint64 // unix time in seconds
	lastAppendAtNs         uint64 // unix time in nanoseconds
	lastCompactIndexOffset uint64
	lastCompactRevision    uint16

	isCompacting       bool
	isCommitCompacting bool

	volumeInfo *volume_server_pb.VolumeInfo
	location   *DiskLocation

	lastIoError error
}
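
// NewVolume creates the in-memory handle for volume id, with data files under
// dirname and index files under dirIdx, loads the on-disk files, and starts
// the worker goroutine that serves asyncRequestsChan.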
func NewVolume(dirname string, dirIdx string, collection string, id needle.VolumeId, needleMapKind NeedleMapKind, replicaPlacement *super_block.ReplicaPlacement, ttl *needle.TTL, preallocate int64, memoryMapMaxSizeMb uint32) (v *Volume, e error) {
	// if replicaPlacement is nil, the superblock will be loaded from disk
	v = &Volume{dir: dirname, dirIdx: dirIdx, Collection: collection, Id: id, MemoryMapMaxSizeMb: memoryMapMaxSizeMb,
		asyncRequestsChan: make(chan *needle.AsyncRequest, 128)}
	v.SuperBlock = super_block.SuperBlock{ReplicaPlacement: replicaPlacement, Ttl: ttl}
	v.needleMapKind = needleMapKind
	e = v.load(true, true, needleMapKind, preallocate)
	v.startWorker()
	return
}

func (v *Volume) String() string {
	v.noWriteLock.RLock()
	defer v.noWriteLock.RUnlock()
	return fmt.Sprintf("Id:%v dir:%s dirIdx:%s Collection:%s dataFile:%v nm:%v noWrite:%v canDelete:%v", v.Id, v.dir, v.dirIdx, v.Collection, v.DataBackend, v.nm, v.noWriteOrDelete || v.noWriteCanDelete, v.noWriteCanDelete)
}
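
// VolumeFileName returns the base file name (without extension) for a volume:
// <dir>/<id> when there is no collection, otherwise <dir>/<collection>_<id>.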
func VolumeFileName(dir string, collection string, id int) (fileName string) {
	idString := strconv.Itoa(id)
	if collection == "" {
		fileName = path.Join(dir, idString)
	} else {
		fileName = path.Join(dir, collection+"_"+idString)
	}
	return
}

func (v *Volume) DataFileName() (fileName string) {
	return VolumeFileName(v.dir, v.Collection, int(v.Id))
}

func (v *Volume) IndexFileName() (fileName string) {
	return VolumeFileName(v.dirIdx, v.Collection, int(v.Id))
}
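
// FileName returns the full path for the given extension. Index-related files
// (.idx, .cpx, .ldb) live under dirIdx; all other files (.dat, .cpd, .vif)
// live under dir.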
func (v *Volume) FileName(ext string) (fileName string) {
	switch ext {
	case ".idx", ".cpx", ".ldb":
		return VolumeFileName(v.dirIdx, v.Collection, int(v.Id)) + ext
	}
	// .dat, .cpd, .vif
	return VolumeFileName(v.dir, v.Collection, int(v.Id)) + ext
}

func (v *Volume) Version() needle.Version {
	if v.volumeInfo.Version != 0 {
		v.SuperBlock.Version = needle.Version(v.volumeInfo.Version)
	}
	return v.SuperBlock.Version
}
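
// FileStat returns the data file size, the index file size and the data file's
// modification time. If the backend is not open or its size cannot be read, it
// returns zero values instead of -1, since -1 would overflow the unsigned
// counters and make the volume unwritable.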
func (v *Volume) FileStat() (datSize uint64, idxSize uint64, modTime time.Time) {
	v.dataFileAccessLock.RLock()
	defer v.dataFileAccessLock.RUnlock()
	if v.DataBackend == nil {
		return
	}
	datFileSize, modTime, e := v.DataBackend.GetStat()
	if e == nil {
		return uint64(datFileSize), v.nm.IndexFileSize(), modTime
	}
	glog.V(0).Infof("Failed to read file size %s %v", v.DataBackend.Name(), e)
	return // -1 causes integer overflow and the volume to become unwritable.
}

func (v *Volume) ContentSize() uint64 {
	v.dataFileAccessLock.RLock()
	defer v.dataFileAccessLock.RUnlock()
	if v.nm == nil {
		return 0
	}
	return v.nm.ContentSize()
}

func (v *Volume) DeletedSize() uint64 {
	v.dataFileAccessLock.RLock()
	defer v.dataFileAccessLock.RUnlock()
	if v.nm == nil {
		return 0
	}
	return v.nm.DeletedSize()
}

func (v *Volume) FileCount() uint64 {
	v.dataFileAccessLock.RLock()
	defer v.dataFileAccessLock.RUnlock()
	if v.nm == nil {
		return 0
	}
	return uint64(v.nm.FileCount())
}

func (v *Volume) DeletedCount() uint64 {
	v.dataFileAccessLock.RLock()
	defer v.dataFileAccessLock.RUnlock()
	if v.nm == nil {
		return 0
	}
	return uint64(v.nm.DeletedCount())
}

func (v *Volume) MaxFileKey() types.NeedleId {
	v.dataFileAccessLock.RLock()
	defer v.dataFileAccessLock.RUnlock()
	if v.nm == nil {
		return 0
	}
	return v.nm.MaxFileKey()
}

func (v *Volume) IndexFileSize() uint64 {
	v.dataFileAccessLock.RLock()
	defer v.dataFileAccessLock.RUnlock()
	if v.nm == nil {
		return 0
	}
	return v.nm.IndexFileSize()
}

func (v *Volume) DiskType() types.DiskType {
	return v.location.DiskType
}
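
// SetStopping flushes the needle map and the data backend to disk so the
// volume can be stopped safely; sync failures are only logged.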
func (v *Volume) SetStopping() {
	v.dataFileAccessLock.Lock()
	defer v.dataFileAccessLock.Unlock()
	if v.nm != nil {
		if err := v.nm.Sync(); err != nil {
			glog.Warningf("Volume SetStopping fail to sync volume idx %d", v.Id)
		}
	}
	if v.DataBackend != nil {
		if err := v.DataBackend.Sync(); err != nil {
			glog.Warningf("Volume SetStopping fail to sync volume %d", v.Id)
		}
	}
}
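
// SyncToDisk flushes the needle map and the data backend to disk without
// closing them.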
func (v *Volume) SyncToDisk() {
	v.dataFileAccessLock.Lock()
	defer v.dataFileAccessLock.Unlock()
	if v.nm != nil {
		if err := v.nm.Sync(); err != nil {
			glog.Warningf("Volume Close fail to sync volume idx %d", v.Id)
		}
	}
	if v.DataBackend != nil {
		if err := v.DataBackend.Sync(); err != nil {
			glog.Warningf("Volume Close fail to sync volume %d", v.Id)
		}
	}
}

// Close cleanly shuts down this volume
func (v *Volume) Close() {
	v.dataFileAccessLock.Lock()
	defer v.dataFileAccessLock.Unlock()

	for v.isCommitCompacting {
		time.Sleep(521 * time.Millisecond)
		glog.Warningf("Volume Close wait for compaction %d", v.Id)
	}

	if v.nm != nil {
		if err := v.nm.Sync(); err != nil {
			glog.Warningf("Volume Close fail to sync volume idx %d", v.Id)
		}
		v.nm.Close()
		v.nm = nil
	}
	if v.DataBackend != nil {
		if err := v.DataBackend.Sync(); err != nil {
			glog.Warningf("Volume Close fail to sync volume %d", v.Id)
		}
		_ = v.DataBackend.Close()
		v.DataBackend = nil
		stats.VolumeServerVolumeCounter.WithLabelValues(v.Collection, "volume").Dec()
	}
}

func (v *Volume) NeedToReplicate() bool {
	return v.ReplicaPlacement.GetCopyCount() > 1
}

// expired reports whether the volume's TTL has passed, i.e. modified time + volume ttl < now.
// It never reports true when the volume is empty, when the volume has no ttl,
// or when volumeSizeLimit is 0 because the server has just started.
func (v *Volume) expired(contentSize uint64, volumeSizeLimit uint64) bool {
	if volumeSizeLimit == 0 {
		// skip if we don't know the size limit
		return false
	}
	if contentSize <= super_block.SuperBlockSize {
		return false
	}
	if v.Ttl == nil || v.Ttl.Minutes() == 0 {
		return false
	}
	glog.V(2).Infof("volume %d now:%v lastModified:%v", v.Id, time.Now().Unix(), v.lastModifiedTsSeconds)
	livedMinutes := (time.Now().Unix() - int64(v.lastModifiedTsSeconds)) / 60
	glog.V(2).Infof("volume %d ttl:%v lived:%v", v.Id, v.Ttl, livedMinutes)
	if int64(v.Ttl.Minutes()) < livedMinutes {
		return true
	}
	return false
}

// expiredLongEnough reports whether the volume has stayed expired past a grace
// period of 10% of the ttl, capped at maxDelayMinutes.
func (v *Volume) expiredLongEnough(maxDelayMinutes uint32) bool {
	if v.Ttl == nil || v.Ttl.Minutes() == 0 {
		return false
	}

	removalDelay := v.Ttl.Minutes() / 10
	if removalDelay > maxDelayMinutes {
		removalDelay = maxDelayMinutes
	}

	if uint64(v.Ttl.Minutes()+removalDelay)*60+v.lastModifiedTsSeconds < uint64(time.Now().Unix()) {
		return true
	}
	return false
}
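
// collectStatus snapshots the volume's counters (max file key, data file size,
// modification time, file and deletion counts) under the read lock; ok is
// false when the needle map or the data backend is not loaded.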
func (v *Volume) collectStatus() (maxFileKey types.NeedleId, datFileSize int64, modTime time.Time, fileCount, deletedCount, deletedSize uint64, ok bool) {
	v.dataFileAccessLock.RLock()
	defer v.dataFileAccessLock.RUnlock()
	glog.V(3).Infof("collectStatus volume %d", v.Id)

	if v.nm == nil || v.DataBackend == nil {
		return
	}

	ok = true

	maxFileKey = v.nm.MaxFileKey()
	datFileSize, modTime, _ = v.DataBackend.GetStat()
	fileCount = uint64(v.nm.FileCount())
	deletedCount = uint64(v.nm.DeletedCount())
	deletedSize = v.nm.DeletedSize()

	return
}
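
// ToVolumeInformationMessage builds the volume's heartbeat entry for the
// master. It returns (0, nil) when the status cannot be collected, e.g. while
// the volume is not fully loaded.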
func (v *Volume) ToVolumeInformationMessage() (types.NeedleId, *master_pb.VolumeInformationMessage) {
	maxFileKey, volumeSize, modTime, fileCount, deletedCount, deletedSize, ok := v.collectStatus()

	if !ok {
		return 0, nil
	}

	volumeInfo := &master_pb.VolumeInformationMessage{
		Id:               uint32(v.Id),
		Size:             uint64(volumeSize),
		Collection:       v.Collection,
		FileCount:        fileCount,
		DeleteCount:      deletedCount,
		DeletedByteCount: deletedSize,
		ReadOnly:         v.IsReadOnly(),
		ReplicaPlacement: uint32(v.ReplicaPlacement.Byte()),
		Version:          uint32(v.Version()),
		Ttl:              v.Ttl.ToUint32(),
		CompactRevision:  uint32(v.SuperBlock.CompactionRevision),
		ModifiedAtSecond: modTime.Unix(),
		DiskType:         string(v.location.DiskType),
	}

	volumeInfo.RemoteStorageName, volumeInfo.RemoteStorageKey = v.RemoteStorageNameKey()

	return maxFileKey, volumeInfo
}

func (v *Volume) RemoteStorageNameKey() (storageName, storageKey string) {
	if v.volumeInfo == nil {
		return
	}
	if len(v.volumeInfo.GetFiles()) == 0 {
		return
	}
	return v.volumeInfo.GetFiles()[0].BackendName(), v.volumeInfo.GetFiles()[0].GetKey()
}
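
// IsReadOnly reports whether writes are currently rejected, either because the
// volume is marked no-write (with or without deletes allowed) or because the
// disk location is low on space.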
func (v *Volume) IsReadOnly() bool {
	v.noWriteLock.RLock()
	defer v.noWriteLock.RUnlock()
	return v.noWriteOrDelete || v.noWriteCanDelete || v.location.isDiskSpaceLow
}