You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

360 lines
9.8 KiB

6 years ago
5 years ago
5 years ago
6 years ago
5 years ago
6 years ago
10 years ago
6 years ago
12 years ago
4 years ago
5 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
5 years ago
  1. package storage
import (
	"fmt"
	"path"
	"strconv"
	"sync"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
	"github.com/seaweedfs/seaweedfs/weed/stats"
	"github.com/seaweedfs/seaweedfs/weed/storage/backend"
	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
	"github.com/seaweedfs/seaweedfs/weed/storage/super_block"
	"github.com/seaweedfs/seaweedfs/weed/storage/types"
)
// Volume is one unit of storage: a data file (local or remote backend)
// plus a needle-map index, with locks guarding separate concerns.
type Volume struct {
	Id          needle.VolumeId
	dir         string // directory holding the data (.dat) file
	dirIdx      string // directory holding the index (.idx) file; may differ from dir
	Collection  string
	DataBackend backend.BackendStorageFile
	nm          NeedleMapper     // maps needle id -> offset/size in the data file
	tmpNm       TempNeedleMapper // temporary needle map, presumably used during compaction — TODO confirm
	needleMapKind    NeedleMapKind
	noWriteOrDelete  bool // if readonly, either noWriteOrDelete or noWriteCanDelete
	noWriteCanDelete bool // if readonly, either noWriteOrDelete or noWriteCanDelete
	noWriteLock      sync.RWMutex // guards the two read-only flags above
	hasRemoteFile    bool         // if the volume has a remote file
	MemoryMapMaxSizeMb uint32
	super_block.SuperBlock
	dataFileAccessLock   sync.RWMutex // guards DataBackend and nm access
	superBlockAccessLock sync.Mutex   // guards SuperBlock mutation (see Version)
	asyncRequestsChan    chan *needle.AsyncRequest
	lastModifiedTsSeconds uint64 // unix time in seconds
	lastAppendAtNs        uint64 // unix time in nanoseconds
	lastCompactIndexOffset uint64
	lastCompactRevision    uint16
	ldbTimeout int64 // leveldb needle-map timeout, passed through to load
	isCompacting       bool
	isCommitCompacting bool // doClose busy-waits on this before releasing resources
	volumeInfo *volume_server_pb.VolumeInfo
	location   *DiskLocation
	lastIoError error
}
  46. func NewVolume(dirname string, dirIdx string, collection string, id needle.VolumeId, needleMapKind NeedleMapKind, replicaPlacement *super_block.ReplicaPlacement, ttl *needle.TTL, preallocate int64, memoryMapMaxSizeMb uint32, ldbTimeout int64) (v *Volume, e error) {
  47. // if replicaPlacement is nil, the superblock will be loaded from disk
  48. v = &Volume{dir: dirname, dirIdx: dirIdx, Collection: collection, Id: id, MemoryMapMaxSizeMb: memoryMapMaxSizeMb,
  49. asyncRequestsChan: make(chan *needle.AsyncRequest, 128)}
  50. v.SuperBlock = super_block.SuperBlock{ReplicaPlacement: replicaPlacement, Ttl: ttl}
  51. v.needleMapKind = needleMapKind
  52. v.ldbTimeout = ldbTimeout
  53. e = v.load(true, true, needleMapKind, preallocate)
  54. v.startWorker()
  55. return
  56. }
  57. func (v *Volume) String() string {
  58. v.noWriteLock.RLock()
  59. defer v.noWriteLock.RUnlock()
  60. return fmt.Sprintf("Id:%v dir:%s dirIdx:%s Collection:%s dataFile:%v nm:%v noWrite:%v canDelete:%v", v.Id, v.dir, v.dirIdx, v.Collection, v.DataBackend, v.nm, v.noWriteOrDelete || v.noWriteCanDelete, v.noWriteCanDelete)
  61. }
  62. func VolumeFileName(dir string, collection string, id int) (fileName string) {
  63. idString := strconv.Itoa(id)
  64. if collection == "" {
  65. fileName = path.Join(dir, idString)
  66. } else {
  67. fileName = path.Join(dir, collection+"_"+idString)
  68. }
  69. return
  70. }
  71. func (v *Volume) DataFileName() (fileName string) {
  72. return VolumeFileName(v.dir, v.Collection, int(v.Id))
  73. }
  74. func (v *Volume) IndexFileName() (fileName string) {
  75. return VolumeFileName(v.dirIdx, v.Collection, int(v.Id))
  76. }
  77. func (v *Volume) FileName(ext string) (fileName string) {
  78. switch ext {
  79. case ".idx", ".cpx", ".ldb":
  80. return VolumeFileName(v.dirIdx, v.Collection, int(v.Id)) + ext
  81. }
  82. // .dat, .cpd, .vif
  83. return VolumeFileName(v.dir, v.Collection, int(v.Id)) + ext
  84. }
  85. func (v *Volume) Version() needle.Version {
  86. v.superBlockAccessLock.Lock()
  87. defer v.superBlockAccessLock.Unlock()
  88. if v.volumeInfo.Version != 0 {
  89. v.SuperBlock.Version = needle.Version(v.volumeInfo.Version)
  90. }
  91. return v.SuperBlock.Version
  92. }
  93. func (v *Volume) FileStat() (datSize uint64, idxSize uint64, modTime time.Time) {
  94. v.dataFileAccessLock.RLock()
  95. defer v.dataFileAccessLock.RUnlock()
  96. if v.DataBackend == nil {
  97. return
  98. }
  99. datFileSize, modTime, e := v.DataBackend.GetStat()
  100. if e == nil {
  101. return uint64(datFileSize), v.nm.IndexFileSize(), modTime
  102. }
  103. glog.V(0).Infof("Failed to read file size %s %v", v.DataBackend.Name(), e)
  104. return // -1 causes integer overflow and the volume to become unwritable.
  105. }
  106. func (v *Volume) ContentSize() uint64 {
  107. v.dataFileAccessLock.RLock()
  108. defer v.dataFileAccessLock.RUnlock()
  109. if v.nm == nil {
  110. return 0
  111. }
  112. return v.nm.ContentSize()
  113. }
  114. func (v *Volume) doIsEmpty() (bool, error) {
  115. // check v.DataBackend.GetStat()
  116. if v.DataBackend == nil {
  117. return false, fmt.Errorf("v.DataBackend is nil")
  118. } else {
  119. datFileSize, _, e := v.DataBackend.GetStat()
  120. if e != nil {
  121. glog.V(0).Infof("Failed to read file size %s %v", v.DataBackend.Name(), e)
  122. return false, fmt.Errorf("v.DataBackend.GetStat(): %v", e)
  123. }
  124. if datFileSize > super_block.SuperBlockSize {
  125. return false, nil
  126. }
  127. }
  128. // check v.nm.ContentSize()
  129. if v.nm != nil {
  130. if v.nm.ContentSize() > 0 {
  131. return false, nil
  132. }
  133. }
  134. return true, nil
  135. }
  136. func (v *Volume) DeletedSize() uint64 {
  137. v.dataFileAccessLock.RLock()
  138. defer v.dataFileAccessLock.RUnlock()
  139. if v.nm == nil {
  140. return 0
  141. }
  142. return v.nm.DeletedSize()
  143. }
  144. func (v *Volume) FileCount() uint64 {
  145. v.dataFileAccessLock.RLock()
  146. defer v.dataFileAccessLock.RUnlock()
  147. if v.nm == nil {
  148. return 0
  149. }
  150. return uint64(v.nm.FileCount())
  151. }
  152. func (v *Volume) DeletedCount() uint64 {
  153. v.dataFileAccessLock.RLock()
  154. defer v.dataFileAccessLock.RUnlock()
  155. if v.nm == nil {
  156. return 0
  157. }
  158. return uint64(v.nm.DeletedCount())
  159. }
  160. func (v *Volume) MaxFileKey() types.NeedleId {
  161. v.dataFileAccessLock.RLock()
  162. defer v.dataFileAccessLock.RUnlock()
  163. if v.nm == nil {
  164. return 0
  165. }
  166. return v.nm.MaxFileKey()
  167. }
  168. func (v *Volume) IndexFileSize() uint64 {
  169. v.dataFileAccessLock.RLock()
  170. defer v.dataFileAccessLock.RUnlock()
  171. if v.nm == nil {
  172. return 0
  173. }
  174. return v.nm.IndexFileSize()
  175. }
// DiskType returns the disk type of the DiskLocation hosting this volume.
func (v *Volume) DiskType() types.DiskType {
	return v.location.DiskType
}
  179. func (v *Volume) SyncToDisk() {
  180. v.dataFileAccessLock.Lock()
  181. defer v.dataFileAccessLock.Unlock()
  182. if v.nm != nil {
  183. if err := v.nm.Sync(); err != nil {
  184. glog.Warningf("Volume Close fail to sync volume idx %d", v.Id)
  185. }
  186. }
  187. if v.DataBackend != nil {
  188. if err := v.DataBackend.Sync(); err != nil {
  189. glog.Warningf("Volume Close fail to sync volume %d", v.Id)
  190. }
  191. }
  192. }
// Close cleanly shuts down this volume.
// It takes the write lock so in-flight reads/writes drain before resources
// are released in doClose.
func (v *Volume) Close() {
	v.dataFileAccessLock.Lock()
	defer v.dataFileAccessLock.Unlock()
	v.doClose()
}
// doClose flushes and releases the needle map and data backend.
// NOTE(review): appears intended to run with dataFileAccessLock held (see
// Close) — confirm at other call sites.
func (v *Volume) doClose() {
	// busy-wait for any in-flight commit compaction to finish before
	// tearing down the backend it is working against
	for v.isCommitCompacting {
		time.Sleep(521 * time.Millisecond)
		glog.Warningf("Volume Close wait for compaction %d", v.Id)
	}
	// flush and drop the index first, then the data backend
	if v.nm != nil {
		if err := v.nm.Sync(); err != nil {
			glog.Warningf("Volume Close fail to sync volume idx %d", v.Id)
		}
		v.nm.Close()
		v.nm = nil
	}
	if v.DataBackend != nil {
		if err := v.DataBackend.Close(); err != nil {
			glog.Warningf("Volume Close fail to sync volume %d", v.Id)
		}
		v.DataBackend = nil
		// keep the volume-count gauge in step with released backends
		stats.VolumeServerVolumeCounter.WithLabelValues(v.Collection, "volume").Dec()
	}
}
// NeedToReplicate reports whether the replica placement calls for more than
// one copy of this volume.
func (v *Volume) NeedToReplicate() bool {
	return v.ReplicaPlacement.GetCopyCount() > 1
}
  222. // volume is expired if modified time + volume ttl < now
  223. // except when volume is empty
  224. // or when the volume does not have a ttl
  225. // or when volumeSizeLimit is 0 when server just starts
  226. func (v *Volume) expired(contentSize uint64, volumeSizeLimit uint64) bool {
  227. if volumeSizeLimit == 0 {
  228. // skip if we don't know size limit
  229. return false
  230. }
  231. if contentSize <= super_block.SuperBlockSize {
  232. return false
  233. }
  234. if v.Ttl == nil || v.Ttl.Minutes() == 0 {
  235. return false
  236. }
  237. glog.V(2).Infof("volume %d now:%v lastModified:%v", v.Id, time.Now().Unix(), v.lastModifiedTsSeconds)
  238. livedMinutes := (time.Now().Unix() - int64(v.lastModifiedTsSeconds)) / 60
  239. glog.V(2).Infof("volume %d ttl:%v lived:%v", v.Id, v.Ttl, livedMinutes)
  240. if int64(v.Ttl.Minutes()) < livedMinutes {
  241. return true
  242. }
  243. return false
  244. }
  245. // wait either maxDelayMinutes or 10% of ttl minutes
  246. func (v *Volume) expiredLongEnough(maxDelayMinutes uint32) bool {
  247. if v.Ttl == nil || v.Ttl.Minutes() == 0 {
  248. return false
  249. }
  250. removalDelay := v.Ttl.Minutes() / 10
  251. if removalDelay > maxDelayMinutes {
  252. removalDelay = maxDelayMinutes
  253. }
  254. if uint64(v.Ttl.Minutes()+removalDelay)*60+v.lastModifiedTsSeconds < uint64(time.Now().Unix()) {
  255. return true
  256. }
  257. return false
  258. }
  259. func (v *Volume) collectStatus() (maxFileKey types.NeedleId, datFileSize int64, modTime time.Time, fileCount, deletedCount, deletedSize uint64, ok bool) {
  260. v.dataFileAccessLock.RLock()
  261. defer v.dataFileAccessLock.RUnlock()
  262. glog.V(4).Infof("collectStatus volume %d", v.Id)
  263. if v.nm == nil || v.DataBackend == nil {
  264. return
  265. }
  266. ok = true
  267. maxFileKey = v.nm.MaxFileKey()
  268. datFileSize, modTime, _ = v.DataBackend.GetStat()
  269. fileCount = uint64(v.nm.FileCount())
  270. deletedCount = uint64(v.nm.DeletedCount())
  271. deletedSize = v.nm.DeletedSize()
  272. return
  273. }
// ToVolumeInformationMessage converts the volume status into the protobuf
// message reported to the master. Returns (0, nil) when the volume is not
// fully loaded (collectStatus reported not ok).
func (v *Volume) ToVolumeInformationMessage() (types.NeedleId, *master_pb.VolumeInformationMessage) {
	maxFileKey, volumeSize, modTime, fileCount, deletedCount, deletedSize, ok := v.collectStatus()
	if !ok {
		return 0, nil
	}
	volumeInfo := &master_pb.VolumeInformationMessage{
		Id: uint32(v.Id),
		Size: uint64(volumeSize),
		Collection: v.Collection,
		FileCount: fileCount,
		DeleteCount: deletedCount,
		DeletedByteCount: deletedSize,
		ReadOnly: v.IsReadOnly(),
		ReplicaPlacement: uint32(v.ReplicaPlacement.Byte()),
		Version: uint32(v.Version()),
		Ttl: v.Ttl.ToUint32(),
		CompactRevision: uint32(v.SuperBlock.CompactionRevision),
		ModifiedAtSecond: modTime.Unix(),
		DiskType: string(v.location.DiskType),
	}
	// remote-tier name/key are empty strings when the volume is purely local
	volumeInfo.RemoteStorageName, volumeInfo.RemoteStorageKey = v.RemoteStorageNameKey()
	return maxFileKey, volumeInfo
}
  297. func (v *Volume) RemoteStorageNameKey() (storageName, storageKey string) {
  298. if v.volumeInfo == nil {
  299. return
  300. }
  301. if len(v.volumeInfo.GetFiles()) == 0 {
  302. return
  303. }
  304. return v.volumeInfo.GetFiles()[0].BackendName(), v.volumeInfo.GetFiles()[0].GetKey()
  305. }
  306. func (v *Volume) IsReadOnly() bool {
  307. v.noWriteLock.RLock()
  308. defer v.noWriteLock.RUnlock()
  309. return v.noWriteOrDelete || v.noWriteCanDelete || v.location.isDiskSpaceLow
  310. }