You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

666 lines
21 KiB

6 years ago
6 years ago
6 years ago
12 years ago
12 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
5 years ago
10 months ago
4 years ago
4 years ago
4 years ago
5 years ago
6 years ago
6 years ago
6 years ago
6 years ago
5 years ago
  1. package storage
  2. import (
  3. "fmt"
  4. "io"
  5. "path/filepath"
  6. "strings"
  7. "sync"
  8. "sync/atomic"
  9. "github.com/seaweedfs/seaweedfs/weed/pb"
  10. "github.com/seaweedfs/seaweedfs/weed/storage/volume_info"
  11. "github.com/seaweedfs/seaweedfs/weed/util"
  12. "google.golang.org/grpc"
  13. "github.com/seaweedfs/seaweedfs/weed/glog"
  14. "github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
  15. "github.com/seaweedfs/seaweedfs/weed/stats"
  16. "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
  17. "github.com/seaweedfs/seaweedfs/weed/storage/needle"
  18. "github.com/seaweedfs/seaweedfs/weed/storage/super_block"
  19. . "github.com/seaweedfs/seaweedfs/weed/storage/types"
  20. )
const (
	// MAX_TTL_VOLUME_REMOVAL_DELAY is the grace period, in minutes, that an
	// expired TTL volume is kept on disk before it is physically deleted
	// (see the expiredLongEnough check in CollectHeartbeat).
	MAX_TTL_VOLUME_REMOVAL_DELAY = 10 // 10 minutes
)
// ReadOption controls how a needle read is performed and carries read
// status back to the caller. The "request" fields are set by the caller;
// the "response" fields are filled in during the read.
type ReadOption struct {
	// request
	ReadDeleted     bool // return needles even if they are marked deleted
	AttemptMetaOnly bool // prefer reading metadata only, fall back to full read
	MustMetaOnly    bool // fail rather than read the full needle body
	// response
	IsMetaOnly     bool // read status: whether only metadata was read
	VolumeRevision uint16
	IsOutOfRange   bool // whether read over MaxPossibleVolumeSize
	// If HasSlowRead is set to true:
	//  * read requests and write requests compete for the lock.
	//  * large file read P99 latency on busy sites will go up, due to the need to get locks multiple times.
	//  * write requests will see lower latency.
	// If HasSlowRead is set to false:
	//  * read requests should complete asap, not blocking other requests.
	//  * write requests may see high latency when downloading large files.
	HasSlowRead bool
	// increasing ReadBufferSize can reduce the number of get locks times and shorten read P99 latency.
	// but will increase memory usage a bit. Use with hasSlowRead normally.
	ReadBufferSize int
}
  45. /*
  46. * A VolumeServer contains one Store
  47. */
/*
 * A VolumeServer contains one Store
 */
type Store struct {
	MasterAddress   pb.ServerAddress
	grpcDialOption  grpc.DialOption
	volumeSizeLimit uint64      // read from the master; access via Get/SetVolumeSizeLimit (atomic)
	preallocate     atomic.Bool // read from the master
	Ip              string
	Port            int
	GrpcPort        int
	PublicUrl       string
	Locations       []*DiskLocation // one entry per configured data directory
	dataCenter      string          // optional information, overwriting master setting if exists
	rack            string          // optional information, overwriting master setting if exists
	connected       bool
	NeedleMapKind   NeedleMapKind
	// channels used to push incremental volume/EC-shard changes to the
	// master, instead of waiting for the next full heartbeat
	NewVolumesChan      chan master_pb.VolumeShortInformationMessage
	DeletedVolumesChan  chan master_pb.VolumeShortInformationMessage
	NewEcShardsChan     chan master_pb.VolumeEcShardInformationMessage
	DeletedEcShardsChan chan master_pb.VolumeEcShardInformationMessage
	isStopping          bool // set by SetStopping during shutdown
}
  68. func (s *Store) String() (str string) {
  69. str = fmt.Sprintf("Ip:%s, Port:%d, GrpcPort:%d PublicUrl:%s, dataCenter:%s, rack:%s, connected:%v, volumeSizeLimit:%d", s.Ip, s.Port, s.GrpcPort, s.PublicUrl, s.dataCenter, s.rack, s.connected, s.GetVolumeSizeLimit())
  70. return
  71. }
  72. func NewStore(grpcDialOption grpc.DialOption, ip string, port int, grpcPort int, publicUrl string, dirnames []string, maxVolumeCounts []int32,
  73. minFreeSpaces []util.MinFreeSpace, idxFolder string, needleMapKind NeedleMapKind, diskTypes []DiskType, ldbTimeout int64) (s *Store) {
  74. s = &Store{grpcDialOption: grpcDialOption, Port: port, Ip: ip, GrpcPort: grpcPort, PublicUrl: publicUrl, NeedleMapKind: needleMapKind}
  75. s.Locations = make([]*DiskLocation, 0)
  76. var wg sync.WaitGroup
  77. for i := 0; i < len(dirnames); i++ {
  78. location := NewDiskLocation(dirnames[i], int32(maxVolumeCounts[i]), minFreeSpaces[i], idxFolder, diskTypes[i])
  79. s.Locations = append(s.Locations, location)
  80. stats.VolumeServerMaxVolumeCounter.Add(float64(maxVolumeCounts[i]))
  81. wg.Add(1)
  82. go func() {
  83. defer wg.Done()
  84. location.loadExistingVolumes(needleMapKind, ldbTimeout)
  85. }()
  86. }
  87. wg.Wait()
  88. s.NewVolumesChan = make(chan master_pb.VolumeShortInformationMessage, 3)
  89. s.DeletedVolumesChan = make(chan master_pb.VolumeShortInformationMessage, 3)
  90. s.NewEcShardsChan = make(chan master_pb.VolumeEcShardInformationMessage, 3)
  91. s.DeletedEcShardsChan = make(chan master_pb.VolumeEcShardInformationMessage, 3)
  92. return
  93. }
  94. func (s *Store) AddVolume(volumeId needle.VolumeId, collection string, needleMapKind NeedleMapKind, replicaPlacement string, ttlString string, preallocate int64, MemoryMapMaxSizeMb uint32, diskType DiskType, ldbTimeout int64) error {
  95. rt, e := super_block.NewReplicaPlacementFromString(replicaPlacement)
  96. if e != nil {
  97. return e
  98. }
  99. ttl, e := needle.ReadTTL(ttlString)
  100. if e != nil {
  101. return e
  102. }
  103. e = s.addVolume(volumeId, collection, needleMapKind, rt, ttl, preallocate, MemoryMapMaxSizeMb, diskType, ldbTimeout)
  104. return e
  105. }
  106. func (s *Store) DeleteCollection(collection string) (e error) {
  107. for _, location := range s.Locations {
  108. e = location.DeleteCollectionFromDiskLocation(collection)
  109. if e != nil {
  110. return
  111. }
  112. stats.DeleteCollectionMetrics(collection)
  113. // let the heartbeat send the list of volumes, instead of sending the deleted volume ids to DeletedVolumesChan
  114. }
  115. return
  116. }
  117. func (s *Store) findVolume(vid needle.VolumeId) *Volume {
  118. for _, location := range s.Locations {
  119. if v, found := location.FindVolume(vid); found {
  120. return v
  121. }
  122. }
  123. return nil
  124. }
  125. func (s *Store) FindFreeLocation(filterFn func(location *DiskLocation) bool) (ret *DiskLocation) {
  126. max := int32(0)
  127. for _, location := range s.Locations {
  128. if filterFn != nil && !filterFn(location) {
  129. continue
  130. }
  131. if location.isDiskSpaceLow {
  132. continue
  133. }
  134. currentFreeCount := location.MaxVolumeCount - int32(location.VolumesLen())
  135. currentFreeCount *= erasure_coding.DataShardsCount
  136. currentFreeCount -= int32(location.EcShardCount())
  137. currentFreeCount /= erasure_coding.DataShardsCount
  138. if currentFreeCount > max {
  139. max = currentFreeCount
  140. ret = location
  141. }
  142. }
  143. return ret
  144. }
  145. func (s *Store) addVolume(vid needle.VolumeId, collection string, needleMapKind NeedleMapKind, replicaPlacement *super_block.ReplicaPlacement, ttl *needle.TTL, preallocate int64, memoryMapMaxSizeMb uint32, diskType DiskType, ldbTimeout int64) error {
  146. if s.findVolume(vid) != nil {
  147. return fmt.Errorf("Volume Id %d already exists!", vid)
  148. }
  149. if location := s.FindFreeLocation(func(location *DiskLocation) bool {
  150. return location.DiskType == diskType
  151. }); location != nil {
  152. glog.V(0).Infof("In dir %s adds volume:%v collection:%s replicaPlacement:%v ttl:%v",
  153. location.Directory, vid, collection, replicaPlacement, ttl)
  154. if volume, err := NewVolume(location.Directory, location.IdxDirectory, collection, vid, needleMapKind, replicaPlacement, ttl, preallocate, memoryMapMaxSizeMb, ldbTimeout); err == nil {
  155. location.SetVolume(vid, volume)
  156. glog.V(0).Infof("add volume %d", vid)
  157. s.NewVolumesChan <- master_pb.VolumeShortInformationMessage{
  158. Id: uint32(vid),
  159. Collection: collection,
  160. ReplicaPlacement: uint32(replicaPlacement.Byte()),
  161. Version: uint32(volume.Version()),
  162. Ttl: ttl.ToUint32(),
  163. DiskType: string(diskType),
  164. }
  165. return nil
  166. } else {
  167. return err
  168. }
  169. }
  170. return fmt.Errorf("No more free space left")
  171. }
  172. func (s *Store) VolumeInfos() (allStats []*VolumeInfo) {
  173. for _, location := range s.Locations {
  174. stats := collectStatsForOneLocation(location)
  175. allStats = append(allStats, stats...)
  176. }
  177. sortVolumeInfos(allStats)
  178. return allStats
  179. }
  180. func collectStatsForOneLocation(location *DiskLocation) (stats []*VolumeInfo) {
  181. location.volumesLock.RLock()
  182. defer location.volumesLock.RUnlock()
  183. for k, v := range location.volumes {
  184. s := collectStatForOneVolume(k, v)
  185. stats = append(stats, s)
  186. }
  187. return stats
  188. }
// collectStatForOneVolume builds a VolumeInfo snapshot for one volume.
// Counters backed by the needle map (file count, deleted count/size,
// content size) are filled in only while holding the volume's data-file
// read lock and only if the needle map is loaded; otherwise they stay zero.
func collectStatForOneVolume(vid needle.VolumeId, v *Volume) (s *VolumeInfo) {
	s = &VolumeInfo{
		Id:               vid,
		Collection:       v.Collection,
		ReplicaPlacement: v.ReplicaPlacement,
		Version:          v.Version(),
		ReadOnly:         v.IsReadOnly(),
		Ttl:              v.Ttl,
		CompactRevision:  uint32(v.CompactionRevision),
		DiskType:         v.DiskType().String(),
	}
	s.RemoteStorageName, s.RemoteStorageKey = v.RemoteStorageNameKey()

	v.dataFileAccessLock.RLock()
	defer v.dataFileAccessLock.RUnlock()

	// no needle map loaded (e.g. remote or unloaded volume): leave counters at zero
	if v.nm == nil {
		return
	}

	s.FileCount = v.nm.FileCount()
	s.DeleteCount = v.nm.DeletedCount()
	s.DeletedByteCount = v.nm.DeletedSize()
	s.Size = v.nm.ContentSize()
	return
}
// SetDataCenter overrides the data center this store reports to the master.
func (s *Store) SetDataCenter(dataCenter string) {
	s.dataCenter = dataCenter
}
// SetRack overrides the rack this store reports to the master.
func (s *Store) SetRack(rack string) {
	s.rack = rack
}
// GetDataCenter returns the configured data center name (may be empty).
func (s *Store) GetDataCenter() string {
	return s.dataCenter
}
// GetRack returns the configured rack name (may be empty).
func (s *Store) GetRack() string {
	return s.rack
}
// CollectHeartbeat assembles the heartbeat message for the master:
// per-disk-type max volume counts, information for every live volume, the
// highest needle key seen, and this server's identity/location. As a side
// effect it also deletes TTL volumes that have been expired past the grace
// period or that hit an IO error, prunes expired EC volumes, and refreshes
// the per-collection Prometheus gauges.
func (s *Store) CollectHeartbeat() *master_pb.Heartbeat {
	var volumeMessages []*master_pb.VolumeInformationMessage
	maxVolumeCounts := make(map[string]uint32)
	var maxFileKey NeedleId
	collectionVolumeSize := make(map[string]int64)
	collectionVolumeDeletedBytes := make(map[string]int64)
	collectionVolumeReadOnlyCount := make(map[string]map[string]uint8)
	for _, location := range s.Locations {
		var deleteVids []needle.VolumeId
		maxVolumeCounts[string(location.DiskType)] += uint32(location.MaxVolumeCount)
		// hold only the read lock while scanning; actual deletion happens
		// below under the write lock
		location.volumesLock.RLock()
		for _, v := range location.volumes {
			curMaxFileKey, volumeMessage := v.ToVolumeInformationMessage()
			if volumeMessage == nil {
				continue
			}
			if maxFileKey < curMaxFileKey {
				maxFileKey = curMaxFileKey
			}
			shouldDeleteVolume := false
			if !v.expired(volumeMessage.Size, s.GetVolumeSizeLimit()) {
				volumeMessages = append(volumeMessages, volumeMessage)
			} else {
				// expired TTL volume: physically remove only after the grace period
				if v.expiredLongEnough(MAX_TTL_VOLUME_REMOVAL_DELAY) {
					deleteVids = append(deleteVids, v.Id)
					shouldDeleteVolume = true
				} else {
					glog.V(0).Infof("volume %d is expired", v.Id)
				}
				// a volume with an IO error is removed regardless of the grace period
				if v.lastIoError != nil {
					deleteVids = append(deleteVids, v.Id)
					shouldDeleteVolume = true
					glog.Warningf("volume %d has IO error: %v", v.Id, v.lastIoError)
				}
			}
			if _, exist := collectionVolumeSize[v.Collection]; !exist {
				collectionVolumeSize[v.Collection] = 0
				collectionVolumeDeletedBytes[v.Collection] = 0
			}
			if !shouldDeleteVolume {
				collectionVolumeSize[v.Collection] += int64(volumeMessage.Size)
				collectionVolumeDeletedBytes[v.Collection] += int64(volumeMessage.DeletedByteCount)
			} else {
				// volume is going away: retract its size; drop the entry entirely
				// when the collection has no space left
				collectionVolumeSize[v.Collection] -= int64(volumeMessage.Size)
				if collectionVolumeSize[v.Collection] <= 0 {
					delete(collectionVolumeSize, v.Collection)
				}
			}
			if _, exist := collectionVolumeReadOnlyCount[v.Collection]; !exist {
				collectionVolumeReadOnlyCount[v.Collection] = map[string]uint8{
					stats.IsReadOnly:       0,
					stats.NoWriteOrDelete:  0,
					stats.NoWriteCanDelete: 0,
					stats.IsDiskSpaceLow:   0,
				}
			}
			if !shouldDeleteVolume && v.IsReadOnly() {
				collectionVolumeReadOnlyCount[v.Collection][stats.IsReadOnly] += 1
				if v.noWriteOrDelete {
					collectionVolumeReadOnlyCount[v.Collection][stats.NoWriteOrDelete] += 1
				}
				if v.noWriteCanDelete {
					collectionVolumeReadOnlyCount[v.Collection][stats.NoWriteCanDelete] += 1
				}
				if v.location.isDiskSpaceLow {
					collectionVolumeReadOnlyCount[v.Collection][stats.IsDiskSpaceLow] += 1
				}
			}
		}
		location.volumesLock.RUnlock()
		if len(deleteVids) > 0 {
			// delete expired volumes.
			location.volumesLock.Lock()
			for _, vid := range deleteVids {
				found, err := location.deleteVolumeById(vid, false)
				if err == nil {
					if found {
						glog.V(0).Infof("volume %d is deleted", vid)
					}
				} else {
					glog.Warningf("delete volume %d: %v", vid, err)
				}
			}
			location.volumesLock.Unlock()
		}
	}
	// delete expired ec volumes
	ecVolumeMessages, deletedEcVolumes := s.deleteExpiredEcVolumes()
	var uuidList []string
	for _, loc := range s.Locations {
		uuidList = append(uuidList, loc.DirectoryUuid)
	}
	// refresh per-collection gauges from the freshly collected totals
	for col, size := range collectionVolumeSize {
		stats.VolumeServerDiskSizeGauge.WithLabelValues(col, "normal").Set(float64(size))
	}
	for col, deletedBytes := range collectionVolumeDeletedBytes {
		stats.VolumeServerDiskSizeGauge.WithLabelValues(col, "deleted_bytes").Set(float64(deletedBytes))
	}
	for col, types := range collectionVolumeReadOnlyCount {
		for t, count := range types {
			stats.VolumeServerReadOnlyVolumeGauge.WithLabelValues(col, t).Set(float64(count))
		}
	}
	return &master_pb.Heartbeat{
		Ip:              s.Ip,
		Port:            uint32(s.Port),
		GrpcPort:        uint32(s.GrpcPort),
		PublicUrl:       s.PublicUrl,
		MaxVolumeCounts: maxVolumeCounts,
		MaxFileKey:      NeedleIdToUint64(maxFileKey),
		DataCenter:      s.dataCenter,
		Rack:            s.rack,
		Volumes:         volumeMessages,
		DeletedEcShards: deletedEcVolumes,
		HasNoVolumes:    len(volumeMessages) == 0,
		HasNoEcShards:   len(ecVolumeMessages) == 0,
		LocationUuids:   uuidList,
	}
}
// deleteExpiredEcVolumes removes EC volumes whose destroy time has passed.
// It returns the shard information of the EC volumes that remain (ecShards)
// and of those that were successfully deleted (deleted). EC volumes whose
// deletion fails are kept in ecShards so the master still sees them.
func (s *Store) deleteExpiredEcVolumes() (ecShards, deleted []*master_pb.VolumeEcShardInformationMessage) {
	for _, location := range s.Locations {
		// Collect ecVolume to be deleted
		var toDeleteEvs []*erasure_coding.EcVolume
		location.ecVolumesLock.RLock()
		for _, ev := range location.ecVolumes {
			if ev.IsTimeToDestroy() {
				toDeleteEvs = append(toDeleteEvs, ev)
			} else {
				messages := ev.ToVolumeEcShardInformationMessage()
				ecShards = append(ecShards, messages...)
			}
		}
		location.ecVolumesLock.RUnlock()

		// Delete expired volumes
		for _, ev := range toDeleteEvs {
			messages := ev.ToVolumeEcShardInformationMessage()
			// deleteEcVolumeById has its own lock
			err := location.deleteEcVolumeById(ev.VolumeId)
			if err != nil {
				// deletion failed: keep reporting the volume as live
				ecShards = append(ecShards, messages...)
				glog.Errorf("delete EcVolume err %d: %v", ev.VolumeId, err)
				continue
			}
			// No need for additional lock here since we only need the messages
			// from volumes that were already collected
			deleted = append(deleted, messages...)
		}
	}
	return
}
// SetStopping marks the store as shutting down and propagates the signal
// to every disk location.
// NOTE(review): isStopping is a plain bool written here and read elsewhere
// (e.g. WriteVolumeNeedle) without synchronization — confirm this race is
// acceptable for a shutdown flag.
func (s *Store) SetStopping() {
	s.isStopping = true
	for _, location := range s.Locations {
		location.SetStopping()
	}
}
  380. func (s *Store) LoadNewVolumes() {
  381. for _, location := range s.Locations {
  382. location.loadExistingVolumes(s.NeedleMapKind, 0)
  383. }
  384. }
  385. func (s *Store) Close() {
  386. for _, location := range s.Locations {
  387. location.Close()
  388. }
  389. }
// WriteVolumeNeedle writes needle n into volume i, optionally validating
// the cookie. It reports whether the stored content was unchanged, and
// fails when the volume is read-only or not mounted on this server.
func (s *Store) WriteVolumeNeedle(i needle.VolumeId, n *needle.Needle, checkCookie bool, fsync bool) (isUnchanged bool, err error) {
	if v := s.findVolume(i); v != nil {
		if v.IsReadOnly() {
			err = fmt.Errorf("volume %d is read only", i)
			return
		}
		// NOTE(review): fsync is only honored while the server is stopping
		// (fsync && s.isStopping) — verify this is intended rather than
		// fsync || s.isStopping.
		_, _, isUnchanged, err = v.writeNeedle2(n, checkCookie, fsync && s.isStopping)
		return
	}
	glog.V(0).Infoln("volume", i, "not found!")
	err = fmt.Errorf("volume %d not found on %s:%d", i, s.Ip, s.Port)
	return
}
  403. func (s *Store) DeleteVolumeNeedle(i needle.VolumeId, n *needle.Needle) (Size, error) {
  404. if v := s.findVolume(i); v != nil {
  405. if v.noWriteOrDelete {
  406. return 0, fmt.Errorf("volume %d is read only", i)
  407. }
  408. return v.deleteNeedle2(n)
  409. }
  410. return 0, fmt.Errorf("volume %d not found on %s:%d", i, s.Ip, s.Port)
  411. }
  412. func (s *Store) ReadVolumeNeedle(i needle.VolumeId, n *needle.Needle, readOption *ReadOption, onReadSizeFn func(size Size)) (int, error) {
  413. if v := s.findVolume(i); v != nil {
  414. return v.readNeedle(n, readOption, onReadSizeFn)
  415. }
  416. return 0, fmt.Errorf("volume %d not found", i)
  417. }
  418. func (s *Store) ReadVolumeNeedleMetaAt(i needle.VolumeId, n *needle.Needle, offset int64, size int32) error {
  419. if v := s.findVolume(i); v != nil {
  420. return v.readNeedleMetaAt(n, offset, size)
  421. }
  422. return fmt.Errorf("volume %d not found", i)
  423. }
  424. func (s *Store) ReadVolumeNeedleDataInto(i needle.VolumeId, n *needle.Needle, readOption *ReadOption, writer io.Writer, offset int64, size int64) error {
  425. if v := s.findVolume(i); v != nil {
  426. return v.readNeedleDataInto(n, readOption, writer, offset, size)
  427. }
  428. return fmt.Errorf("volume %d not found", i)
  429. }
// GetVolume returns the mounted volume with the given id, or nil.
func (s *Store) GetVolume(i needle.VolumeId) *Volume {
	return s.findVolume(i)
}
  433. func (s *Store) HasVolume(i needle.VolumeId) bool {
  434. v := s.findVolume(i)
  435. return v != nil
  436. }
  437. func (s *Store) MarkVolumeReadonly(i needle.VolumeId, persist bool) error {
  438. v := s.findVolume(i)
  439. if v == nil {
  440. return fmt.Errorf("volume %d not found", i)
  441. }
  442. v.noWriteLock.Lock()
  443. v.noWriteOrDelete = true
  444. if persist {
  445. v.PersistReadOnly(true)
  446. }
  447. v.noWriteLock.Unlock()
  448. return nil
  449. }
  450. func (s *Store) MarkVolumeWritable(i needle.VolumeId) error {
  451. v := s.findVolume(i)
  452. if v == nil {
  453. return fmt.Errorf("volume %d not found", i)
  454. }
  455. v.noWriteLock.Lock()
  456. v.noWriteOrDelete = false
  457. v.PersistReadOnly(false)
  458. v.noWriteLock.Unlock()
  459. return nil
  460. }
  461. func (s *Store) MountVolume(i needle.VolumeId) error {
  462. for _, location := range s.Locations {
  463. if found := location.LoadVolume(i, s.NeedleMapKind); found == true {
  464. glog.V(0).Infof("mount volume %d", i)
  465. v := s.findVolume(i)
  466. s.NewVolumesChan <- master_pb.VolumeShortInformationMessage{
  467. Id: uint32(v.Id),
  468. Collection: v.Collection,
  469. ReplicaPlacement: uint32(v.ReplicaPlacement.Byte()),
  470. Version: uint32(v.Version()),
  471. Ttl: v.Ttl.ToUint32(),
  472. DiskType: string(v.location.DiskType),
  473. }
  474. return nil
  475. }
  476. }
  477. return fmt.Errorf("volume %d not found on disk", i)
  478. }
  479. func (s *Store) UnmountVolume(i needle.VolumeId) error {
  480. v := s.findVolume(i)
  481. if v == nil {
  482. return nil
  483. }
  484. message := master_pb.VolumeShortInformationMessage{
  485. Id: uint32(v.Id),
  486. Collection: v.Collection,
  487. ReplicaPlacement: uint32(v.ReplicaPlacement.Byte()),
  488. Version: uint32(v.Version()),
  489. Ttl: v.Ttl.ToUint32(),
  490. DiskType: string(v.location.DiskType),
  491. }
  492. for _, location := range s.Locations {
  493. err := location.UnloadVolume(i)
  494. if err == nil {
  495. glog.V(0).Infof("UnmountVolume %d", i)
  496. s.DeletedVolumesChan <- message
  497. return nil
  498. } else if err == ErrVolumeNotFound {
  499. continue
  500. }
  501. }
  502. return fmt.Errorf("volume %d not found on disk", i)
  503. }
  504. func (s *Store) DeleteVolume(i needle.VolumeId, onlyEmpty bool) error {
  505. v := s.findVolume(i)
  506. if v == nil {
  507. return fmt.Errorf("delete volume %d not found on disk", i)
  508. }
  509. message := master_pb.VolumeShortInformationMessage{
  510. Id: uint32(v.Id),
  511. Collection: v.Collection,
  512. ReplicaPlacement: uint32(v.ReplicaPlacement.Byte()),
  513. Version: uint32(v.Version()),
  514. Ttl: v.Ttl.ToUint32(),
  515. DiskType: string(v.location.DiskType),
  516. }
  517. for _, location := range s.Locations {
  518. err := location.DeleteVolume(i, onlyEmpty)
  519. if err == nil {
  520. glog.V(0).Infof("DeleteVolume %d", i)
  521. s.DeletedVolumesChan <- message
  522. return nil
  523. } else if err == ErrVolumeNotFound {
  524. continue
  525. } else if err == ErrVolumeNotEmpty {
  526. return fmt.Errorf("DeleteVolume %d: %v", i, err)
  527. } else {
  528. glog.Errorf("DeleteVolume %d: %v", i, err)
  529. }
  530. }
  531. return fmt.Errorf("volume %d not found on disk", i)
  532. }
  533. func (s *Store) ConfigureVolume(i needle.VolumeId, replication string) error {
  534. for _, location := range s.Locations {
  535. fileInfo, found := location.LocateVolume(i)
  536. if !found {
  537. continue
  538. }
  539. // load, modify, save
  540. baseFileName := strings.TrimSuffix(fileInfo.Name(), filepath.Ext(fileInfo.Name()))
  541. vifFile := filepath.Join(location.Directory, baseFileName+".vif")
  542. volumeInfo, _, _, err := volume_info.MaybeLoadVolumeInfo(vifFile)
  543. if err != nil {
  544. return fmt.Errorf("volume %d failed to load vif: %v", i, err)
  545. }
  546. volumeInfo.Replication = replication
  547. err = volume_info.SaveVolumeInfo(vifFile, volumeInfo)
  548. if err != nil {
  549. return fmt.Errorf("volume %d failed to save vif: %v", i, err)
  550. }
  551. return nil
  552. }
  553. return fmt.Errorf("volume %d not found on disk", i)
  554. }
// SetVolumeSizeLimit atomically stores the volume size limit received from
// the master.
func (s *Store) SetVolumeSizeLimit(x uint64) {
	atomic.StoreUint64(&s.volumeSizeLimit, x)
}
// GetVolumeSizeLimit atomically reads the volume size limit; zero means the
// master has not sent one yet.
func (s *Store) GetVolumeSizeLimit() uint64 {
	return atomic.LoadUint64(&s.volumeSizeLimit)
}
// SetPreallocate atomically stores the preallocate flag received from the
// master.
func (s *Store) SetPreallocate(x bool) {
	s.preallocate.Store(x)
}
// GetPreallocate atomically reads the preallocate flag.
func (s *Store) GetPreallocate() bool {
	return s.preallocate.Load()
}
// MaybeAdjustVolumeMax recomputes each disk location's MaxVolumeCount from
// the actual free disk space, for locations whose limit was not explicitly
// configured (OriginalMaxVolumeCount == 0). It reports whether any
// location's limit changed and refreshes the max-volume Prometheus counter.
func (s *Store) MaybeAdjustVolumeMax() (hasChanges bool) {
	volumeSizeLimit := s.GetVolumeSizeLimit()
	if volumeSizeLimit == 0 {
		// volume size limit not yet received from the master; nothing to compute
		return
	}
	var newMaxVolumeCount int32
	for _, diskLocation := range s.Locations {
		if diskLocation.OriginalMaxVolumeCount == 0 {
			currentMaxVolumeCount := atomic.LoadInt32(&diskLocation.MaxVolumeCount)
			diskStatus := stats.NewDiskStatus(diskLocation.Directory)
			var unusedSpace uint64 = 0
			unclaimedSpaces := int64(diskStatus.Free)
			if !s.GetPreallocate() {
				// without preallocation, space that growing volumes will still
				// claim must be subtracted from the free space
				unusedSpace = diskLocation.UnUsedSpace(volumeSizeLimit)
				unclaimedSpaces -= int64(unusedSpace)
			}
			volCount := diskLocation.VolumesLen()
			ecShardCount := diskLocation.EcShardCount()
			// EC shards are charged at 1/DataShardsCount of a volume, rounded up
			maxVolumeCount := int32(volCount) + int32((ecShardCount+erasure_coding.DataShardsCount)/erasure_coding.DataShardsCount)
			if unclaimedSpaces > int64(volumeSizeLimit) {
				// add one slot per volume's worth of unclaimed space, keeping
				// one volume's worth of slack
				maxVolumeCount += int32(uint64(unclaimedSpaces)/volumeSizeLimit) - 1
			}
			newMaxVolumeCount = newMaxVolumeCount + maxVolumeCount
			atomic.StoreInt32(&diskLocation.MaxVolumeCount, maxVolumeCount)
			glog.V(4).Infof("disk %s max %d unclaimedSpace:%dMB, unused:%dMB volumeSizeLimit:%dMB",
				diskLocation.Directory, maxVolumeCount, unclaimedSpaces/1024/1024, unusedSpace/1024/1024, volumeSizeLimit/1024/1024)
			hasChanges = hasChanges || currentMaxVolumeCount != atomic.LoadInt32(&diskLocation.MaxVolumeCount)
		} else {
			// explicitly configured limits are left untouched
			newMaxVolumeCount = newMaxVolumeCount + diskLocation.OriginalMaxVolumeCount
		}
	}
	stats.VolumeServerMaxVolumeCounter.Set(float64(newMaxVolumeCount))
	return
}