You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

581 lines
16 KiB

6 years ago
6 years ago
4 years ago
6 years ago
6 years ago
adding locking to avoid nil VolumeLocationList fix panic: runtime error: invalid memory address or nil pointer dereference Oct 22 00:53:44 bedb-master1 weed[8055]: [signal SIGSEGV: segmentation violation code=0x1 addr=0x8 pc=0x17658da] Oct 22 00:53:44 bedb-master1 weed[8055]: goroutine 310 [running]: Oct 22 00:53:44 bedb-master1 weed[8055]: github.com/chrislusf/seaweedfs/weed/topology.(*VolumeLocationList).Length(...) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/seaweedfs/weed/topology/volume_location_list.go:35 Oct 22 00:53:44 bedb-master1 weed[8055]: github.com/chrislusf/seaweedfs/weed/topology.(*VolumeLayout).enoughCopies(...) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/seaweedfs/weed/topology/volume_layout.go:376 Oct 22 00:53:44 bedb-master1 weed[8055]: github.com/chrislusf/seaweedfs/weed/topology.(*VolumeLayout).ensureCorrectWritables(0xc000111d50, 0xc000b55438) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/seaweedfs/weed/topology/volume_layout.go:202 +0x5a Oct 22 00:53:44 bedb-master1 weed[8055]: github.com/chrislusf/seaweedfs/weed/topology.(*Topology).SyncDataNodeRegistration(0xc00042ac60, 0xc001454d30, 0x1, 0x1, 0xc0005fc000, 0xc00135de40, 0x4, 0xc00135de50, 0x10, 0x10d, ...) 
Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/seaweedfs/weed/topology/topology.go:224 +0x616 Oct 22 00:53:44 bedb-master1 weed[8055]: github.com/chrislusf/seaweedfs/weed/server.(*MasterServer).SendHeartbeat(0xc000162700, 0x23b97c0, 0xc000ae2c90, 0x0, 0x0) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/seaweedfs/weed/server/master_grpc_server.go:106 +0x325 Oct 22 00:53:44 bedb-master1 weed[8055]: github.com/chrislusf/seaweedfs/weed/pb/master_pb._Seaweed_SendHeartbeat_Handler(0x1f8e7c0, 0xc000162700, 0x23b0a60, 0xc00024b440, 0x3172c38, 0xc000ab7100) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/seaweedfs/weed/pb/master_pb/master.pb.go:4250 +0xad Oct 22 00:53:44 bedb-master1 weed[8055]: google.golang.org/grpc.(*Server).processStreamingRPC(0xc0001f31e0, 0x23bb800, 0xc000ac5500, 0xc000ab7100, 0xc0001fea80, 0x311fec0, 0x0, 0x0, 0x0) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/go/pkg/mod/google.golang.org/grpc@v1.29.1/server.go:1329 +0xcd8 Oct 22 00:53:44 bedb-master1 weed[8055]: google.golang.org/grpc.(*Server).handleStream(0xc0001f31e0, 0x23bb800, 0xc000ac5500, 0xc000ab7100, 0x0) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/go/pkg/mod/google.golang.org/grpc@v1.29.1/server.go:1409 +0xc5c Oct 22 00:53:44 bedb-master1 weed[8055]: google.golang.org/grpc.(*Server).serveStreams.func1.1(0xc0001ce8b0, 0xc0001f31e0, 0x23bb800, 0xc000ac5500, 0xc000ab7100) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/go/pkg/mod/google.golang.org/grpc@v1.29.1/server.go:746 +0xa5 Oct 22 00:53:44 bedb-master1 weed[8055]: created by google.golang.org/grpc.(*Server).serveStreams.func1 Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/go/pkg/mod/google.golang.org/grpc@v1.29.1/server.go:744 +0xa5 Oct 22 00:53:44 bedb-master1 systemd[1]: weedmaster.service: Main process exited, code=exited, status=2/INVALIDARGUMENT Oct 22 00:53:44 bedb-master1 systemd[1]: weedmaster.service: Failed with result 'exit-code'.
4 years ago
adding locking to avoid nil VolumeLocationList fix panic: runtime error: invalid memory address or nil pointer dereference Oct 22 00:53:44 bedb-master1 weed[8055]: [signal SIGSEGV: segmentation violation code=0x1 addr=0x8 pc=0x17658da] Oct 22 00:53:44 bedb-master1 weed[8055]: goroutine 310 [running]: Oct 22 00:53:44 bedb-master1 weed[8055]: github.com/chrislusf/seaweedfs/weed/topology.(*VolumeLocationList).Length(...) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/seaweedfs/weed/topology/volume_location_list.go:35 Oct 22 00:53:44 bedb-master1 weed[8055]: github.com/chrislusf/seaweedfs/weed/topology.(*VolumeLayout).enoughCopies(...) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/seaweedfs/weed/topology/volume_layout.go:376 Oct 22 00:53:44 bedb-master1 weed[8055]: github.com/chrislusf/seaweedfs/weed/topology.(*VolumeLayout).ensureCorrectWritables(0xc000111d50, 0xc000b55438) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/seaweedfs/weed/topology/volume_layout.go:202 +0x5a Oct 22 00:53:44 bedb-master1 weed[8055]: github.com/chrislusf/seaweedfs/weed/topology.(*Topology).SyncDataNodeRegistration(0xc00042ac60, 0xc001454d30, 0x1, 0x1, 0xc0005fc000, 0xc00135de40, 0x4, 0xc00135de50, 0x10, 0x10d, ...) 
Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/seaweedfs/weed/topology/topology.go:224 +0x616 Oct 22 00:53:44 bedb-master1 weed[8055]: github.com/chrislusf/seaweedfs/weed/server.(*MasterServer).SendHeartbeat(0xc000162700, 0x23b97c0, 0xc000ae2c90, 0x0, 0x0) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/seaweedfs/weed/server/master_grpc_server.go:106 +0x325 Oct 22 00:53:44 bedb-master1 weed[8055]: github.com/chrislusf/seaweedfs/weed/pb/master_pb._Seaweed_SendHeartbeat_Handler(0x1f8e7c0, 0xc000162700, 0x23b0a60, 0xc00024b440, 0x3172c38, 0xc000ab7100) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/seaweedfs/weed/pb/master_pb/master.pb.go:4250 +0xad Oct 22 00:53:44 bedb-master1 weed[8055]: google.golang.org/grpc.(*Server).processStreamingRPC(0xc0001f31e0, 0x23bb800, 0xc000ac5500, 0xc000ab7100, 0xc0001fea80, 0x311fec0, 0x0, 0x0, 0x0) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/go/pkg/mod/google.golang.org/grpc@v1.29.1/server.go:1329 +0xcd8 Oct 22 00:53:44 bedb-master1 weed[8055]: google.golang.org/grpc.(*Server).handleStream(0xc0001f31e0, 0x23bb800, 0xc000ac5500, 0xc000ab7100, 0x0) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/go/pkg/mod/google.golang.org/grpc@v1.29.1/server.go:1409 +0xc5c Oct 22 00:53:44 bedb-master1 weed[8055]: google.golang.org/grpc.(*Server).serveStreams.func1.1(0xc0001ce8b0, 0xc0001f31e0, 0x23bb800, 0xc000ac5500, 0xc000ab7100) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/go/pkg/mod/google.golang.org/grpc@v1.29.1/server.go:746 +0xa5 Oct 22 00:53:44 bedb-master1 weed[8055]: created by google.golang.org/grpc.(*Server).serveStreams.func1 Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/go/pkg/mod/google.golang.org/grpc@v1.29.1/server.go:744 +0xa5 Oct 22 00:53:44 bedb-master1 systemd[1]: weedmaster.service: Main process exited, code=exited, status=2/INVALIDARGUMENT Oct 22 00:53:44 bedb-master1 systemd[1]: weedmaster.service: Failed with result 'exit-code'.
4 years ago
6 years ago
6 years ago
3 years ago
3 years ago
7 months ago
6 years ago
6 years ago
6 years ago
6 years ago
5 months ago
  1. package topology
  2. import (
  3. "errors"
  4. "fmt"
  5. "github.com/seaweedfs/seaweedfs/weed/stats"
  6. "math/rand"
  7. "sync"
  8. "sync/atomic"
  9. "time"
  10. "github.com/seaweedfs/seaweedfs/weed/storage/types"
  11. "github.com/seaweedfs/seaweedfs/weed/glog"
  12. "github.com/seaweedfs/seaweedfs/weed/storage"
  13. "github.com/seaweedfs/seaweedfs/weed/storage/needle"
  14. "github.com/seaweedfs/seaweedfs/weed/storage/super_block"
  15. )
// copyState classifies how many replicas of a volume are known,
// relative to the configured replica placement.
type copyState int

const (
	noCopies copyState = 0 + iota
	insufficientCopies
	enoughCopies
)
  22. type volumeState string
  23. const (
  24. readOnlyState volumeState = "ReadOnly"
  25. oversizedState = "Oversized"
  26. crowdedState = "Crowded"
  27. )
  28. type stateIndicator func(copyState) bool
  29. func ExistCopies() stateIndicator {
  30. return func(state copyState) bool { return state != noCopies }
  31. }
  32. func NoCopies() stateIndicator {
  33. return func(state copyState) bool { return state == noCopies }
  34. }
// volumesBinaryState tracks, per volume, the replica locations relevant to
// one binary condition (e.g. read-only, oversized). indicator decides from
// the replica count whether the condition currently holds.
type volumesBinaryState struct {
	rp        *super_block.ReplicaPlacement
	name      volumeState    // the name for volume state (eg. "Readonly", "Oversized")
	indicator stateIndicator // indicate whether the volumes should be marked as `name`
	copyMap   map[needle.VolumeId]*VolumeLocationList
}
  41. func NewVolumesBinaryState(name volumeState, rp *super_block.ReplicaPlacement, indicator stateIndicator) *volumesBinaryState {
  42. return &volumesBinaryState{
  43. rp: rp,
  44. name: name,
  45. indicator: indicator,
  46. copyMap: make(map[needle.VolumeId]*VolumeLocationList),
  47. }
  48. }
  49. func (v *volumesBinaryState) Dump() (res []uint32) {
  50. for vid, list := range v.copyMap {
  51. if v.indicator(v.copyState(list)) {
  52. res = append(res, uint32(vid))
  53. }
  54. }
  55. return
  56. }
  57. func (v *volumesBinaryState) IsTrue(vid needle.VolumeId) bool {
  58. list, _ := v.copyMap[vid]
  59. return v.indicator(v.copyState(list))
  60. }
  61. func (v *volumesBinaryState) Add(vid needle.VolumeId, dn *DataNode) {
  62. list, _ := v.copyMap[vid]
  63. if list != nil {
  64. list.Set(dn)
  65. return
  66. }
  67. list = NewVolumeLocationList()
  68. list.Set(dn)
  69. v.copyMap[vid] = list
  70. }
  71. func (v *volumesBinaryState) Remove(vid needle.VolumeId, dn *DataNode) {
  72. list, _ := v.copyMap[vid]
  73. if list != nil {
  74. list.Remove(dn)
  75. if list.Length() == 0 {
  76. delete(v.copyMap, vid)
  77. }
  78. }
  79. }
// copyState classifies a location list: nil means noCopies, fewer
// replicas than the placement requires means insufficientCopies,
// otherwise enoughCopies.
func (v *volumesBinaryState) copyState(list *VolumeLocationList) copyState {
	if list == nil {
		return noCopies
	}
	if list.Length() < v.rp.GetCopyCount() {
		return insufficientCopies
	}
	return enoughCopies
}
// VolumeLayout is the mapping from volume to its locations, inverted from
// server to volume, for one (replication, ttl, disk-type) combination.
// vid2location, writables and crowded are guarded by accessLock.
type VolumeLayout struct {
	growRequest      atomic.Bool   // a volume-growth request is in flight
	lastGrowCount    atomic.Uint32 // volume count used by the last grow
	rp               *super_block.ReplicaPlacement
	ttl              *needle.TTL
	diskType         types.DiskType
	vid2location     map[needle.VolumeId]*VolumeLocationList
	writables        []needle.VolumeId // transient array of writable volume id
	crowded          map[needle.VolumeId]struct{}
	readonlyVolumes  *volumesBinaryState // readonly volumes
	oversizedVolumes *volumesBinaryState // oversized volumes
	vacuumedVolumes  map[needle.VolumeId]time.Time
	volumeSizeLimit  uint64
	replicationAsMin bool
	accessLock       sync.RWMutex
}
// VolumeLayoutStats aggregates capacity and file counters across all
// volumes of a layout (see VolumeLayout.Stats for how they are computed).
type VolumeLayoutStats struct {
	TotalSize uint64 // capacity across replicas (actual size for read-only volumes, size limit otherwise)
	UsedSize  uint64 // bytes stored, multiplied by replica count
	FileCount uint64 // total file count
}
  111. func NewVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, diskType types.DiskType, volumeSizeLimit uint64, replicationAsMin bool) *VolumeLayout {
  112. return &VolumeLayout{
  113. rp: rp,
  114. ttl: ttl,
  115. diskType: diskType,
  116. vid2location: make(map[needle.VolumeId]*VolumeLocationList),
  117. writables: *new([]needle.VolumeId),
  118. crowded: make(map[needle.VolumeId]struct{}),
  119. readonlyVolumes: NewVolumesBinaryState(readOnlyState, rp, ExistCopies()),
  120. oversizedVolumes: NewVolumesBinaryState(oversizedState, rp, ExistCopies()),
  121. vacuumedVolumes: make(map[needle.VolumeId]time.Time),
  122. volumeSizeLimit: volumeSizeLimit,
  123. replicationAsMin: replicationAsMin,
  124. }
  125. }
// String renders a short human-readable summary of the layout.
func (vl *VolumeLayout) String() string {
	return fmt.Sprintf("rp:%v, ttl:%v, writables:%v, volumeSizeLimit:%v", vl.rp, vl.ttl, vl.writables, vl.volumeSizeLimit)
}
// RegisterVolume records that dn hosts volume v and refreshes the derived
// read-only / oversized / writable bookkeeping for that volume.
func (vl *VolumeLayout) RegisterVolume(v *storage.VolumeInfo, dn *DataNode) {
	vl.accessLock.Lock()
	defer vl.accessLock.Unlock()

	// Deferred after Unlock, so (LIFO order) it still runs while the lock
	// is held.
	defer vl.rememberOversizedVolume(v, dn)

	if _, ok := vl.vid2location[v.Id]; !ok {
		vl.vid2location[v.Id] = NewVolumeLocationList()
	}
	vl.vid2location[v.Id].Set(dn)
	// glog.V(4).Infof("volume %d added to %s len %d copy %d", v.Id, dn.Id(), vl.vid2location[v.Id].Length(), v.ReplicaPlacement.GetCopyCount())

	// Re-scan every known replica of this volume. Note the loop variable dn
	// shadows the dn parameter from here on.
	for _, dn := range vl.vid2location[v.Id].list {
		if vInfo, err := dn.GetVolumesById(v.Id); err == nil {
			if vInfo.ReadOnly {
				// One read-only replica makes the whole volume unwritable.
				glog.V(1).Infof("vid %d removed from writable", v.Id)
				vl.removeFromWritable(v.Id)
				vl.readonlyVolumes.Add(v.Id, dn)
				return
			} else {
				vl.readonlyVolumes.Remove(v.Id, dn)
			}
		} else {
			// The node no longer reports this volume; stop treating it as
			// writable.
			glog.V(1).Infof("vid %d removed from writable", v.Id)
			vl.removeFromWritable(v.Id)
			vl.readonlyVolumes.Remove(v.Id, dn)
			return
		}
	}
}
  156. func (vl *VolumeLayout) rememberOversizedVolume(v *storage.VolumeInfo, dn *DataNode) {
  157. if vl.isOversized(v) {
  158. vl.oversizedVolumes.Add(v.Id, dn)
  159. } else {
  160. vl.oversizedVolumes.Remove(v.Id, dn)
  161. }
  162. }
// UnRegisterVolume removes dn as a location of volume v and updates the
// derived read-only / oversized / writable state accordingly.
func (vl *VolumeLayout) UnRegisterVolume(v *storage.VolumeInfo, dn *DataNode) {
	vl.accessLock.Lock()
	defer vl.accessLock.Unlock()

	// remove from vid2location map
	location, ok := vl.vid2location[v.Id]
	if !ok {
		return
	}

	if location.Remove(dn) {
		vl.readonlyVolumes.Remove(v.Id, dn)
		vl.oversizedVolumes.Remove(v.Id, dn)
		// Re-check writability while the (shrunk) location entry still exists.
		vl.ensureCorrectWritables(v.Id)

		if location.Length() == 0 {
			delete(vl.vid2location, v.Id)
		}
	}
}
// EnsureCorrectWritables is the exported, locking wrapper around
// ensureCorrectWritables for volume v.
func (vl *VolumeLayout) EnsureCorrectWritables(v *storage.VolumeInfo) {
	vl.accessLock.Lock()
	defer vl.accessLock.Unlock()

	vl.ensureCorrectWritables(v.Id)
}
  185. func (vl *VolumeLayout) ensureCorrectWritables(vid needle.VolumeId) {
  186. if vl.enoughCopies(vid) && vl.isAllWritable(vid) {
  187. if !vl.oversizedVolumes.IsTrue(vid) {
  188. vl.setVolumeWritable(vid)
  189. }
  190. } else {
  191. if !vl.enoughCopies(vid) {
  192. glog.V(0).Infof("volume %d does not have enough copies", vid)
  193. }
  194. if !vl.isAllWritable(vid) {
  195. glog.V(0).Infof("volume %d are not all writable", vid)
  196. }
  197. glog.V(0).Infof("volume %d remove from writable", vid)
  198. vl.removeFromWritable(vid)
  199. }
  200. }
  201. func (vl *VolumeLayout) isAllWritable(vid needle.VolumeId) bool {
  202. if location, ok := vl.vid2location[vid]; ok {
  203. for _, dn := range location.list {
  204. if v, getError := dn.GetVolumesById(vid); getError == nil {
  205. if v.ReadOnly {
  206. return false
  207. }
  208. }
  209. }
  210. } else {
  211. return false
  212. }
  213. return true
  214. }
  215. func (vl *VolumeLayout) isOversized(v *storage.VolumeInfo) bool {
  216. return uint64(v.Size) >= vl.volumeSizeLimit
  217. }
  218. func (vl *VolumeLayout) isWritable(v *storage.VolumeInfo) bool {
  219. return !vl.isOversized(v) &&
  220. v.Version == needle.CurrentVersion &&
  221. !v.ReadOnly
  222. }
// isEmpty reports whether this layout tracks no volume locations at all.
func (vl *VolumeLayout) isEmpty() bool {
	vl.accessLock.RLock()
	defer vl.accessLock.RUnlock()

	return len(vl.vid2location) == 0
}
  228. func (vl *VolumeLayout) Lookup(vid needle.VolumeId) []*DataNode {
  229. vl.accessLock.RLock()
  230. defer vl.accessLock.RUnlock()
  231. if location := vl.vid2location[vid]; location != nil {
  232. return location.list
  233. }
  234. return nil
  235. }
  236. func (vl *VolumeLayout) ListVolumeServers() (nodes []*DataNode) {
  237. vl.accessLock.RLock()
  238. defer vl.accessLock.RUnlock()
  239. for _, location := range vl.vid2location {
  240. nodes = append(nodes, location.list...)
  241. }
  242. return
  243. }
// PickForWrite chooses a writable volume for a write of `count` entries,
// honoring any DataCenter/Rack/DataNode constraints in option. It returns
// the chosen volume id, the request count, a copy of the volume's
// location list, a hint that new volumes should be grown, and an error
// when no suitable volume exists.
func (vl *VolumeLayout) PickForWrite(count uint64, option *VolumeGrowOption) (vid needle.VolumeId, counter uint64, locationList *VolumeLocationList, shouldGrow bool, err error) {
	vl.accessLock.RLock()
	defer vl.accessLock.RUnlock()

	lenWriters := len(vl.writables)
	if lenWriters <= 0 {
		//glog.V(0).Infoln("No more writable volumes!")
		shouldGrow = true
		return 0, 0, nil, shouldGrow, errors.New("No more writable volumes!")
	}
	if option.DataCenter == "" && option.Rack == "" && option.DataNode == "" {
		// Unconstrained: pick a random writable volume.
		vid := vl.writables[rand.Intn(lenWriters)]
		locationList = vl.vid2location[vid]
		if locationList != nil && locationList.Length() > 0 {
			// check whether picked file is close to full
			dn := locationList.Head()
			// NOTE(review): the error is ignored; on failure info is the zero
			// VolumeInfo so Size reads as 0 — confirm that is intended.
			info, _ := dn.GetVolumesById(vid)
			if float64(info.Size) > float64(vl.volumeSizeLimit)*VolumeGrowStrategy.Threshold {
				shouldGrow = true
			}
			return vid, count, locationList.Copy(), shouldGrow, nil
		}
		return 0, 0, nil, shouldGrow, errors.New("Strangely vid " + vid.String() + " is on no machine!")
	}

	// Constrained: scan a shuffled copy of the writables and return the
	// first volume with a replica matching every filter.
	// clone vl.writables
	writables := make([]needle.VolumeId, len(vl.writables))
	copy(writables, vl.writables)
	// randomize the writables
	rand.Shuffle(len(writables), func(i, j int) {
		writables[i], writables[j] = writables[j], writables[i]
	})

	for _, writableVolumeId := range writables {
		volumeLocationList := vl.vid2location[writableVolumeId]
		for _, dn := range volumeLocationList.list {
			if option.DataCenter != "" && dn.GetDataCenter().Id() != NodeId(option.DataCenter) {
				continue
			}
			if option.Rack != "" && dn.GetRack().Id() != NodeId(option.Rack) {
				continue
			}
			if option.DataNode != "" && dn.Id() != NodeId(option.DataNode) {
				continue
			}
			vid, locationList = writableVolumeId, volumeLocationList.Copy()
			// check whether picked file is close to full
			info, _ := dn.GetVolumesById(writableVolumeId)
			if float64(info.Size) > float64(vl.volumeSizeLimit)*VolumeGrowStrategy.Threshold {
				shouldGrow = true
			}
			counter = count
			return
		}
	}
	return vid, count, locationList, true, fmt.Errorf("No writable volumes in DataCenter:%v Rack:%v DataNode:%v", option.DataCenter, option.Rack, option.DataNode)
}
// HasGrowRequest reports whether a volume-growth request is in flight.
func (vl *VolumeLayout) HasGrowRequest() bool {
	return vl.growRequest.Load()
}

// AddGrowRequest flags that a volume-growth request has been issued.
func (vl *VolumeLayout) AddGrowRequest() {
	vl.growRequest.Store(true)
}

// DoneGrowRequest clears the in-flight growth flag.
func (vl *VolumeLayout) DoneGrowRequest() {
	vl.growRequest.Store(false)
}

// SetLastGrowCount records the volume count used by the most recent grow.
// The load-before-store presumably avoids redundant atomic writes when
// the value is unchanged — behavior is equivalent to an unconditional Store.
func (vl *VolumeLayout) SetLastGrowCount(count uint32) {
	if vl.lastGrowCount.Load() != count {
		vl.lastGrowCount.Store(count)
	}
}

// GetLastGrowCount returns the volume count of the most recent grow.
func (vl *VolumeLayout) GetLastGrowCount() uint32 {
	return vl.lastGrowCount.Load()
}
// ShouldGrowVolumes reports whether the layout is out of headroom: true
// when every active volume is also crowded. As a side effect it exports
// total/active/crowded gauges for monitoring.
func (vl *VolumeLayout) ShouldGrowVolumes(option *VolumeGrowOption) bool {
	total, active, crowded := vl.GetActiveVolumeCount(option)
	stats.MasterVolumeLayout.WithLabelValues(option.Collection, option.DataCenter, "total").Set(float64(total))
	stats.MasterVolumeLayout.WithLabelValues(option.Collection, option.DataCenter, "active").Set(float64(active))
	stats.MasterVolumeLayout.WithLabelValues(option.Collection, option.DataCenter, "crowded").Set(float64(crowded))
	//glog.V(0).Infof("active volume: %d, high usage volume: %d\n", active, high)
	return active <= crowded
}
// GetActiveVolumeCount counts writable volumes: total (all writables),
// active (replicas matching the option's DC/rack/node filters), and
// crowded (active replicas whose size exceeds the growth threshold).
func (vl *VolumeLayout) GetActiveVolumeCount(option *VolumeGrowOption) (total, active, crowded int) {
	vl.accessLock.RLock()
	defer vl.accessLock.RUnlock()

	// Without a data-center filter the pre-computed sets answer directly.
	if option.DataCenter == "" {
		return len(vl.writables), len(vl.writables), len(vl.crowded)
	}

	total = len(vl.writables)
	for _, v := range vl.writables {
		for _, dn := range vl.vid2location[v].list {
			if dn.GetDataCenter().Id() == NodeId(option.DataCenter) {
				if option.Rack != "" && dn.GetRack().Id() != NodeId(option.Rack) {
					continue
				}
				if option.DataNode != "" && dn.Id() != NodeId(option.DataNode) {
					continue
				}
				active++
				// NOTE(review): error ignored; zero-value info yields Size 0.
				info, _ := dn.GetVolumesById(v)
				if float64(info.Size) > float64(vl.volumeSizeLimit)*VolumeGrowStrategy.Threshold {
					crowded++
				}
			}
		}
	}
	return
}
  349. func (vl *VolumeLayout) removeFromWritable(vid needle.VolumeId) bool {
  350. toDeleteIndex := -1
  351. for k, id := range vl.writables {
  352. if id == vid {
  353. toDeleteIndex = k
  354. break
  355. }
  356. }
  357. if toDeleteIndex >= 0 {
  358. glog.V(0).Infoln("Volume", vid, "becomes unwritable")
  359. vl.writables = append(vl.writables[0:toDeleteIndex], vl.writables[toDeleteIndex+1:]...)
  360. vl.removeFromCrowded(vid)
  361. return true
  362. }
  363. return false
  364. }
  365. func (vl *VolumeLayout) setVolumeWritable(vid needle.VolumeId) bool {
  366. for _, v := range vl.writables {
  367. if v == vid {
  368. return false
  369. }
  370. }
  371. glog.V(0).Infoln("Volume", vid, "becomes writable")
  372. vl.writables = append(vl.writables, vid)
  373. return true
  374. }
  375. func (vl *VolumeLayout) SetVolumeReadOnly(dn *DataNode, vid needle.VolumeId) bool {
  376. vl.accessLock.Lock()
  377. defer vl.accessLock.Unlock()
  378. if _, ok := vl.vid2location[vid]; ok {
  379. vl.readonlyVolumes.Add(vid, dn)
  380. return vl.removeFromWritable(vid)
  381. }
  382. return true
  383. }
// SetVolumeWritable clears dn's read-only mark for vid and, when the
// volume has enough replicas, puts it back on the writable list.
// NOTE(review): enoughCopies is called even when vid is absent from
// vid2location; that path dereferences a nil VolumeLocationList — confirm
// callers always register the volume first.
func (vl *VolumeLayout) SetVolumeWritable(dn *DataNode, vid needle.VolumeId) bool {
	vl.accessLock.Lock()
	defer vl.accessLock.Unlock()

	if _, ok := vl.vid2location[vid]; ok {
		vl.readonlyVolumes.Remove(vid, dn)
	}

	if vl.enoughCopies(vid) {
		return vl.setVolumeWritable(vid)
	}
	return false
}
// SetVolumeUnavailable drops dn as a replica of vid (e.g. the node went
// away) and unmarks the volume writable once it falls below the required
// replica count. Returns true when vid was removed from the writables.
func (vl *VolumeLayout) SetVolumeUnavailable(dn *DataNode, vid needle.VolumeId) bool {
	vl.accessLock.Lock()
	defer vl.accessLock.Unlock()

	if location, ok := vl.vid2location[vid]; ok {
		if location.Remove(dn) {
			vl.readonlyVolumes.Remove(vid, dn)
			vl.oversizedVolumes.Remove(vid, dn)
			if location.Length() < vl.rp.GetCopyCount() {
				glog.V(0).Infoln("Volume", vid, "has", location.Length(), "replica, less than required", vl.rp.GetCopyCount())
				return vl.removeFromWritable(vid)
			}
		}
	}
	return false
}
// SetVolumeAvailable records that dn serves vid again (e.g. the node came
// back) and returns true when this transition made the volume writable.
func (vl *VolumeLayout) SetVolumeAvailable(dn *DataNode, vid needle.VolumeId, isReadOnly, isFullCapacity bool) bool {
	vl.accessLock.Lock()
	defer vl.accessLock.Unlock()

	vInfo, err := dn.GetVolumesById(vid)
	if err != nil {
		return false
	}

	// NOTE(review): assumes vid already exists in vid2location; a missing
	// entry would make Set dereference a nil list — confirm callers always
	// register the volume before marking it available.
	vl.vid2location[vid].Set(dn)

	if vInfo.ReadOnly || isReadOnly || isFullCapacity {
		return false
	}

	if vl.enoughCopies(vid) {
		return vl.setVolumeWritable(vid)
	}
	return false
}
  426. func (vl *VolumeLayout) enoughCopies(vid needle.VolumeId) bool {
  427. locations := vl.vid2location[vid].Length()
  428. desired := vl.rp.GetCopyCount()
  429. return locations == desired || (vl.replicationAsMin && locations > desired)
  430. }
  431. func (vl *VolumeLayout) SetVolumeCapacityFull(vid needle.VolumeId) bool {
  432. vl.accessLock.Lock()
  433. defer vl.accessLock.Unlock()
  434. wasWritable := vl.removeFromWritable(vid)
  435. if wasWritable {
  436. glog.V(0).Infof("Volume %d reaches full capacity.", vid)
  437. }
  438. return wasWritable
  439. }
  440. func (vl *VolumeLayout) removeFromCrowded(vid needle.VolumeId) {
  441. delete(vl.crowded, vid)
  442. }
  443. func (vl *VolumeLayout) setVolumeCrowded(vid needle.VolumeId) {
  444. if _, ok := vl.crowded[vid]; !ok {
  445. vl.crowded[vid] = struct{}{}
  446. glog.V(0).Infoln("Volume", vid, "becomes crowded")
  447. }
  448. }
// SetVolumeCrowded marks vid as crowded, but only while it is still on
// the writable list.
func (vl *VolumeLayout) SetVolumeCrowded(vid needle.VolumeId) {
	// since delete is guarded by accessLock.Lock(),
	// and is always called in sequential order,
	// RLock() should be safe enough
	vl.accessLock.RLock()
	defer vl.accessLock.RUnlock()

	for _, v := range vl.writables {
		if v == vid {
			vl.setVolumeCrowded(vid)
			break
		}
	}
}
// VolumeLayoutInfo is the JSON-serializable view of a VolumeLayout, as
// produced by ToInfo.
type VolumeLayoutInfo struct {
	Replication string            `json:"replication"`
	TTL         string            `json:"ttl"`
	Writables   []needle.VolumeId `json:"writables"`
	Collection  string            `json:"collection"`
	DiskType    string            `json:"diskType"`
}
// ToInfo snapshots the layout for reporting/JSON output. Collection is
// left for the caller to fill in.
// NOTE(review): reads writables without accessLock and shares the slice
// with the layout — confirm callers only serialize the result.
func (vl *VolumeLayout) ToInfo() (info VolumeLayoutInfo) {
	info.Replication = vl.rp.String()
	info.TTL = vl.ttl.String()
	info.Writables = vl.writables
	info.DiskType = vl.diskType.ReadableString()
	//m["locations"] = vl.vid2location
	return
}
  477. func (vl *VolumeLayout) ToGrowOption() (option *VolumeGrowOption) {
  478. option = &VolumeGrowOption{}
  479. option.ReplicaPlacement = vl.rp
  480. option.Ttl = vl.ttl
  481. option.DiskType = vl.diskType
  482. return
  483. }
// Stats aggregates size and file counters across all tracked volumes,
// multiplying by replica count. For read-only volumes the actual size
// counts as total capacity; for growable ones the configured size limit
// does.
func (vl *VolumeLayout) Stats() *VolumeLayoutStats {
	vl.accessLock.RLock()
	defer vl.accessLock.RUnlock()

	ret := &VolumeLayoutStats{}

	// Only samples newer than one minute are considered "fresh" by
	// VolumeLocationList.Stats.
	freshThreshold := time.Now().Unix() - 60
	for vid, vll := range vl.vid2location {
		size, fileCount := vll.Stats(vid, freshThreshold)
		ret.FileCount += uint64(fileCount)
		ret.UsedSize += size * uint64(vll.Length())
		if vl.readonlyVolumes.IsTrue(vid) {
			ret.TotalSize += size * uint64(vll.Length())
		} else {
			ret.TotalSize += vl.volumeSizeLimit * uint64(vll.Length())
		}
	}
	return ret
}