You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

513 lines
13 KiB

6 years ago
6 years ago
4 years ago
6 years ago
6 years ago
adding locking to avoid nil VolumeLocationList fix panic: runtime error: invalid memory address or nil pointer dereference Oct 22 00:53:44 bedb-master1 weed[8055]: [signal SIGSEGV: segmentation violation code=0x1 addr=0x8 pc=0x17658da] Oct 22 00:53:44 bedb-master1 weed[8055]: goroutine 310 [running]: Oct 22 00:53:44 bedb-master1 weed[8055]: github.com/chrislusf/seaweedfs/weed/topology.(*VolumeLocationList).Length(...) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/seaweedfs/weed/topology/volume_location_list.go:35 Oct 22 00:53:44 bedb-master1 weed[8055]: github.com/chrislusf/seaweedfs/weed/topology.(*VolumeLayout).enoughCopies(...) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/seaweedfs/weed/topology/volume_layout.go:376 Oct 22 00:53:44 bedb-master1 weed[8055]: github.com/chrislusf/seaweedfs/weed/topology.(*VolumeLayout).ensureCorrectWritables(0xc000111d50, 0xc000b55438) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/seaweedfs/weed/topology/volume_layout.go:202 +0x5a Oct 22 00:53:44 bedb-master1 weed[8055]: github.com/chrislusf/seaweedfs/weed/topology.(*Topology).SyncDataNodeRegistration(0xc00042ac60, 0xc001454d30, 0x1, 0x1, 0xc0005fc000, 0xc00135de40, 0x4, 0xc00135de50, 0x10, 0x10d, ...) 
Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/seaweedfs/weed/topology/topology.go:224 +0x616 Oct 22 00:53:44 bedb-master1 weed[8055]: github.com/chrislusf/seaweedfs/weed/server.(*MasterServer).SendHeartbeat(0xc000162700, 0x23b97c0, 0xc000ae2c90, 0x0, 0x0) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/seaweedfs/weed/server/master_grpc_server.go:106 +0x325 Oct 22 00:53:44 bedb-master1 weed[8055]: github.com/chrislusf/seaweedfs/weed/pb/master_pb._Seaweed_SendHeartbeat_Handler(0x1f8e7c0, 0xc000162700, 0x23b0a60, 0xc00024b440, 0x3172c38, 0xc000ab7100) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/seaweedfs/weed/pb/master_pb/master.pb.go:4250 +0xad Oct 22 00:53:44 bedb-master1 weed[8055]: google.golang.org/grpc.(*Server).processStreamingRPC(0xc0001f31e0, 0x23bb800, 0xc000ac5500, 0xc000ab7100, 0xc0001fea80, 0x311fec0, 0x0, 0x0, 0x0) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/go/pkg/mod/google.golang.org/grpc@v1.29.1/server.go:1329 +0xcd8 Oct 22 00:53:44 bedb-master1 weed[8055]: google.golang.org/grpc.(*Server).handleStream(0xc0001f31e0, 0x23bb800, 0xc000ac5500, 0xc000ab7100, 0x0) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/go/pkg/mod/google.golang.org/grpc@v1.29.1/server.go:1409 +0xc5c Oct 22 00:53:44 bedb-master1 weed[8055]: google.golang.org/grpc.(*Server).serveStreams.func1.1(0xc0001ce8b0, 0xc0001f31e0, 0x23bb800, 0xc000ac5500, 0xc000ab7100) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/go/pkg/mod/google.golang.org/grpc@v1.29.1/server.go:746 +0xa5 Oct 22 00:53:44 bedb-master1 weed[8055]: created by google.golang.org/grpc.(*Server).serveStreams.func1 Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/go/pkg/mod/google.golang.org/grpc@v1.29.1/server.go:744 +0xa5 Oct 22 00:53:44 bedb-master1 systemd[1]: weedmaster.service: Main process exited, code=exited, status=2/INVALIDARGUMENT Oct 22 00:53:44 bedb-master1 systemd[1]: weedmaster.service: Failed with result 'exit-code'.
4 years ago
adding locking to avoid nil VolumeLocationList fix panic: runtime error: invalid memory address or nil pointer dereference Oct 22 00:53:44 bedb-master1 weed[8055]: [signal SIGSEGV: segmentation violation code=0x1 addr=0x8 pc=0x17658da] Oct 22 00:53:44 bedb-master1 weed[8055]: goroutine 310 [running]: Oct 22 00:53:44 bedb-master1 weed[8055]: github.com/chrislusf/seaweedfs/weed/topology.(*VolumeLocationList).Length(...) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/seaweedfs/weed/topology/volume_location_list.go:35 Oct 22 00:53:44 bedb-master1 weed[8055]: github.com/chrislusf/seaweedfs/weed/topology.(*VolumeLayout).enoughCopies(...) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/seaweedfs/weed/topology/volume_layout.go:376 Oct 22 00:53:44 bedb-master1 weed[8055]: github.com/chrislusf/seaweedfs/weed/topology.(*VolumeLayout).ensureCorrectWritables(0xc000111d50, 0xc000b55438) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/seaweedfs/weed/topology/volume_layout.go:202 +0x5a Oct 22 00:53:44 bedb-master1 weed[8055]: github.com/chrislusf/seaweedfs/weed/topology.(*Topology).SyncDataNodeRegistration(0xc00042ac60, 0xc001454d30, 0x1, 0x1, 0xc0005fc000, 0xc00135de40, 0x4, 0xc00135de50, 0x10, 0x10d, ...) 
Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/seaweedfs/weed/topology/topology.go:224 +0x616 Oct 22 00:53:44 bedb-master1 weed[8055]: github.com/chrislusf/seaweedfs/weed/server.(*MasterServer).SendHeartbeat(0xc000162700, 0x23b97c0, 0xc000ae2c90, 0x0, 0x0) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/seaweedfs/weed/server/master_grpc_server.go:106 +0x325 Oct 22 00:53:44 bedb-master1 weed[8055]: github.com/chrislusf/seaweedfs/weed/pb/master_pb._Seaweed_SendHeartbeat_Handler(0x1f8e7c0, 0xc000162700, 0x23b0a60, 0xc00024b440, 0x3172c38, 0xc000ab7100) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/seaweedfs/weed/pb/master_pb/master.pb.go:4250 +0xad Oct 22 00:53:44 bedb-master1 weed[8055]: google.golang.org/grpc.(*Server).processStreamingRPC(0xc0001f31e0, 0x23bb800, 0xc000ac5500, 0xc000ab7100, 0xc0001fea80, 0x311fec0, 0x0, 0x0, 0x0) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/go/pkg/mod/google.golang.org/grpc@v1.29.1/server.go:1329 +0xcd8 Oct 22 00:53:44 bedb-master1 weed[8055]: google.golang.org/grpc.(*Server).handleStream(0xc0001f31e0, 0x23bb800, 0xc000ac5500, 0xc000ab7100, 0x0) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/go/pkg/mod/google.golang.org/grpc@v1.29.1/server.go:1409 +0xc5c Oct 22 00:53:44 bedb-master1 weed[8055]: google.golang.org/grpc.(*Server).serveStreams.func1.1(0xc0001ce8b0, 0xc0001f31e0, 0x23bb800, 0xc000ac5500, 0xc000ab7100) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/go/pkg/mod/google.golang.org/grpc@v1.29.1/server.go:746 +0xa5 Oct 22 00:53:44 bedb-master1 weed[8055]: created by google.golang.org/grpc.(*Server).serveStreams.func1 Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/go/pkg/mod/google.golang.org/grpc@v1.29.1/server.go:744 +0xa5 Oct 22 00:53:44 bedb-master1 systemd[1]: weedmaster.service: Main process exited, code=exited, status=2/INVALIDARGUMENT Oct 22 00:53:44 bedb-master1 systemd[1]: weedmaster.service: Failed with result 'exit-code'.
4 years ago
6 years ago
6 years ago
6 years ago
6 years ago
3 years ago
3 years ago
6 years ago
6 years ago
6 years ago
6 years ago
  1. package topology
  2. import (
  3. "errors"
  4. "fmt"
  5. "github.com/seaweedfs/seaweedfs/weed/storage/types"
  6. "math/rand"
  7. "sync"
  8. "sync/atomic"
  9. "time"
  10. "github.com/seaweedfs/seaweedfs/weed/glog"
  11. "github.com/seaweedfs/seaweedfs/weed/storage"
  12. "github.com/seaweedfs/seaweedfs/weed/storage/needle"
  13. "github.com/seaweedfs/seaweedfs/weed/storage/super_block"
  14. )
// copyState classifies how many replicas of a volume exist relative to
// the configured replica placement.
type copyState int

const (
	noCopies           copyState = 0 + iota // no replica exists anywhere
	insufficientCopies                      // fewer replicas than the placement requires
	enoughCopies                            // replica count satisfies the placement
)
  21. type volumeState string
  22. const (
  23. readOnlyState volumeState = "ReadOnly"
  24. oversizedState = "Oversized"
  25. crowdedState = "Crowded"
  26. )
// stateIndicator decides whether a given copyState should be flagged by a
// volumesBinaryState tracker.
type stateIndicator func(copyState) bool

// ExistCopies returns an indicator that is true whenever at least one copy exists.
func ExistCopies() stateIndicator {
	return func(state copyState) bool { return state != noCopies }
}

// NoCopies returns an indicator that is true only when no copy exists at all.
func NoCopies() stateIndicator {
	return func(state copyState) bool { return state == noCopies }
}
// volumesBinaryState tracks, per volume, the data nodes whose replicas put the
// volume into one binary condition (e.g. read-only, oversized).
type volumesBinaryState struct {
	rp        *super_block.ReplicaPlacement
	name      volumeState    // the name for volume state (eg. "Readonly", "Oversized")
	indicator stateIndicator // indicate whether the volumes should be marked as `name`
	copyMap   map[needle.VolumeId]*VolumeLocationList
}
  40. func NewVolumesBinaryState(name volumeState, rp *super_block.ReplicaPlacement, indicator stateIndicator) *volumesBinaryState {
  41. return &volumesBinaryState{
  42. rp: rp,
  43. name: name,
  44. indicator: indicator,
  45. copyMap: make(map[needle.VolumeId]*VolumeLocationList),
  46. }
  47. }
  48. func (v *volumesBinaryState) Dump() (res []uint32) {
  49. for vid, list := range v.copyMap {
  50. if v.indicator(v.copyState(list)) {
  51. res = append(res, uint32(vid))
  52. }
  53. }
  54. return
  55. }
  56. func (v *volumesBinaryState) IsTrue(vid needle.VolumeId) bool {
  57. list, _ := v.copyMap[vid]
  58. return v.indicator(v.copyState(list))
  59. }
  60. func (v *volumesBinaryState) Add(vid needle.VolumeId, dn *DataNode) {
  61. list, _ := v.copyMap[vid]
  62. if list != nil {
  63. list.Set(dn)
  64. return
  65. }
  66. list = NewVolumeLocationList()
  67. list.Set(dn)
  68. v.copyMap[vid] = list
  69. }
  70. func (v *volumesBinaryState) Remove(vid needle.VolumeId, dn *DataNode) {
  71. list, _ := v.copyMap[vid]
  72. if list != nil {
  73. list.Remove(dn)
  74. if list.Length() == 0 {
  75. delete(v.copyMap, vid)
  76. }
  77. }
  78. }
  79. func (v *volumesBinaryState) copyState(list *VolumeLocationList) copyState {
  80. if list == nil {
  81. return noCopies
  82. }
  83. if list.Length() < v.rp.GetCopyCount() {
  84. return insufficientCopies
  85. }
  86. return enoughCopies
  87. }
// mapping from volume to its locations, inverted from server to volume
type VolumeLayout struct {
	growRequestCount int32     // outstanding volume-grow requests; updated atomically
	growRequestTime  time.Time // when the latest grow request was issued
	rp               *super_block.ReplicaPlacement
	ttl              *needle.TTL
	diskType         types.DiskType
	vid2location     map[needle.VolumeId]*VolumeLocationList
	writables        []needle.VolumeId // transient array of writable volume id
	crowded          map[needle.VolumeId]struct{}
	readonlyVolumes  *volumesBinaryState // readonly volumes
	oversizedVolumes *volumesBinaryState // oversized volumes
	volumeSizeLimit  uint64
	replicationAsMin bool         // when true, more replicas than the placement requires still count as enough
	accessLock       sync.RWMutex // guards the maps and slices above
}
// VolumeLayoutStats aggregates storage accounting across all volumes of a
// layout, with sizes scaled by replica count (see Stats).
type VolumeLayoutStats struct {
	TotalSize uint64
	UsedSize  uint64
	FileCount uint64
}
  109. func NewVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, diskType types.DiskType, volumeSizeLimit uint64, replicationAsMin bool) *VolumeLayout {
  110. return &VolumeLayout{
  111. rp: rp,
  112. ttl: ttl,
  113. diskType: diskType,
  114. vid2location: make(map[needle.VolumeId]*VolumeLocationList),
  115. writables: *new([]needle.VolumeId),
  116. crowded: make(map[needle.VolumeId]struct{}),
  117. readonlyVolumes: NewVolumesBinaryState(readOnlyState, rp, ExistCopies()),
  118. oversizedVolumes: NewVolumesBinaryState(oversizedState, rp, ExistCopies()),
  119. volumeSizeLimit: volumeSizeLimit,
  120. replicationAsMin: replicationAsMin,
  121. }
  122. }
  123. func (vl *VolumeLayout) String() string {
  124. return fmt.Sprintf("rp:%v, ttl:%v, writables:%v, volumeSizeLimit:%v", vl.rp, vl.ttl, vl.writables, vl.volumeSizeLimit)
  125. }
// RegisterVolume adds dn as a location of volume v, then re-derives the
// volume's writable / read-only classification from all known locations.
func (vl *VolumeLayout) RegisterVolume(v *storage.VolumeInfo, dn *DataNode) {
	vl.accessLock.Lock()
	defer vl.accessLock.Unlock()
	// Deferred after Unlock, so it runs first (LIFO) — still under the lock.
	defer vl.rememberOversizedVolume(v, dn)

	if _, ok := vl.vid2location[v.Id]; !ok {
		vl.vid2location[v.Id] = NewVolumeLocationList()
	}
	vl.vid2location[v.Id].Set(dn)
	// glog.V(4).Infof("volume %d added to %s len %d copy %d", v.Id, dn.Id(), vl.vid2location[v.Id].Length(), v.ReplicaPlacement.GetCopyCount())
	// Re-check every known location; note this loop's dn shadows the parameter.
	for _, dn := range vl.vid2location[v.Id].list {
		if vInfo, err := dn.GetVolumesById(v.Id); err == nil {
			if vInfo.ReadOnly {
				// A single read-only replica makes the whole volume unwritable.
				glog.V(1).Infof("vid %d removed from writable", v.Id)
				vl.removeFromWritable(v.Id)
				vl.readonlyVolumes.Add(v.Id, dn)
				return
			} else {
				vl.readonlyVolumes.Remove(v.Id, dn)
			}
		} else {
			// The node no longer reports this volume: treat it as unwritable.
			glog.V(1).Infof("vid %d removed from writable", v.Id)
			vl.removeFromWritable(v.Id)
			vl.readonlyVolumes.Remove(v.Id, dn)
			return
		}
	}
}
  153. func (vl *VolumeLayout) rememberOversizedVolume(v *storage.VolumeInfo, dn *DataNode) {
  154. if vl.isOversized(v) {
  155. vl.oversizedVolumes.Add(v.Id, dn)
  156. } else {
  157. vl.oversizedVolumes.Remove(v.Id, dn)
  158. }
  159. }
// UnRegisterVolume removes dn as a location of volume v and updates derived
// state; the volume entry is dropped once no location remains.
func (vl *VolumeLayout) UnRegisterVolume(v *storage.VolumeInfo, dn *DataNode) {
	vl.accessLock.Lock()
	defer vl.accessLock.Unlock()

	// remove from vid2location map
	location, ok := vl.vid2location[v.Id]
	if !ok {
		return
	}

	if location.Remove(dn) {
		vl.readonlyVolumes.Remove(v.Id, dn)
		vl.oversizedVolumes.Remove(v.Id, dn)
		// The replica count changed; the volume may no longer be safely writable.
		vl.ensureCorrectWritables(v.Id)

		if location.Length() == 0 {
			delete(vl.vid2location, v.Id)
		}
	}
}
// EnsureCorrectWritables recomputes volume v's writable status under the
// layout's write lock.
func (vl *VolumeLayout) EnsureCorrectWritables(v *storage.VolumeInfo) {
	vl.accessLock.Lock()
	defer vl.accessLock.Unlock()

	vl.ensureCorrectWritables(v.Id)
}
  182. func (vl *VolumeLayout) ensureCorrectWritables(vid needle.VolumeId) {
  183. if vl.enoughCopies(vid) && vl.isAllWritable(vid) {
  184. if !vl.oversizedVolumes.IsTrue(vid) {
  185. vl.setVolumeWritable(vid)
  186. }
  187. } else {
  188. if !vl.enoughCopies(vid) {
  189. glog.V(0).Infof("volume %d does not have enough copies", vid)
  190. }
  191. if !vl.isAllWritable(vid) {
  192. glog.V(0).Infof("volume %d are not all writable", vid)
  193. }
  194. glog.V(0).Infof("volume %d remove from writable", vid)
  195. vl.removeFromWritable(vid)
  196. }
  197. }
  198. func (vl *VolumeLayout) isAllWritable(vid needle.VolumeId) bool {
  199. for _, dn := range vl.vid2location[vid].list {
  200. if v, getError := dn.GetVolumesById(vid); getError == nil {
  201. if v.ReadOnly {
  202. return false
  203. }
  204. }
  205. }
  206. return true
  207. }
// isOversized reports whether v has reached or passed the volume size limit.
func (vl *VolumeLayout) isOversized(v *storage.VolumeInfo) bool {
	return uint64(v.Size) >= vl.volumeSizeLimit
}
  211. func (vl *VolumeLayout) isWritable(v *storage.VolumeInfo) bool {
  212. return !vl.isOversized(v) &&
  213. v.Version == needle.CurrentVersion &&
  214. !v.ReadOnly
  215. }
// isEmpty reports whether the layout currently tracks no volumes at all.
func (vl *VolumeLayout) isEmpty() bool {
	vl.accessLock.RLock()
	defer vl.accessLock.RUnlock()

	return len(vl.vid2location) == 0
}
  221. func (vl *VolumeLayout) Lookup(vid needle.VolumeId) []*DataNode {
  222. vl.accessLock.RLock()
  223. defer vl.accessLock.RUnlock()
  224. if location := vl.vid2location[vid]; location != nil {
  225. return location.list
  226. }
  227. return nil
  228. }
  229. func (vl *VolumeLayout) ListVolumeServers() (nodes []*DataNode) {
  230. vl.accessLock.RLock()
  231. defer vl.accessLock.RUnlock()
  232. for _, location := range vl.vid2location {
  233. nodes = append(nodes, location.list...)
  234. }
  235. return
  236. }
  237. func (vl *VolumeLayout) PickForWrite(count uint64, option *VolumeGrowOption) (*needle.VolumeId, uint64, *VolumeLocationList, error) {
  238. vl.accessLock.RLock()
  239. defer vl.accessLock.RUnlock()
  240. lenWriters := len(vl.writables)
  241. if lenWriters <= 0 {
  242. //glog.V(0).Infoln("No more writable volumes!")
  243. return nil, 0, nil, errors.New("No more writable volumes!")
  244. }
  245. if option.DataCenter == "" && option.Rack == "" && option.DataNode == "" {
  246. vid := vl.writables[rand.Intn(lenWriters)]
  247. locationList := vl.vid2location[vid]
  248. if locationList != nil {
  249. return &vid, count, locationList, nil
  250. }
  251. return nil, 0, nil, errors.New("Strangely vid " + vid.String() + " is on no machine!")
  252. }
  253. var vid needle.VolumeId
  254. var locationList *VolumeLocationList
  255. counter := 0
  256. for _, v := range vl.writables {
  257. volumeLocationList := vl.vid2location[v]
  258. for _, dn := range volumeLocationList.list {
  259. if option.DataCenter != "" && dn.GetDataCenter().Id() != NodeId(option.DataCenter) {
  260. continue
  261. }
  262. if option.Rack != "" && dn.GetRack().Id() != NodeId(option.Rack) {
  263. continue
  264. }
  265. if option.DataNode != "" && dn.Id() != NodeId(option.DataNode) {
  266. continue
  267. }
  268. counter++
  269. if rand.Intn(counter) < 1 {
  270. vid, locationList = v, volumeLocationList.Copy()
  271. }
  272. }
  273. }
  274. return &vid, count, locationList, nil
  275. }
  276. func (vl *VolumeLayout) HasGrowRequest() bool {
  277. if atomic.LoadInt32(&vl.growRequestCount) > 0 &&
  278. vl.growRequestTime.Add(time.Minute).After(time.Now()) {
  279. return true
  280. }
  281. return false
  282. }
// AddGrowRequest records that a volume-grow request has been issued.
// NOTE(review): growRequestTime is written without accessLock while the
// counter is atomic — confirm callers serialize grow requests.
func (vl *VolumeLayout) AddGrowRequest() {
	vl.growRequestTime = time.Now()
	atomic.AddInt32(&vl.growRequestCount, 1)
}
// DoneGrowRequest clears all outstanding grow-request bookkeeping, resetting
// the timestamp to the epoch so HasGrowRequest reports false.
func (vl *VolumeLayout) DoneGrowRequest() {
	vl.growRequestTime = time.Unix(0, 0)
	atomic.StoreInt32(&vl.growRequestCount, 0)
}
// ShouldGrowVolumes reports whether new volumes are needed: growth is wanted
// once crowded volumes are at least as numerous as active writable ones.
func (vl *VolumeLayout) ShouldGrowVolumes(option *VolumeGrowOption) bool {
	active, crowded := vl.GetActiveVolumeCount(option)
	//glog.V(0).Infof("active volume: %d, high usage volume: %d\n", active, high)
	return active <= crowded
}
  296. func (vl *VolumeLayout) GetActiveVolumeCount(option *VolumeGrowOption) (active, crowded int) {
  297. vl.accessLock.RLock()
  298. defer vl.accessLock.RUnlock()
  299. if option.DataCenter == "" {
  300. return len(vl.writables), len(vl.crowded)
  301. }
  302. for _, v := range vl.writables {
  303. for _, dn := range vl.vid2location[v].list {
  304. if dn.GetDataCenter().Id() == NodeId(option.DataCenter) {
  305. if option.Rack != "" && dn.GetRack().Id() != NodeId(option.Rack) {
  306. continue
  307. }
  308. if option.DataNode != "" && dn.Id() != NodeId(option.DataNode) {
  309. continue
  310. }
  311. active++
  312. info, _ := dn.GetVolumesById(v)
  313. if float64(info.Size) > float64(vl.volumeSizeLimit)*option.Threshold() {
  314. crowded++
  315. }
  316. }
  317. }
  318. }
  319. return
  320. }
  321. func (vl *VolumeLayout) removeFromWritable(vid needle.VolumeId) bool {
  322. toDeleteIndex := -1
  323. for k, id := range vl.writables {
  324. if id == vid {
  325. toDeleteIndex = k
  326. break
  327. }
  328. }
  329. if toDeleteIndex >= 0 {
  330. glog.V(0).Infoln("Volume", vid, "becomes unwritable")
  331. vl.writables = append(vl.writables[0:toDeleteIndex], vl.writables[toDeleteIndex+1:]...)
  332. vl.removeFromCrowded(vid)
  333. return true
  334. }
  335. return false
  336. }
  337. func (vl *VolumeLayout) setVolumeWritable(vid needle.VolumeId) bool {
  338. for _, v := range vl.writables {
  339. if v == vid {
  340. return false
  341. }
  342. }
  343. glog.V(0).Infoln("Volume", vid, "becomes writable")
  344. vl.writables = append(vl.writables, vid)
  345. return true
  346. }
// SetVolumeUnavailable removes dn as a holder of vid; if the remaining replica
// count falls below the required copy count, the volume is made unwritable.
// Returns whether the volume was removed from the writable list.
func (vl *VolumeLayout) SetVolumeUnavailable(dn *DataNode, vid needle.VolumeId) bool {
	vl.accessLock.Lock()
	defer vl.accessLock.Unlock()

	if location, ok := vl.vid2location[vid]; ok {
		if location.Remove(dn) {
			vl.readonlyVolumes.Remove(vid, dn)
			vl.oversizedVolumes.Remove(vid, dn)
			if location.Length() < vl.rp.GetCopyCount() {
				glog.V(0).Infoln("Volume", vid, "has", location.Length(), "replica, less than required", vl.rp.GetCopyCount())
				return vl.removeFromWritable(vid)
			}
		}
	}
	return false
}
  362. func (vl *VolumeLayout) SetVolumeAvailable(dn *DataNode, vid needle.VolumeId, isReadOnly bool) bool {
  363. vl.accessLock.Lock()
  364. defer vl.accessLock.Unlock()
  365. vInfo, err := dn.GetVolumesById(vid)
  366. if err != nil {
  367. return false
  368. }
  369. vl.vid2location[vid].Set(dn)
  370. if vInfo.ReadOnly || isReadOnly {
  371. return false
  372. }
  373. if vl.enoughCopies(vid) {
  374. return vl.setVolumeWritable(vid)
  375. }
  376. return false
  377. }
  378. func (vl *VolumeLayout) enoughCopies(vid needle.VolumeId) bool {
  379. locations := vl.vid2location[vid].Length()
  380. desired := vl.rp.GetCopyCount()
  381. return locations == desired || (vl.replicationAsMin && locations > desired)
  382. }
  383. func (vl *VolumeLayout) SetVolumeCapacityFull(vid needle.VolumeId) bool {
  384. vl.accessLock.Lock()
  385. defer vl.accessLock.Unlock()
  386. wasWritable := vl.removeFromWritable(vid)
  387. if wasWritable {
  388. glog.V(0).Infof("Volume %d reaches full capacity.", vid)
  389. }
  390. return wasWritable
  391. }
// removeFromCrowded drops vid from the crowded set. Caller must hold accessLock.
func (vl *VolumeLayout) removeFromCrowded(vid needle.VolumeId) {
	delete(vl.crowded, vid)
}
// setVolumeCrowded flags vid as crowded, logging only on the first transition.
func (vl *VolumeLayout) setVolumeCrowded(vid needle.VolumeId) {
	if _, ok := vl.crowded[vid]; !ok {
		vl.crowded[vid] = struct{}{}
		glog.V(0).Infoln("Volume", vid, "becomes crowded")
	}
}
// SetVolumeCrowded marks vid as crowded, but only while it is still writable.
func (vl *VolumeLayout) SetVolumeCrowded(vid needle.VolumeId) {
	// since delete is guarded by accessLock.Lock(),
	// and is always called in sequential order,
	// RLock() should be safe enough
	vl.accessLock.RLock()
	defer vl.accessLock.RUnlock()

	for _, v := range vl.writables {
		if v == vid {
			vl.setVolumeCrowded(vid)
			break
		}
	}
}
// VolumeLayoutInfo is the JSON-serializable view of a VolumeLayout, as
// produced by ToInfo.
type VolumeLayoutInfo struct {
	Replication string            `json:"replication"`
	TTL         string            `json:"ttl"`
	Writables   []needle.VolumeId `json:"writables"`
	Collection  string            `json:"collection"`
}
  420. func (vl *VolumeLayout) ToInfo() (info VolumeLayoutInfo) {
  421. info.Replication = vl.rp.String()
  422. info.TTL = vl.ttl.String()
  423. info.Writables = vl.writables
  424. //m["locations"] = vl.vid2location
  425. return
  426. }
// Stats aggregates size and file counts across all tracked volumes. Sizes are
// multiplied by the replica count so they reflect raw storage consumption.
func (vl *VolumeLayout) Stats() *VolumeLayoutStats {
	vl.accessLock.RLock()
	defer vl.accessLock.RUnlock()

	ret := &VolumeLayoutStats{}

	// Only replicas that reported within the last minute count as fresh.
	freshThreshold := time.Now().Unix() - 60
	for vid, vll := range vl.vid2location {
		size, fileCount := vll.Stats(vid, freshThreshold)
		ret.FileCount += uint64(fileCount)
		ret.UsedSize += size * uint64(vll.Length())
		if vl.readonlyVolumes.IsTrue(vid) {
			// Read-only volumes cannot grow: their capacity is their current size.
			ret.TotalSize += size * uint64(vll.Length())
		} else {
			// Writable volumes may still fill up to the configured limit.
			ret.TotalSize += vl.volumeSizeLimit * uint64(vll.Length())
		}
	}
	return ret
}