You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

425 lines
11 KiB

6 years ago
6 years ago
6 years ago
6 years ago
adding locking to avoid nil VolumeLocationList fix panic: runtime error: invalid memory address or nil pointer dereference Oct 22 00:53:44 bedb-master1 weed[8055]: [signal SIGSEGV: segmentation violation code=0x1 addr=0x8 pc=0x17658da] Oct 22 00:53:44 bedb-master1 weed[8055]: goroutine 310 [running]: Oct 22 00:53:44 bedb-master1 weed[8055]: github.com/chrislusf/seaweedfs/weed/topology.(*VolumeLocationList).Length(...) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/seaweedfs/weed/topology/volume_location_list.go:35 Oct 22 00:53:44 bedb-master1 weed[8055]: github.com/chrislusf/seaweedfs/weed/topology.(*VolumeLayout).enoughCopies(...) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/seaweedfs/weed/topology/volume_layout.go:376 Oct 22 00:53:44 bedb-master1 weed[8055]: github.com/chrislusf/seaweedfs/weed/topology.(*VolumeLayout).ensureCorrectWritables(0xc000111d50, 0xc000b55438) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/seaweedfs/weed/topology/volume_layout.go:202 +0x5a Oct 22 00:53:44 bedb-master1 weed[8055]: github.com/chrislusf/seaweedfs/weed/topology.(*Topology).SyncDataNodeRegistration(0xc00042ac60, 0xc001454d30, 0x1, 0x1, 0xc0005fc000, 0xc00135de40, 0x4, 0xc00135de50, 0x10, 0x10d, ...) 
Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/seaweedfs/weed/topology/topology.go:224 +0x616 Oct 22 00:53:44 bedb-master1 weed[8055]: github.com/chrislusf/seaweedfs/weed/server.(*MasterServer).SendHeartbeat(0xc000162700, 0x23b97c0, 0xc000ae2c90, 0x0, 0x0) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/seaweedfs/weed/server/master_grpc_server.go:106 +0x325 Oct 22 00:53:44 bedb-master1 weed[8055]: github.com/chrislusf/seaweedfs/weed/pb/master_pb._Seaweed_SendHeartbeat_Handler(0x1f8e7c0, 0xc000162700, 0x23b0a60, 0xc00024b440, 0x3172c38, 0xc000ab7100) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/seaweedfs/weed/pb/master_pb/master.pb.go:4250 +0xad Oct 22 00:53:44 bedb-master1 weed[8055]: google.golang.org/grpc.(*Server).processStreamingRPC(0xc0001f31e0, 0x23bb800, 0xc000ac5500, 0xc000ab7100, 0xc0001fea80, 0x311fec0, 0x0, 0x0, 0x0) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/go/pkg/mod/google.golang.org/grpc@v1.29.1/server.go:1329 +0xcd8 Oct 22 00:53:44 bedb-master1 weed[8055]: google.golang.org/grpc.(*Server).handleStream(0xc0001f31e0, 0x23bb800, 0xc000ac5500, 0xc000ab7100, 0x0) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/go/pkg/mod/google.golang.org/grpc@v1.29.1/server.go:1409 +0xc5c Oct 22 00:53:44 bedb-master1 weed[8055]: google.golang.org/grpc.(*Server).serveStreams.func1.1(0xc0001ce8b0, 0xc0001f31e0, 0x23bb800, 0xc000ac5500, 0xc000ab7100) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/go/pkg/mod/google.golang.org/grpc@v1.29.1/server.go:746 +0xa5 Oct 22 00:53:44 bedb-master1 weed[8055]: created by google.golang.org/grpc.(*Server).serveStreams.func1 Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/go/pkg/mod/google.golang.org/grpc@v1.29.1/server.go:744 +0xa5 Oct 22 00:53:44 bedb-master1 systemd[1]: weedmaster.service: Main process exited, code=exited, status=2/INVALIDARGUMENT Oct 22 00:53:44 bedb-master1 systemd[1]: weedmaster.service: Failed with result 'exit-code'.
4 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
  1. package topology
  2. import (
  3. "errors"
  4. "fmt"
  5. "math/rand"
  6. "sync"
  7. "time"
  8. "github.com/chrislusf/seaweedfs/weed/glog"
  9. "github.com/chrislusf/seaweedfs/weed/storage"
  10. "github.com/chrislusf/seaweedfs/weed/storage/needle"
  11. "github.com/chrislusf/seaweedfs/weed/storage/super_block"
  12. )
  13. type copyState int
  14. const (
  15. noCopies copyState = 0 + iota
  16. insufficientCopies
  17. enoughCopies
  18. )
  19. type volumeState string
  20. const (
  21. readOnlyState volumeState = "ReadOnly"
  22. oversizedState = "Oversized"
  23. )
  24. type stateIndicator func(copyState) bool
  25. func ExistCopies() stateIndicator {
  26. return func(state copyState) bool { return state != noCopies }
  27. }
  28. func NoCopies() stateIndicator {
  29. return func(state copyState) bool { return state == noCopies }
  30. }
// volumesBinaryState tracks the set of volumes that satisfy one binary
// condition (e.g. read-only, oversized), keyed by volume id.
type volumesBinaryState struct {
	rp        *super_block.ReplicaPlacement
	name      volumeState   // the name for volume state (eg. "Readonly", "Oversized")
	indicator stateIndicator // indicate whether the volumes should be marked as `name`
	copyMap   map[needle.VolumeId]*VolumeLocationList
}
  37. func NewVolumesBinaryState(name volumeState, rp *super_block.ReplicaPlacement, indicator stateIndicator) *volumesBinaryState {
  38. return &volumesBinaryState{
  39. rp: rp,
  40. name: name,
  41. indicator: indicator,
  42. copyMap: make(map[needle.VolumeId]*VolumeLocationList),
  43. }
  44. }
  45. func (v *volumesBinaryState) Dump() (res []uint32) {
  46. for vid, list := range v.copyMap {
  47. if v.indicator(v.copyState(list)) {
  48. res = append(res, uint32(vid))
  49. }
  50. }
  51. return
  52. }
  53. func (v *volumesBinaryState) IsTrue(vid needle.VolumeId) bool {
  54. list, _ := v.copyMap[vid]
  55. return v.indicator(v.copyState(list))
  56. }
  57. func (v *volumesBinaryState) Add(vid needle.VolumeId, dn *DataNode) {
  58. list, _ := v.copyMap[vid]
  59. if list != nil {
  60. list.Set(dn)
  61. return
  62. }
  63. list = NewVolumeLocationList()
  64. list.Set(dn)
  65. v.copyMap[vid] = list
  66. }
  67. func (v *volumesBinaryState) Remove(vid needle.VolumeId, dn *DataNode) {
  68. list, _ := v.copyMap[vid]
  69. if list != nil {
  70. list.Remove(dn)
  71. if list.Length() == 0 {
  72. delete(v.copyMap, vid)
  73. }
  74. }
  75. }
  76. func (v *volumesBinaryState) copyState(list *VolumeLocationList) copyState {
  77. if list == nil {
  78. return noCopies
  79. }
  80. if list.Length() < v.rp.GetCopyCount() {
  81. return insufficientCopies
  82. }
  83. return enoughCopies
  84. }
// VolumeLayout is the mapping from volume to its locations, inverted
// from server to volume, for one (replication, ttl) class of volumes.
type VolumeLayout struct {
	rp               *super_block.ReplicaPlacement
	ttl              *needle.TTL
	vid2location     map[needle.VolumeId]*VolumeLocationList
	writables        []needle.VolumeId   // transient array of writable volume id
	readonlyVolumes  *volumesBinaryState // readonly volumes
	oversizedVolumes *volumesBinaryState // oversized volumes
	volumeSizeLimit  uint64
	replicationAsMin bool

	accessLock sync.RWMutex // guards the mutable fields above
}
// VolumeLayoutStats aggregates capacity and file counts across all
// volumes in a layout; see VolumeLayout.Stats.
type VolumeLayoutStats struct {
	TotalSize uint64
	UsedSize  uint64
	FileCount uint64
}
  102. func NewVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, volumeSizeLimit uint64, replicationAsMin bool) *VolumeLayout {
  103. return &VolumeLayout{
  104. rp: rp,
  105. ttl: ttl,
  106. vid2location: make(map[needle.VolumeId]*VolumeLocationList),
  107. writables: *new([]needle.VolumeId),
  108. readonlyVolumes: NewVolumesBinaryState(readOnlyState, rp, ExistCopies()),
  109. oversizedVolumes: NewVolumesBinaryState(oversizedState, rp, ExistCopies()),
  110. volumeSizeLimit: volumeSizeLimit,
  111. replicationAsMin: replicationAsMin,
  112. }
  113. }
  114. func (vl *VolumeLayout) String() string {
  115. return fmt.Sprintf("rp:%v, ttl:%v, vid2location:%v, writables:%v, volumeSizeLimit:%v", vl.rp, vl.ttl, vl.vid2location, vl.writables, vl.volumeSizeLimit)
  116. }
// RegisterVolume records dn as a location of v, then re-evaluates the
// volume's oversized and writable status. If any known location reports
// the volume read-only (or fails to report it), the volume is removed
// from the writable list.
func (vl *VolumeLayout) RegisterVolume(v *storage.VolumeInfo, dn *DataNode) {
	vl.accessLock.Lock()
	defer vl.accessLock.Unlock()

	// Deferred calls run LIFO after the location is recorded below:
	// rememberOversizedVolume first, then ensureCorrectWritables.
	defer vl.ensureCorrectWritables(v)
	defer vl.rememberOversizedVolume(v, dn)

	if _, ok := vl.vid2location[v.Id]; !ok {
		vl.vid2location[v.Id] = NewVolumeLocationList()
	}
	vl.vid2location[v.Id].Set(dn)
	// glog.V(4).Infof("volume %d added to %s len %d copy %d", v.Id, dn.Id(), vl.vid2location[v.Id].Length(), v.ReplicaPlacement.GetCopyCount())

	// NOTE(review): dn below shadows the parameter — this loop walks every
	// known location of the volume, not just the registering node.
	for _, dn := range vl.vid2location[v.Id].list {
		if vInfo, err := dn.GetVolumesById(v.Id); err == nil {
			if vInfo.ReadOnly {
				glog.V(1).Infof("vid %d removed from writable", v.Id)
				vl.removeFromWritable(v.Id)
				vl.readonlyVolumes.Add(v.Id, dn)
				return
			} else {
				vl.readonlyVolumes.Remove(v.Id, dn)
			}
		} else {
			// The node no longer reports this volume; stop treating it as writable.
			glog.V(1).Infof("vid %d removed from writable", v.Id)
			vl.removeFromWritable(v.Id)
			vl.readonlyVolumes.Remove(v.Id, dn)
			return
		}
	}
}
  145. func (vl *VolumeLayout) rememberOversizedVolume(v *storage.VolumeInfo, dn *DataNode) {
  146. if vl.isOversized(v) {
  147. vl.oversizedVolumes.Add(v.Id, dn)
  148. } else {
  149. vl.oversizedVolumes.Remove(v.Id, dn)
  150. }
  151. }
  152. func (vl *VolumeLayout) UnRegisterVolume(v *storage.VolumeInfo, dn *DataNode) {
  153. vl.accessLock.Lock()
  154. defer vl.accessLock.Unlock()
  155. // remove from vid2location map
  156. location, ok := vl.vid2location[v.Id]
  157. if !ok {
  158. return
  159. }
  160. if location.Remove(dn) {
  161. vl.readonlyVolumes.Remove(v.Id, dn)
  162. vl.oversizedVolumes.Remove(v.Id, dn)
  163. vl.ensureCorrectWritables(v)
  164. if location.Length() == 0 {
  165. delete(vl.vid2location, v.Id)
  166. }
  167. }
  168. }
  169. func (vl *VolumeLayout) EnsureCorrectWritables(v *storage.VolumeInfo) {
  170. vl.accessLock.Lock()
  171. defer vl.accessLock.Unlock()
  172. vl.ensureCorrectWritables(v)
  173. }
  174. func (vl *VolumeLayout) ensureCorrectWritables(v *storage.VolumeInfo) {
  175. if vl.enoughCopies(v.Id) && vl.isWritable(v) {
  176. if !vl.oversizedVolumes.IsTrue(v.Id) {
  177. vl.setVolumeWritable(v.Id)
  178. }
  179. } else {
  180. vl.removeFromWritable(v.Id)
  181. }
  182. }
  183. func (vl *VolumeLayout) isOversized(v *storage.VolumeInfo) bool {
  184. return uint64(v.Size) >= vl.volumeSizeLimit
  185. }
  186. func (vl *VolumeLayout) isWritable(v *storage.VolumeInfo) bool {
  187. return !vl.isOversized(v) &&
  188. v.Version == needle.CurrentVersion &&
  189. !v.ReadOnly
  190. }
  191. func (vl *VolumeLayout) isEmpty() bool {
  192. vl.accessLock.RLock()
  193. defer vl.accessLock.RUnlock()
  194. return len(vl.vid2location) == 0
  195. }
  196. func (vl *VolumeLayout) Lookup(vid needle.VolumeId) []*DataNode {
  197. vl.accessLock.RLock()
  198. defer vl.accessLock.RUnlock()
  199. if location := vl.vid2location[vid]; location != nil {
  200. return location.list
  201. }
  202. return nil
  203. }
  204. func (vl *VolumeLayout) ListVolumeServers() (nodes []*DataNode) {
  205. vl.accessLock.RLock()
  206. defer vl.accessLock.RUnlock()
  207. for _, location := range vl.vid2location {
  208. nodes = append(nodes, location.list...)
  209. }
  210. return
  211. }
  212. func (vl *VolumeLayout) PickForWrite(count uint64, option *VolumeGrowOption) (*needle.VolumeId, uint64, *VolumeLocationList, error) {
  213. vl.accessLock.RLock()
  214. defer vl.accessLock.RUnlock()
  215. lenWriters := len(vl.writables)
  216. if lenWriters <= 0 {
  217. glog.V(0).Infoln("No more writable volumes!")
  218. return nil, 0, nil, errors.New("No more writable volumes!")
  219. }
  220. if option.DataCenter == "" {
  221. vid := vl.writables[rand.Intn(lenWriters)]
  222. locationList := vl.vid2location[vid]
  223. if locationList != nil {
  224. return &vid, count, locationList, nil
  225. }
  226. return nil, 0, nil, errors.New("Strangely vid " + vid.String() + " is on no machine!")
  227. }
  228. var vid needle.VolumeId
  229. var locationList *VolumeLocationList
  230. counter := 0
  231. for _, v := range vl.writables {
  232. volumeLocationList := vl.vid2location[v]
  233. for _, dn := range volumeLocationList.list {
  234. if dn.GetDataCenter().Id() == NodeId(option.DataCenter) {
  235. if option.Rack != "" && dn.GetRack().Id() != NodeId(option.Rack) {
  236. continue
  237. }
  238. if option.DataNode != "" && dn.Id() != NodeId(option.DataNode) {
  239. continue
  240. }
  241. counter++
  242. if rand.Intn(counter) < 1 {
  243. vid, locationList = v, volumeLocationList
  244. }
  245. }
  246. }
  247. }
  248. return &vid, count, locationList, nil
  249. }
  250. func (vl *VolumeLayout) GetActiveVolumeCount(option *VolumeGrowOption) int {
  251. vl.accessLock.RLock()
  252. defer vl.accessLock.RUnlock()
  253. if option.DataCenter == "" {
  254. return len(vl.writables)
  255. }
  256. counter := 0
  257. for _, v := range vl.writables {
  258. for _, dn := range vl.vid2location[v].list {
  259. if dn.GetDataCenter().Id() == NodeId(option.DataCenter) {
  260. if option.Rack != "" && dn.GetRack().Id() != NodeId(option.Rack) {
  261. continue
  262. }
  263. if option.DataNode != "" && dn.Id() != NodeId(option.DataNode) {
  264. continue
  265. }
  266. counter++
  267. }
  268. }
  269. }
  270. return counter
  271. }
  272. func (vl *VolumeLayout) removeFromWritable(vid needle.VolumeId) bool {
  273. toDeleteIndex := -1
  274. for k, id := range vl.writables {
  275. if id == vid {
  276. toDeleteIndex = k
  277. break
  278. }
  279. }
  280. if toDeleteIndex >= 0 {
  281. glog.V(0).Infoln("Volume", vid, "becomes unwritable")
  282. vl.writables = append(vl.writables[0:toDeleteIndex], vl.writables[toDeleteIndex+1:]...)
  283. return true
  284. }
  285. return false
  286. }
  287. func (vl *VolumeLayout) setVolumeWritable(vid needle.VolumeId) bool {
  288. for _, v := range vl.writables {
  289. if v == vid {
  290. return false
  291. }
  292. }
  293. glog.V(0).Infoln("Volume", vid, "becomes writable")
  294. vl.writables = append(vl.writables, vid)
  295. return true
  296. }
  297. func (vl *VolumeLayout) SetVolumeUnavailable(dn *DataNode, vid needle.VolumeId) bool {
  298. vl.accessLock.Lock()
  299. defer vl.accessLock.Unlock()
  300. if location, ok := vl.vid2location[vid]; ok {
  301. if location.Remove(dn) {
  302. vl.readonlyVolumes.Remove(vid, dn)
  303. vl.oversizedVolumes.Remove(vid, dn)
  304. if location.Length() < vl.rp.GetCopyCount() {
  305. glog.V(0).Infoln("Volume", vid, "has", location.Length(), "replica, less than required", vl.rp.GetCopyCount())
  306. return vl.removeFromWritable(vid)
  307. }
  308. }
  309. }
  310. return false
  311. }
  312. func (vl *VolumeLayout) SetVolumeAvailable(dn *DataNode, vid needle.VolumeId, isReadOnly bool) bool {
  313. vl.accessLock.Lock()
  314. defer vl.accessLock.Unlock()
  315. vInfo, err := dn.GetVolumesById(vid)
  316. if err != nil {
  317. return false
  318. }
  319. vl.vid2location[vid].Set(dn)
  320. if vInfo.ReadOnly || isReadOnly {
  321. return false
  322. }
  323. if vl.enoughCopies(vid) {
  324. return vl.setVolumeWritable(vid)
  325. }
  326. return false
  327. }
  328. func (vl *VolumeLayout) enoughCopies(vid needle.VolumeId) bool {
  329. locations := vl.vid2location[vid].Length()
  330. desired := vl.rp.GetCopyCount()
  331. return locations == desired || (vl.replicationAsMin && locations > desired)
  332. }
  333. func (vl *VolumeLayout) SetVolumeCapacityFull(vid needle.VolumeId) bool {
  334. vl.accessLock.Lock()
  335. defer vl.accessLock.Unlock()
  336. // glog.V(0).Infoln("Volume", vid, "reaches full capacity.")
  337. return vl.removeFromWritable(vid)
  338. }
  339. func (vl *VolumeLayout) ToMap() map[string]interface{} {
  340. m := make(map[string]interface{})
  341. m["replication"] = vl.rp.String()
  342. m["ttl"] = vl.ttl.String()
  343. m["writables"] = vl.writables
  344. //m["locations"] = vl.vid2location
  345. return m
  346. }
  347. func (vl *VolumeLayout) Stats() *VolumeLayoutStats {
  348. vl.accessLock.RLock()
  349. defer vl.accessLock.RUnlock()
  350. ret := &VolumeLayoutStats{}
  351. freshThreshold := time.Now().Unix() - 60
  352. for vid, vll := range vl.vid2location {
  353. size, fileCount := vll.Stats(vid, freshThreshold)
  354. ret.FileCount += uint64(fileCount)
  355. ret.UsedSize += size
  356. if vl.readonlyVolumes.IsTrue(vid) {
  357. ret.TotalSize += size
  358. } else {
  359. ret.TotalSize += vl.volumeSizeLimit
  360. }
  361. }
  362. return ret
  363. }