package topology

import (
	"errors"
	"fmt"
	"math/rand"
	"sync"
	"time"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/storage"
	"github.com/chrislusf/seaweedfs/weed/storage/needle"
	"github.com/chrislusf/seaweedfs/weed/storage/super_block"
)

type copyState int

const (
	noCopies copyState = 0 + iota
	insufficientCopies
	enoughCopies
)

type volumeState string

const (
	readOnlyState  volumeState = "ReadOnly"
	oversizedState             = "Oversized"
)

type stateIndicator func(copyState) bool

// ExistCopies returns an indicator that is true when a volume has at least one copy.
func ExistCopies() stateIndicator {
	return func(state copyState) bool { return state != noCopies }
}

// NoCopies returns an indicator that is true when a volume has no copies at all.
func NoCopies() stateIndicator {
	return func(state copyState) bool { return state == noCopies }
}

// volumesBinaryState tracks, per volume id, whether the volume is in a given
// binary state (e.g. read-only or oversized), based on its known locations.
type volumesBinaryState struct {
	rp        *super_block.ReplicaPlacement
	name      volumeState    // the name for the volume state (e.g. "ReadOnly", "Oversized")
	indicator stateIndicator // indicates whether the volumes should be marked as `name`
	copyMap   map[needle.VolumeId]*VolumeLocationList
}

func NewVolumesBinaryState(name volumeState, rp *super_block.ReplicaPlacement, indicator stateIndicator) *volumesBinaryState {
	return &volumesBinaryState{
		rp:        rp,
		name:      name,
		indicator: indicator,
		copyMap:   make(map[needle.VolumeId]*VolumeLocationList),
	}
}

// Dump returns the ids of all volumes whose copy state satisfies the indicator.
func (v *volumesBinaryState) Dump() (res []uint32) {
	for vid, list := range v.copyMap {
		if v.indicator(v.copyState(list)) {
			res = append(res, uint32(vid))
		}
	}
	return
}

// IsTrue reports whether the volume is currently in this binary state.
func (v *volumesBinaryState) IsTrue(vid needle.VolumeId) bool {
	list := v.copyMap[vid]
	return v.indicator(v.copyState(list))
}

func (v *volumesBinaryState) Add(vid needle.VolumeId, dn *DataNode) {
	list := v.copyMap[vid]
	if list != nil {
		list.Set(dn)
		return
	}
	list = NewVolumeLocationList()
	list.Set(dn)
	v.copyMap[vid] = list
}

func (v *volumesBinaryState) Remove(vid needle.VolumeId, dn *DataNode) {
	list := v.copyMap[vid]
	if list != nil {
		list.Remove(dn)
		if list.Length() == 0 {
			delete(v.copyMap, vid)
		}
	}
}

func (v *volumesBinaryState) copyState(list *VolumeLocationList) copyState {
	if list == nil {
		return noCopies
	}
	if list.Length() < v.rp.GetCopyCount() {
		return insufficientCopies
	}
	return enoughCopies
}
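
// exampleReadonlyTracking is a hypothetical sketch, not part of the original file,
// illustrating how a volumesBinaryState tracker behaves on its own: with the
// ExistCopies() indicator a volume counts as "in the state" (here: read-only) as soon
// as at least one location is recorded, and stops counting once all locations are removed.
func exampleReadonlyTracking(rp *super_block.ReplicaPlacement, vid needle.VolumeId, dn *DataNode) {
	readonly := NewVolumesBinaryState(readOnlyState, rp, ExistCopies())

	readonly.Add(vid, dn)    // dn reports vid as read-only
	_ = readonly.IsTrue(vid) // true: ExistCopies() only needs one recorded copy
	_ = readonly.Dump()      // contains uint32(vid)

	readonly.Remove(vid, dn) // dn no longer holds vid (or reports it writable again)
	_ = readonly.IsTrue(vid) // false: no copies recorded anymore
}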

// VolumeLayout maps each volume to its locations, inverted from server to volume.
type VolumeLayout struct {
	rp               *super_block.ReplicaPlacement
	ttl              *needle.TTL
	vid2location     map[needle.VolumeId]*VolumeLocationList
	writables        []needle.VolumeId   // transient array of writable volume ids
	readonlyVolumes  *volumesBinaryState // readonly volumes
	oversizedVolumes *volumesBinaryState // oversized volumes
	volumeSizeLimit  uint64
	replicationAsMin bool

	accessLock sync.RWMutex
}

type VolumeLayoutStats struct {
	TotalSize uint64
	UsedSize  uint64
	FileCount uint64
}

func NewVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, volumeSizeLimit uint64, replicationAsMin bool) *VolumeLayout {
	return &VolumeLayout{
		rp:               rp,
		ttl:              ttl,
		vid2location:     make(map[needle.VolumeId]*VolumeLocationList),
		writables:        *new([]needle.VolumeId),
		readonlyVolumes:  NewVolumesBinaryState(readOnlyState, rp, ExistCopies()),
		oversizedVolumes: NewVolumesBinaryState(oversizedState, rp, ExistCopies()),
		volumeSizeLimit:  volumeSizeLimit,
		replicationAsMin: replicationAsMin,
	}
}

func (vl *VolumeLayout) String() string {
	return fmt.Sprintf("rp:%v, ttl:%v, vid2location:%v, writables:%v, volumeSizeLimit:%v", vl.rp, vl.ttl, vl.vid2location, vl.writables, vl.volumeSizeLimit)
}

// RegisterVolume records that dn holds volume v and updates the writable,
// read-only, and oversized bookkeeping accordingly.
func (vl *VolumeLayout) RegisterVolume(v *storage.VolumeInfo, dn *DataNode) {
	vl.accessLock.Lock()
	defer vl.accessLock.Unlock()

	defer vl.ensureCorrectWritables(v)
	defer vl.rememberOversizedVolume(v, dn)

	if _, ok := vl.vid2location[v.Id]; !ok {
		vl.vid2location[v.Id] = NewVolumeLocationList()
	}
	vl.vid2location[v.Id].Set(dn)
	// glog.V(4).Infof("volume %d added to %s len %d copy %d", v.Id, dn.Id(), vl.vid2location[v.Id].Length(), v.ReplicaPlacement.GetCopyCount())
	for _, dn := range vl.vid2location[v.Id].list {
		if vInfo, err := dn.GetVolumesById(v.Id); err == nil {
			if vInfo.ReadOnly {
				glog.V(1).Infof("vid %d removed from writable", v.Id)
				vl.removeFromWritable(v.Id)
				vl.readonlyVolumes.Add(v.Id, dn)
				return
			} else {
				vl.readonlyVolumes.Remove(v.Id, dn)
			}
		} else {
			glog.V(1).Infof("vid %d removed from writable", v.Id)
			vl.removeFromWritable(v.Id)
			vl.readonlyVolumes.Remove(v.Id, dn)
			return
		}
	}
}

func (vl *VolumeLayout) rememberOversizedVolume(v *storage.VolumeInfo, dn *DataNode) {
	if vl.isOversized(v) {
		vl.oversizedVolumes.Add(v.Id, dn)
	} else {
		vl.oversizedVolumes.Remove(v.Id, dn)
	}
}

func (vl *VolumeLayout) UnRegisterVolume(v *storage.VolumeInfo, dn *DataNode) {
	vl.accessLock.Lock()
	defer vl.accessLock.Unlock()

	// remove from vid2location map
	location, ok := vl.vid2location[v.Id]
	if !ok {
		return
	}

	if location.Remove(dn) {
		vl.readonlyVolumes.Remove(v.Id, dn)
		vl.oversizedVolumes.Remove(v.Id, dn)
		vl.ensureCorrectWritables(v)

		if location.Length() == 0 {
			delete(vl.vid2location, v.Id)
		}
	}
}

// ensureCorrectWritables keeps a volume in the writable list only while it has
// enough replicas, is the current version, is not read-only, and is not oversized.
func (vl *VolumeLayout) ensureCorrectWritables(v *storage.VolumeInfo) {
	if vl.enoughCopies(v.Id) && vl.isWritable(v) {
		if !vl.oversizedVolumes.IsTrue(v.Id) {
			vl.setVolumeWritable(v.Id)
		}
	} else {
		vl.removeFromWritable(v.Id)
	}
}

func (vl *VolumeLayout) isOversized(v *storage.VolumeInfo) bool {
	return uint64(v.Size) >= vl.volumeSizeLimit
}

func (vl *VolumeLayout) isWritable(v *storage.VolumeInfo) bool {
	return !vl.isOversized(v) &&
		v.Version == needle.CurrentVersion &&
		!v.ReadOnly
}

func (vl *VolumeLayout) isEmpty() bool {
	vl.accessLock.RLock()
	defer vl.accessLock.RUnlock()

	return len(vl.vid2location) == 0
}

func (vl *VolumeLayout) Lookup(vid needle.VolumeId) []*DataNode {
	vl.accessLock.RLock()
	defer vl.accessLock.RUnlock()

	if location := vl.vid2location[vid]; location != nil {
		return location.list
	}
	return nil
}

func (vl *VolumeLayout) ListVolumeServers() (nodes []*DataNode) {
	vl.accessLock.RLock()
	defer vl.accessLock.RUnlock()

	for _, location := range vl.vid2location {
		nodes = append(nodes, location.list...)
	}
	return
}

// PickForWrite returns a writable volume id together with its locations. Without a
// data center constraint it picks uniformly at random; with constraints it scans all
// writable volumes and keeps one matching candidate via single-slot reservoir sampling.
func (vl *VolumeLayout) PickForWrite(count uint64, option *VolumeGrowOption) (*needle.VolumeId, uint64, *VolumeLocationList, error) {
	vl.accessLock.RLock()
	defer vl.accessLock.RUnlock()

	lenWriters := len(vl.writables)
	if lenWriters <= 0 {
		glog.V(0).Infoln("No more writable volumes!")
		return nil, 0, nil, errors.New("No more writable volumes!")
	}
	if option.DataCenter == "" {
		vid := vl.writables[rand.Intn(lenWriters)]
		locationList := vl.vid2location[vid]
		if locationList != nil {
			return &vid, count, locationList, nil
		}
		return nil, 0, nil, errors.New("Strangely vid " + vid.String() + " is on no machine!")
	}
	var vid needle.VolumeId
	var locationList *VolumeLocationList
	counter := 0
	for _, v := range vl.writables {
		volumeLocationList := vl.vid2location[v]
		for _, dn := range volumeLocationList.list {
			if dn.GetDataCenter().Id() == NodeId(option.DataCenter) {
				if option.Rack != "" && dn.GetRack().Id() != NodeId(option.Rack) {
					continue
				}
				if option.DataNode != "" && dn.Id() != NodeId(option.DataNode) {
					continue
				}
				counter++
				// reservoir sampling: each matching candidate survives with probability 1/counter
				if rand.Intn(counter) < 1 {
					vid, locationList = v, volumeLocationList
				}
			}
		}
	}
	if locationList == nil {
		// no writable volume matched the requested data center / rack / data node
		return nil, 0, nil, fmt.Errorf("no writable volumes in DataCenter:%v Rack:%v DataNode:%v", option.DataCenter, option.Rack, option.DataNode)
	}
	return &vid, count, locationList, nil
}
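
// pickUniform is a hypothetical standalone sketch, not part of the original file, of the
// single-slot reservoir sampling used in PickForWrite above: the i-th matching candidate
// replaces the current pick with probability 1/i, so after one pass every candidate is
// equally likely to be the final choice, without knowing the candidate count up front.
func pickUniform(candidates []needle.VolumeId) (picked needle.VolumeId, ok bool) {
	counter := 0
	for _, c := range candidates {
		counter++
		if rand.Intn(counter) < 1 { // keep c with probability 1/counter
			picked, ok = c, true
		}
	}
	return
}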

func (vl *VolumeLayout) GetActiveVolumeCount(option *VolumeGrowOption) int {
	vl.accessLock.RLock()
	defer vl.accessLock.RUnlock()

	if option.DataCenter == "" {
		return len(vl.writables)
	}
	counter := 0
	for _, v := range vl.writables {
		for _, dn := range vl.vid2location[v].list {
			if dn.GetDataCenter().Id() == NodeId(option.DataCenter) {
				if option.Rack != "" && dn.GetRack().Id() != NodeId(option.Rack) {
					continue
				}
				if option.DataNode != "" && dn.Id() != NodeId(option.DataNode) {
					continue
				}
				counter++
			}
		}
	}
	return counter
}

func (vl *VolumeLayout) removeFromWritable(vid needle.VolumeId) bool {
	toDeleteIndex := -1
	for k, id := range vl.writables {
		if id == vid {
			toDeleteIndex = k
			break
		}
	}
	if toDeleteIndex >= 0 {
		glog.V(0).Infoln("Volume", vid, "becomes unwritable")
		vl.writables = append(vl.writables[0:toDeleteIndex], vl.writables[toDeleteIndex+1:]...)
		return true
	}
	return false
}

func (vl *VolumeLayout) setVolumeWritable(vid needle.VolumeId) bool {
	for _, v := range vl.writables {
		if v == vid {
			return false
		}
	}
	glog.V(0).Infoln("Volume", vid, "becomes writable")
	vl.writables = append(vl.writables, vid)
	return true
}

func (vl *VolumeLayout) SetVolumeUnavailable(dn *DataNode, vid needle.VolumeId) bool {
	vl.accessLock.Lock()
	defer vl.accessLock.Unlock()

	if location, ok := vl.vid2location[vid]; ok {
		if location.Remove(dn) {
			vl.readonlyVolumes.Remove(vid, dn)
			vl.oversizedVolumes.Remove(vid, dn)
			if location.Length() < vl.rp.GetCopyCount() {
				glog.V(0).Infoln("Volume", vid, "has", location.Length(), "replicas, less than required", vl.rp.GetCopyCount())
				return vl.removeFromWritable(vid)
			}
		}
	}
	return false
}

func (vl *VolumeLayout) SetVolumeAvailable(dn *DataNode, vid needle.VolumeId, isReadOnly bool) bool {
	vl.accessLock.Lock()
	defer vl.accessLock.Unlock()

	vInfo, err := dn.GetVolumesById(vid)
	if err != nil {
		return false
	}

	vl.vid2location[vid].Set(dn)

	if vInfo.ReadOnly || isReadOnly {
		return false
	}

	if vl.enoughCopies(vid) {
		return vl.setVolumeWritable(vid)
	}
	return false
}

// enoughCopies reports whether the volume has exactly the number of copies required by
// the replica placement, or at least that many when replication is treated as a minimum.
func (vl *VolumeLayout) enoughCopies(vid needle.VolumeId) bool {
	locations := vl.vid2location[vid].Length()
	desired := vl.rp.GetCopyCount()
	return locations == desired || (vl.replicationAsMin && locations > desired)
}

func (vl *VolumeLayout) SetVolumeCapacityFull(vid needle.VolumeId) bool {
	vl.accessLock.Lock()
	defer vl.accessLock.Unlock()

	// glog.V(0).Infoln("Volume", vid, "reaches full capacity.")
	return vl.removeFromWritable(vid)
}

func (vl *VolumeLayout) ToMap() map[string]interface{} {
	m := make(map[string]interface{})
	m["replication"] = vl.rp.String()
	m["ttl"] = vl.ttl.String()
	m["writables"] = vl.writables
	//m["locations"] = vl.vid2location
	return m
}

// Stats aggregates size and file counts across all known volume locations. Read-only
// volumes contribute their actual size to TotalSize; other volumes contribute the
// configured volume size limit, since they can still grow up to it.
func (vl *VolumeLayout) Stats() *VolumeLayoutStats {
	vl.accessLock.RLock()
	defer vl.accessLock.RUnlock()

	ret := &VolumeLayoutStats{}

	freshThreshold := time.Now().Unix() - 60

	for vid, vll := range vl.vid2location {
		size, fileCount := vll.Stats(vid, freshThreshold)
		ret.FileCount += uint64(fileCount)
		ret.UsedSize += size
		if vl.readonlyVolumes.IsTrue(vid) {
			ret.TotalSize += size
		} else {
			ret.TotalSize += vl.volumeSizeLimit
		}
	}
	return ret
}
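
// exampleLayoutUsage is a hypothetical sketch, not part of the original file, showing how
// a VolumeLayout is typically constructed and queried. It assumes the helper constructors
// super_block.NewReplicaPlacementFromString and needle.ReadTTL from the imported packages.
// On a freshly built layout PickForWrite simply returns the "No more writable volumes!"
// error until volumes have been registered via RegisterVolume.
func exampleLayoutUsage() {
	rp, _ := super_block.NewReplicaPlacementFromString("001") // one extra copy on a different rack
	ttl, _ := needle.ReadTTL("")                              // no TTL
	vl := NewVolumeLayout(rp, ttl, 30*1024*1024*1024, false)  // 30 GiB volume size limit

	option := &VolumeGrowOption{DataCenter: "dc1"} // restrict the pick to one data center
	if vid, count, locations, err := vl.PickForWrite(1, option); err == nil {
		glog.V(0).Infof("write %d file(s) to volume %d with %d locations", count, *vid, locations.Length())
	}
}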