package topology

import (
	"errors"
	"fmt"
	"math/rand"
	"sync"
	"time"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/storage"
	"github.com/chrislusf/seaweedfs/weed/storage/needle"
	"github.com/chrislusf/seaweedfs/weed/storage/super_block"
)

// mapping from volume to its locations, inverted from server to volume
type VolumeLayout struct {
	rp               *super_block.ReplicaPlacement
	ttl              *needle.TTL
	vid2location     map[needle.VolumeId]*VolumeLocationList
	writables        []needle.VolumeId        // transient array of writable volume id
	readonlyVolumes  map[needle.VolumeId]bool // transient set of readonly volumes
	oversizedVolumes map[needle.VolumeId]bool // set of oversized volumes
	volumeSizeLimit  uint64
	accessLock       sync.RWMutex
}

type VolumeLayoutStats struct {
	TotalSize uint64
	UsedSize  uint64
	FileCount uint64
}

func NewVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, volumeSizeLimit uint64) *VolumeLayout {
	return &VolumeLayout{
		rp:               rp,
		ttl:              ttl,
		vid2location:     make(map[needle.VolumeId]*VolumeLocationList),
		writables:        *new([]needle.VolumeId),
		readonlyVolumes:  make(map[needle.VolumeId]bool),
		oversizedVolumes: make(map[needle.VolumeId]bool),
		volumeSizeLimit:  volumeSizeLimit,
	}
}

func (vl *VolumeLayout) String() string {
	return fmt.Sprintf("rp:%v, ttl:%v, vid2location:%v, writables:%v, volumeSizeLimit:%v", vl.rp, vl.ttl, vl.vid2location, vl.writables, vl.volumeSizeLimit)
}
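
// RegisterVolume records that data node dn holds a replica of volume v, then, through the
// deferred calls, re-checks whether the volume should remain writable and whether it has
// grown past the configured size limit. If any known replica reports the volume as
// read-only or cannot be queried, the volume is removed from the writable list.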
func (vl *VolumeLayout) RegisterVolume(v *storage.VolumeInfo, dn *DataNode) {
	vl.accessLock.Lock()
	defer vl.accessLock.Unlock()

	defer vl.ensureCorrectWritables(v)
	defer vl.rememberOversizedVolume(v)

	if _, ok := vl.vid2location[v.Id]; !ok {
		vl.vid2location[v.Id] = NewVolumeLocationList()
	}
	vl.vid2location[v.Id].Set(dn)
	// glog.V(4).Infof("volume %d added to %s len %d copy %d", v.Id, dn.Id(), vl.vid2location[v.Id].Length(), v.ReplicaPlacement.GetCopyCount())
	for _, dn := range vl.vid2location[v.Id].list {
		if vInfo, err := dn.GetVolumesById(v.Id); err == nil {
			if vInfo.ReadOnly {
				glog.V(1).Infof("vid %d removed from writable", v.Id)
				vl.removeFromWritable(v.Id)
				vl.readonlyVolumes[v.Id] = true
				return
			} else {
				delete(vl.readonlyVolumes, v.Id)
			}
		} else {
			glog.V(1).Infof("vid %d removed from writable", v.Id)
			vl.removeFromWritable(v.Id)
			delete(vl.readonlyVolumes, v.Id)
			return
		}
	}
}

func (vl *VolumeLayout) rememberOversizedVolume(v *storage.VolumeInfo) {
	if vl.isOversized(v) {
		vl.oversizedVolumes[v.Id] = true
	}
}

func (vl *VolumeLayout) UnRegisterVolume(v *storage.VolumeInfo, dn *DataNode) {
	vl.accessLock.Lock()
	defer vl.accessLock.Unlock()

	// remove from vid2location map
	location, ok := vl.vid2location[v.Id]
	if !ok {
		return
	}

	if location.Remove(dn) {
		vl.ensureCorrectWritables(v)
		if location.Length() == 0 {
			delete(vl.vid2location, v.Id)
		}
	}
}
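
// ensureCorrectWritables keeps the volume's membership in the writable list consistent:
// it stays writable only when all replicas required by the replica placement are present,
// the volume itself is writable, and it has not been flagged as oversized.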
func (vl *VolumeLayout) ensureCorrectWritables(v *storage.VolumeInfo) {
	if vl.vid2location[v.Id].Length() == vl.rp.GetCopyCount() && vl.isWritable(v) {
		if _, ok := vl.oversizedVolumes[v.Id]; !ok {
			vl.setVolumeWritable(v.Id)
		}
	} else {
		vl.removeFromWritable(v.Id)
	}
}

func (vl *VolumeLayout) isOversized(v *storage.VolumeInfo) bool {
	return uint64(v.Size) >= vl.volumeSizeLimit
}

func (vl *VolumeLayout) isWritable(v *storage.VolumeInfo) bool {
	return !vl.isOversized(v) &&
		v.Version == needle.CurrentVersion &&
		!v.ReadOnly
}

func (vl *VolumeLayout) isEmpty() bool {
	vl.accessLock.RLock()
	defer vl.accessLock.RUnlock()

	return len(vl.vid2location) == 0
}

func (vl *VolumeLayout) Lookup(vid needle.VolumeId) []*DataNode {
	vl.accessLock.RLock()
	defer vl.accessLock.RUnlock()

	if location := vl.vid2location[vid]; location != nil {
		return location.list
	}
	return nil
}

func (vl *VolumeLayout) ListVolumeServers() (nodes []*DataNode) {
	vl.accessLock.RLock()
	defer vl.accessLock.RUnlock()

	for _, location := range vl.vid2location {
		nodes = append(nodes, location.list...)
	}
	return
}
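
// PickForWrite selects one writable volume to receive `count` new entries.
// Without a data center constraint it picks uniformly at random from the writable list;
// with a constraint it reservoir-samples across replicas matching the requested data
// center, rack, and data node, returning the volume that owns the chosen replica.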
func (vl *VolumeLayout) PickForWrite(count uint64, option *VolumeGrowOption) (*needle.VolumeId, uint64, *VolumeLocationList, error) {
	vl.accessLock.RLock()
	defer vl.accessLock.RUnlock()

	lenWriters := len(vl.writables)
	if lenWriters <= 0 {
		glog.V(0).Infoln("No more writable volumes!")
		return nil, 0, nil, errors.New("No more writable volumes!")
	}
	if option.DataCenter == "" {
		vid := vl.writables[rand.Intn(lenWriters)]
		locationList := vl.vid2location[vid]
		if locationList != nil {
			return &vid, count, locationList, nil
		}
		return nil, 0, nil, errors.New("Strangely vid " + vid.String() + " is on no machine!")
	}
	var vid needle.VolumeId
	var locationList *VolumeLocationList
	counter := 0
	for _, v := range vl.writables {
		volumeLocationList := vl.vid2location[v]
		for _, dn := range volumeLocationList.list {
			if dn.GetDataCenter().Id() == NodeId(option.DataCenter) {
				if option.Rack != "" && dn.GetRack().Id() != NodeId(option.Rack) {
					continue
				}
				if option.DataNode != "" && dn.Id() != NodeId(option.DataNode) {
					continue
				}
				counter++
				if rand.Intn(counter) < 1 {
					vid, locationList = v, volumeLocationList
				}
			}
		}
	}
	return &vid, count, locationList, nil
}
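
// GetActiveVolumeCount returns the number of writable volumes when no data center is
// specified; otherwise it counts the writable replicas matching the requested data
// center, rack, and data node.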
func (vl *VolumeLayout) GetActiveVolumeCount(option *VolumeGrowOption) int {
	vl.accessLock.RLock()
	defer vl.accessLock.RUnlock()

	if option.DataCenter == "" {
		return len(vl.writables)
	}
	counter := 0
	for _, v := range vl.writables {
		for _, dn := range vl.vid2location[v].list {
			if dn.GetDataCenter().Id() == NodeId(option.DataCenter) {
				if option.Rack != "" && dn.GetRack().Id() != NodeId(option.Rack) {
					continue
				}
				if option.DataNode != "" && dn.Id() != NodeId(option.DataNode) {
					continue
				}
				counter++
			}
		}
	}
	return counter
}
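
// removeFromWritable deletes vid from the writable list, returning true if it was present.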
func (vl *VolumeLayout) removeFromWritable(vid needle.VolumeId) bool {
	toDeleteIndex := -1
	for k, id := range vl.writables {
		if id == vid {
			toDeleteIndex = k
			break
		}
	}
	if toDeleteIndex >= 0 {
		glog.V(0).Infoln("Volume", vid, "becomes unwritable")
		vl.writables = append(vl.writables[0:toDeleteIndex], vl.writables[toDeleteIndex+1:]...)
		return true
	}
	return false
}

func (vl *VolumeLayout) setVolumeWritable(vid needle.VolumeId) bool {
	for _, v := range vl.writables {
		if v == vid {
			return false
		}
	}
	glog.V(0).Infoln("Volume", vid, "becomes writable")
	vl.writables = append(vl.writables, vid)
	return true
}
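
// SetVolumeUnavailable removes dn as a location of vid; if the remaining replica count
// drops below the required copy count, the volume is taken out of the writable list.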
func (vl *VolumeLayout) SetVolumeUnavailable(dn *DataNode, vid needle.VolumeId) bool {
	vl.accessLock.Lock()
	defer vl.accessLock.Unlock()

	if location, ok := vl.vid2location[vid]; ok {
		if location.Remove(dn) {
			if location.Length() < vl.rp.GetCopyCount() {
				glog.V(0).Infoln("Volume", vid, "has", location.Length(), "replica, less than required", vl.rp.GetCopyCount())
				return vl.removeFromWritable(vid)
			}
		}
	}
	return false
}
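
// SetVolumeAvailable records dn as a location of vid; the volume becomes writable again
// only if it is not read-only and all replicas required by the replica placement are present.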
func (vl *VolumeLayout) SetVolumeAvailable(dn *DataNode, vid needle.VolumeId, isReadOnly bool) bool {
	vl.accessLock.Lock()
	defer vl.accessLock.Unlock()

	vInfo, err := dn.GetVolumesById(vid)
	if err != nil {
		return false
	}

	vl.vid2location[vid].Set(dn)

	if vInfo.ReadOnly || isReadOnly {
		return false
	}

	if vl.vid2location[vid].Length() == vl.rp.GetCopyCount() {
		return vl.setVolumeWritable(vid)
	}
	return false
}

func (vl *VolumeLayout) SetVolumeCapacityFull(vid needle.VolumeId) bool {
	vl.accessLock.Lock()
	defer vl.accessLock.Unlock()

	// glog.V(0).Infoln("Volume", vid, "reaches full capacity.")
	return vl.removeFromWritable(vid)
}

func (vl *VolumeLayout) ToMap() map[string]interface{} {
	m := make(map[string]interface{})
	m["replication"] = vl.rp.String()
	m["ttl"] = vl.ttl.String()
	m["writables"] = vl.writables
	//m["locations"] = vl.vid2location
	return m
}
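
// Stats aggregates usage across all known volumes: UsedSize and FileCount come from the
// per-volume location stats, while TotalSize adds the actual size for read-only volumes
// and the configured volume size limit for all others.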
func (vl *VolumeLayout) Stats() *VolumeLayoutStats {
	vl.accessLock.RLock()
	defer vl.accessLock.RUnlock()

	ret := &VolumeLayoutStats{}

	freshThreshold := time.Now().Unix() - 60
	for vid, vll := range vl.vid2location {
		size, fileCount := vll.Stats(vid, freshThreshold)
		ret.FileCount += uint64(fileCount)
		ret.UsedSize += size
		if vl.readonlyVolumes[vid] {
			ret.TotalSize += size
		} else {
			ret.TotalSize += vl.volumeSizeLimit
		}
	}

	return ret
}