package topology

import (
	"encoding/json"
	"fmt"
	"testing"

	"github.com/seaweedfs/seaweedfs/weed/sequence"
	"github.com/seaweedfs/seaweedfs/weed/storage"
	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
	"github.com/seaweedfs/seaweedfs/weed/storage/super_block"
	"github.com/seaweedfs/seaweedfs/weed/storage/types"
	"github.com/seaweedfs/seaweedfs/weed/util"
)
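
// Each topology layout below is a JSON document with the shape
// data center -> rack -> server; every server lists its existing
// "volumes" and a "limit" (its maximum volume count), and may
// optionally carry an "ip" address.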
var topologyLayout = `
{
  "dc1":{
    "rack1":{
      "server111":{
        "volumes":[
          {"id":1, "size":12312},
          {"id":2, "size":12312},
          {"id":3, "size":12312}
        ],
        "limit":3
      },
      "server112":{
        "volumes":[
          {"id":4, "size":12312},
          {"id":5, "size":12312},
          {"id":6, "size":12312}
        ],
        "limit":10
      }
    },
    "rack2":{
      "server121":{
        "volumes":[
          {"id":4, "size":12312},
          {"id":5, "size":12312},
          {"id":6, "size":12312}
        ],
        "limit":4
      },
      "server122":{
        "volumes":[],
        "limit":4
      },
      "server123":{
        "volumes":[
          {"id":2, "size":12312},
          {"id":3, "size":12312},
          {"id":4, "size":12312}
        ],
        "limit":5
      }
    }
  },
  "dc2":{
  },
  "dc3":{
    "rack2":{
      "server321":{
        "volumes":[
          {"id":1, "size":12312},
          {"id":3, "size":12312},
          {"id":5, "size":12312}
        ],
        "limit":4
      }
    }
  }
}
`
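
// setup builds an in-memory Topology from a JSON layout string,
// linking data centers, racks, and data nodes, registering each
// listed volume, and recording each server's volume count limit.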
func setup(topologyLayout string) *Topology {
	var data interface{}
	err := json.Unmarshal([]byte(topologyLayout), &data)
	if err != nil {
		fmt.Println("error:", err)
	}
	fmt.Println("data:", data)

	// need to connect all nodes first, before the servers add volumes
	topo := NewTopology("weedfs", sequence.NewMemorySequencer(), 32*1024, 5, false)
	mTopology := data.(map[string]interface{})
	for dcKey, dcValue := range mTopology {
		dc := NewDataCenter(dcKey)
		dcMap := dcValue.(map[string]interface{})
		topo.LinkChildNode(dc)
		for rackKey, rackValue := range dcMap {
			dcRack := NewRack(rackKey)
			rackMap := rackValue.(map[string]interface{})
			dc.LinkChildNode(dcRack)
			for serverKey, serverValue := range rackMap {
				server := NewDataNode(serverKey)
				serverMap := serverValue.(map[string]interface{})
				if ip, ok := serverMap["ip"]; ok {
					server.Ip = ip.(string)
				}
				dcRack.LinkChildNode(server)
				// register each volume listed for this server
				for _, v := range serverMap["volumes"].([]interface{}) {
					m := v.(map[string]interface{})
					vi := storage.VolumeInfo{
						Id:      needle.VolumeId(int64(m["id"].(float64))),
						Size:    uint64(m["size"].(float64)),
						Version: needle.CurrentVersion,
					}
					if mVal, ok := m["collection"]; ok {
						vi.Collection = mVal.(string)
					}
					if mVal, ok := m["replication"]; ok {
						rp, _ := super_block.NewReplicaPlacementFromString(mVal.(string))
						vi.ReplicaPlacement = rp
					}
					if vi.ReplicaPlacement != nil {
						vl := topo.GetVolumeLayout(vi.Collection, vi.ReplicaPlacement, needle.EMPTY_TTL, types.HardDriveType)
						vl.RegisterVolume(&vi, server)
						vl.setVolumeWritable(vi.Id)
					}
					server.AddOrUpdateVolume(vi)
				}
				// record the server's max volume count ("limit") as a disk usage delta
				disk := server.getOrCreateDisk("")
				deltaDiskUsages := newDiskUsages()
				deltaDiskUsage := deltaDiskUsages.getOrCreateDisk("")
				deltaDiskUsage.maxVolumeCount = int64(serverMap["limit"].(float64))
				disk.UpAdjustDiskUsageDelta(deltaDiskUsages)
			}
		}
	}

	return topo
}
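
// TestFindEmptySlotsForOneVolume requests placement "002" (two extra
// replicas on other servers within the same rack) restricted to dc1,
// and prints which data nodes were picked for the three replicas.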
func TestFindEmptySlotsForOneVolume(t *testing.T) {
	topo := setup(topologyLayout)
	vg := NewDefaultVolumeGrowth()
	rp, _ := super_block.NewReplicaPlacementFromString("002")
	volumeGrowOption := &VolumeGrowOption{
		Collection:       "",
		ReplicaPlacement: rp,
		DataCenter:       "dc1",
		Rack:             "",
		DataNode:         "",
	}
	servers, err := vg.findEmptySlotsForOneVolume(topo, volumeGrowOption)
	if err != nil {
		fmt.Println("finding empty slots error :", err)
		t.Fail()
	}
	for _, server := range servers {
		fmt.Println("assigned node :", server.Id())
	}
}
var topologyLayout2 = `
{
  "dc1":{
    "rack1":{
      "server111":{
        "volumes":[
          {"id":1, "size":12312},
          {"id":2, "size":12312},
          {"id":3, "size":12312}
        ],
        "limit":300
      },
      "server112":{
        "volumes":[
          {"id":4, "size":12312},
          {"id":5, "size":12312},
          {"id":6, "size":12312}
        ],
        "limit":300
      },
      "server113":{
        "volumes":[],
        "limit":300
      },
      "server114":{
        "volumes":[],
        "limit":300
      },
      "server115":{
        "volumes":[],
        "limit":300
      },
      "server116":{
        "volumes":[],
        "limit":300
      }
    },
    "rack2":{
      "server121":{
        "volumes":[
          {"id":4, "size":12312},
          {"id":5, "size":12312},
          {"id":6, "size":12312}
        ],
        "limit":300
      },
      "server122":{
        "volumes":[],
        "limit":300
      },
      "server123":{
        "volumes":[
          {"id":2, "size":12312},
          {"id":3, "size":12312},
          {"id":4, "size":12312}
        ],
        "limit":300
      },
      "server124":{
        "volumes":[],
        "limit":300
      },
      "server125":{
        "volumes":[],
        "limit":300
      },
      "server126":{
        "volumes":[],
        "limit":300
      }
    },
    "rack3":{
      "server131":{
        "volumes":[],
        "limit":300
      },
      "server132":{
        "volumes":[],
        "limit":300
      },
      "server133":{
        "volumes":[],
        "limit":300
      },
      "server134":{
        "volumes":[],
        "limit":300
      },
      "server135":{
        "volumes":[],
        "limit":300
      },
      "server136":{
        "volumes":[],
        "limit":300
      }
    }
  }
}
`
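
// TestReplication011 requests placement "011" (one replica on another
// rack in the same data center, one on another server in the same rack)
// restricted to dc1.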
func TestReplication011(t *testing.T) {
	topo := setup(topologyLayout2)
	vg := NewDefaultVolumeGrowth()
	rp, _ := super_block.NewReplicaPlacementFromString("011")
	volumeGrowOption := &VolumeGrowOption{
		Collection:       "MAIL",
		ReplicaPlacement: rp,
		DataCenter:       "dc1",
		Rack:             "",
		DataNode:         "",
	}
	servers, err := vg.findEmptySlotsForOneVolume(topo, volumeGrowOption)
	if err != nil {
		fmt.Println("finding empty slots error :", err)
		t.Fail()
	}
	for _, server := range servers {
		fmt.Println("assigned node :", server.Id())
	}
}
var topologyLayout3 = `
{
  "dc1":{
    "rack1":{
      "server111":{
        "volumes":[],
        "limit":2000
      }
    }
  },
  "dc2":{
    "rack2":{
      "server222":{
        "volumes":[],
        "limit":2000
      }
    }
  },
  "dc3":{
    "rack3":{
      "server333":{
        "volumes":[],
        "limit":1000
      }
    }
  },
  "dc4":{
    "rack4":{
      "server444":{
        "volumes":[],
        "limit":1000
      }
    }
  },
  "dc5":{
    "rack5":{
      "server555":{
        "volumes":[],
        "limit":500
      }
    }
  },
  "dc6":{
    "rack6":{
      "server666":{
        "volumes":[],
        "limit":500
      }
    }
  }
}
`
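
// TestFindEmptySlotsForOneVolumeScheduleByWeight places 1000 volumes with
// replication "100" (one extra replica in a different data center) across
// six data centers whose limits differ, then prints the resulting
// distribution; the limits act as scheduling weights.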
func TestFindEmptySlotsForOneVolumeScheduleByWeight(t *testing.T) {
	topo := setup(topologyLayout3)
	vg := NewDefaultVolumeGrowth()
	rp, _ := super_block.NewReplicaPlacementFromString("100")
	volumeGrowOption := &VolumeGrowOption{
		Collection:       "Weight",
		ReplicaPlacement: rp,
		DataCenter:       "",
		Rack:             "",
		DataNode:         "",
	}

	distribution := map[NodeId]int{}
	// assign 1000 volumes
	for i := 0; i < 1000; i++ {
		servers, err := vg.findEmptySlotsForOneVolume(topo, volumeGrowOption)
		if err != nil {
			fmt.Println("finding empty slots error :", err)
			t.Fail()
		}
		for _, server := range servers {
			// fmt.Println("assigned node :", server.Id())
			distribution[server.id]++ // missing keys default to zero
		}
	}

	for k, v := range distribution {
		fmt.Printf("%s : %d\n", k, v)
	}
}
var topologyLayout4 = `
{
  "dc1":{
    "rack1":{
      "serverdc111":{
        "ip": "127.0.0.1",
        "volumes":[
          {"id":1, "size":12312, "collection":"test", "replication":"001"},
          {"id":2, "size":12312, "collection":"test", "replication":"100"},
          {"id":4, "size":12312, "collection":"test", "replication":"100"},
          {"id":6, "size":12312, "collection":"test", "replication":"010"}
        ],
        "limit":100
      }
    }
  },
  "dc2":{
    "rack1":{
      "serverdc211":{
        "ip": "127.0.0.2",
        "volumes":[
          {"id":2, "size":12312, "collection":"test", "replication":"100"},
          {"id":3, "size":12312, "collection":"test", "replication":"010"},
          {"id":5, "size":12312, "collection":"test", "replication":"001"},
          {"id":6, "size":12312, "collection":"test", "replication":"010"}
        ],
        "limit":100
      }
    }
  },
  "dc3":{
    "rack1":{
      "serverdc311":{
        "ip": "127.0.0.3",
        "volumes":[
          {"id":1, "size":12312, "collection":"test", "replication":"001"},
          {"id":3, "size":12312, "collection":"test", "replication":"010"},
          {"id":4, "size":12312, "collection":"test", "replication":"100"},
          {"id":5, "size":12312, "collection":"test", "replication":"001"}
        ],
        "limit":100
      }
    }
  }
}
`
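
// TestPickForWrite runs Topology.PickForWrite for every replica placement
// present in topologyLayout4 and for several data center filters: picking
// from the nonexistent "dc0" must fail with shouldGrow set, while every
// other combination must return a non-empty file id without needing growth.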
func TestPickForWrite(t *testing.T) {
	topo := setup(topologyLayout4)
	volumeGrowOption := &VolumeGrowOption{
		Collection: "test",
		DataCenter: "",
		Rack:       "",
		DataNode:   "",
	}
	v := util.GetViper()
	// configure the volume growth threshold consulted by PickForWrite
	v.Set("master.volume_growth.threshold", 0.9)
	for _, rpStr := range []string{"001", "010", "100"} {
		rp, _ := super_block.NewReplicaPlacementFromString(rpStr)
		vl := topo.GetVolumeLayout("test", rp, needle.EMPTY_TTL, types.HardDriveType)
		volumeGrowOption.ReplicaPlacement = rp
		for _, dc := range []string{"", "dc1", "dc2", "dc3", "dc0"} {
			volumeGrowOption.DataCenter = dc
			for _, r := range []string{""} {
				volumeGrowOption.Rack = r
				for _, dn := range []string{""} {
					if dc == "" && dn != "" {
						continue
					}
					volumeGrowOption.DataNode = dn
					fileId, count, _, shouldGrow, err := topo.PickForWrite(1, volumeGrowOption, vl)
					if dc == "dc0" {
						// dc0 does not exist in the layout, so the pick must fail
						if err == nil || count != 0 || !shouldGrow {
							fmt.Println(dc, r, dn, "pick for write should be with error")
							t.Fail()
						}
					} else if err != nil {
						fmt.Println(dc, r, dn, "pick for write error :", err)
						t.Fail()
					} else if count == 0 {
						fmt.Println(dc, r, dn, "pick for write count is zero")
						t.Fail()
					} else if len(fileId) == 0 {
						fmt.Println(dc, r, dn, "pick for write file id is empty")
						t.Fail()
					} else if shouldGrow {
						fmt.Println(dc, r, dn, "pick for write error : should not grow")
						t.Fail()
					}
				}
			}
		}
	}
}