You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

356 lines
11 KiB

  1. package shell
  2. import (
  3. "fmt"
  4. "reflect"
  5. "strings"
  6. "testing"
  7. "github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
  8. "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
  9. "github.com/seaweedfs/seaweedfs/weed/storage/needle"
  10. "github.com/seaweedfs/seaweedfs/weed/storage/super_block"
  11. "github.com/seaweedfs/seaweedfs/weed/storage/types"
  12. )
// Shared topology fixtures, parsed once at package init from the raw
// `volume.list`-style dumps (topoData, topoData2, topoDataEc) defined
// elsewhere in this package. Reused read-only across the tests below.
var (
	topology1  = parseOutput(topoData)
	topology2  = parseOutput(topoData2)
	topologyEc = parseOutput(topoDataEc)
)
  18. func errorCheck(got error, want string) error {
  19. if got == nil && want == "" {
  20. return nil
  21. }
  22. if got != nil && want == "" {
  23. return fmt.Errorf("expected no error, got %q", got.Error())
  24. }
  25. if got == nil && want != "" {
  26. return fmt.Errorf("got no error, expected %q", want)
  27. }
  28. if !strings.Contains(got.Error(), want) {
  29. return fmt.Errorf("expected error %q, got %q", want, got.Error())
  30. }
  31. return nil
  32. }
  33. func TestCollectCollectionsForVolumeIds(t *testing.T) {
  34. testCases := []struct {
  35. topology *master_pb.TopologyInfo
  36. vids []needle.VolumeId
  37. want []string
  38. }{
  39. // normal volumes
  40. {topology1, nil, nil},
  41. {topology1, []needle.VolumeId{}, nil},
  42. {topology1, []needle.VolumeId{needle.VolumeId(9999)}, nil},
  43. {topology1, []needle.VolumeId{needle.VolumeId(2)}, []string{""}},
  44. {topology1, []needle.VolumeId{needle.VolumeId(2), needle.VolumeId(272)}, []string{"", "collection2"}},
  45. {topology1, []needle.VolumeId{needle.VolumeId(2), needle.VolumeId(272), needle.VolumeId(299)}, []string{"", "collection2"}},
  46. {topology1, []needle.VolumeId{needle.VolumeId(272), needle.VolumeId(299), needle.VolumeId(95)}, []string{"collection1", "collection2"}},
  47. {topology1, []needle.VolumeId{needle.VolumeId(272), needle.VolumeId(299), needle.VolumeId(95), needle.VolumeId(51)}, []string{"collection1", "collection2"}},
  48. {topology1, []needle.VolumeId{needle.VolumeId(272), needle.VolumeId(299), needle.VolumeId(95), needle.VolumeId(51), needle.VolumeId(15)}, []string{"collection0", "collection1", "collection2"}},
  49. // EC volumes
  50. {topology2, []needle.VolumeId{needle.VolumeId(9577)}, []string{"s3qldata"}},
  51. {topology2, []needle.VolumeId{needle.VolumeId(9577), needle.VolumeId(12549)}, []string{"s3qldata"}},
  52. // normal + EC volumes
  53. {topology2, []needle.VolumeId{needle.VolumeId(18111)}, []string{"s3qldata"}},
  54. {topology2, []needle.VolumeId{needle.VolumeId(8677)}, []string{"s3qldata"}},
  55. {topology2, []needle.VolumeId{needle.VolumeId(18111), needle.VolumeId(8677)}, []string{"s3qldata"}},
  56. }
  57. for _, tc := range testCases {
  58. got := collectCollectionsForVolumeIds(tc.topology, tc.vids)
  59. if !reflect.DeepEqual(got, tc.want) {
  60. t.Errorf("for %v: got %v, want %v", tc.vids, got, tc.want)
  61. }
  62. }
  63. }
  64. func TestParseReplicaPlacementArg(t *testing.T) {
  65. getDefaultReplicaPlacementOrig := getDefaultReplicaPlacement
  66. getDefaultReplicaPlacement = func(commandEnv *CommandEnv) (*super_block.ReplicaPlacement, error) {
  67. return super_block.NewReplicaPlacementFromString("123")
  68. }
  69. defer func() {
  70. getDefaultReplicaPlacement = getDefaultReplicaPlacementOrig
  71. }()
  72. testCases := []struct {
  73. argument string
  74. want string
  75. wantErr string
  76. }{
  77. {"lalala", "lal", "unexpected replication type"},
  78. {"", "123", ""},
  79. {"021", "021", ""},
  80. }
  81. for _, tc := range testCases {
  82. commandEnv := &CommandEnv{}
  83. got, gotErr := parseReplicaPlacementArg(commandEnv, tc.argument)
  84. if err := errorCheck(gotErr, tc.wantErr); err != nil {
  85. t.Errorf("argument %q: %s", tc.argument, err.Error())
  86. continue
  87. }
  88. want, _ := super_block.NewReplicaPlacementFromString(tc.want)
  89. if !got.Equals(want) {
  90. t.Errorf("got replica placement %q, want %q", got.String(), want.String())
  91. }
  92. }
  93. }
  94. func TestEcDistribution(t *testing.T) {
  95. // find out all volume servers with one slot left.
  96. ecNodes, totalFreeEcSlots := collectEcVolumeServersByDc(topology1, "")
  97. sortEcNodesByFreeslotsDescending(ecNodes)
  98. if totalFreeEcSlots < erasure_coding.TotalShardsCount {
  99. t.Errorf("not enough free ec shard slots: %d", totalFreeEcSlots)
  100. }
  101. allocatedDataNodes := ecNodes
  102. if len(allocatedDataNodes) > erasure_coding.TotalShardsCount {
  103. allocatedDataNodes = allocatedDataNodes[:erasure_coding.TotalShardsCount]
  104. }
  105. for _, dn := range allocatedDataNodes {
  106. // fmt.Printf("info %+v %+v\n", dn.info, dn)
  107. fmt.Printf("=> %+v %+v\n", dn.info.Id, dn.freeEcSlot)
  108. }
  109. }
  110. func TestPickRackToBalanceShardsInto(t *testing.T) {
  111. testCases := []struct {
  112. topology *master_pb.TopologyInfo
  113. vid string
  114. replicaPlacement string
  115. wantOneOf []string
  116. wantErr string
  117. }{
  118. // Non-EC volumes. We don't care about these, but the function should return all racks as a safeguard.
  119. {topologyEc, "", "123", []string{"rack1", "rack2", "rack3", "rack4", "rack5", "rack6"}, ""},
  120. {topologyEc, "6225", "123", []string{"rack1", "rack2", "rack3", "rack4", "rack5", "rack6"}, ""},
  121. {topologyEc, "6226", "123", []string{"rack1", "rack2", "rack3", "rack4", "rack5", "rack6"}, ""},
  122. {topologyEc, "6241", "123", []string{"rack1", "rack2", "rack3", "rack4", "rack5", "rack6"}, ""},
  123. {topologyEc, "6242", "123", []string{"rack1", "rack2", "rack3", "rack4", "rack5", "rack6"}, ""},
  124. // EC volumes.
  125. {topologyEc, "9577", "", nil, "shards 1 > replica placement limit for other racks (0)"},
  126. {topologyEc, "9577", "111", []string{"rack1", "rack2", "rack3"}, ""},
  127. {topologyEc, "9577", "222", []string{"rack1", "rack2", "rack3"}, ""},
  128. {topologyEc, "10457", "222", []string{"rack1"}, ""},
  129. {topologyEc, "12737", "222", []string{"rack2"}, ""},
  130. {topologyEc, "14322", "222", []string{"rack3"}, ""},
  131. }
  132. for _, tc := range testCases {
  133. vid, _ := needle.NewVolumeId(tc.vid)
  134. ecNodes, _ := collectEcVolumeServersByDc(tc.topology, "")
  135. rp, _ := super_block.NewReplicaPlacementFromString(tc.replicaPlacement)
  136. ecb := &ecBalancer{
  137. ecNodes: ecNodes,
  138. replicaPlacement: rp,
  139. }
  140. racks := ecb.racks()
  141. rackToShardCount := countShardsByRack(vid, ecNodes)
  142. got, gotErr := ecb.pickRackToBalanceShardsInto(racks, rackToShardCount)
  143. if err := errorCheck(gotErr, tc.wantErr); err != nil {
  144. t.Errorf("volume %q: %s", tc.vid, err.Error())
  145. continue
  146. }
  147. if string(got) == "" && len(tc.wantOneOf) == 0 {
  148. continue
  149. }
  150. found := false
  151. for _, want := range tc.wantOneOf {
  152. if got := string(got); got == want {
  153. found = true
  154. break
  155. }
  156. }
  157. if !(found) {
  158. t.Errorf("expected one of %v for volume %q, got %q", tc.wantOneOf, tc.vid, got)
  159. }
  160. }
  161. }
  162. func TestPickEcNodeToBalanceShardsInto(t *testing.T) {
  163. testCases := []struct {
  164. topology *master_pb.TopologyInfo
  165. nodeId string
  166. vid string
  167. wantOneOf []string
  168. wantErr string
  169. }{
  170. {topologyEc, "", "", nil, "INTERNAL: missing source nodes"},
  171. {topologyEc, "idontexist", "12737", nil, "INTERNAL: missing source nodes"},
  172. // Non-EC nodes. We don't care about these, but the function should return all available target nodes as a safeguard.
  173. {
  174. topologyEc, "172.19.0.10:8702", "6225", []string{
  175. "172.19.0.13:8701", "172.19.0.14:8711", "172.19.0.16:8704", "172.19.0.17:8703",
  176. "172.19.0.19:8700", "172.19.0.20:8706", "172.19.0.21:8710", "172.19.0.3:8708",
  177. "172.19.0.4:8707", "172.19.0.5:8705", "172.19.0.6:8713", "172.19.0.8:8709",
  178. "172.19.0.9:8712"},
  179. "",
  180. },
  181. {
  182. topologyEc, "172.19.0.8:8709", "6226", []string{
  183. "172.19.0.10:8702", "172.19.0.13:8701", "172.19.0.14:8711", "172.19.0.16:8704",
  184. "172.19.0.17:8703", "172.19.0.19:8700", "172.19.0.20:8706", "172.19.0.21:8710",
  185. "172.19.0.3:8708", "172.19.0.4:8707", "172.19.0.5:8705", "172.19.0.6:8713",
  186. "172.19.0.9:8712"},
  187. "",
  188. },
  189. // EC volumes.
  190. {topologyEc, "172.19.0.10:8702", "14322", []string{
  191. "172.19.0.14:8711", "172.19.0.5:8705", "172.19.0.6:8713"},
  192. ""},
  193. {topologyEc, "172.19.0.13:8701", "10457", []string{
  194. "172.19.0.10:8702", "172.19.0.6:8713"},
  195. ""},
  196. {topologyEc, "172.19.0.17:8703", "12737", []string{
  197. "172.19.0.13:8701"},
  198. ""},
  199. {topologyEc, "172.19.0.20:8706", "14322", []string{
  200. "172.19.0.14:8711", "172.19.0.5:8705", "172.19.0.6:8713"},
  201. ""},
  202. }
  203. for _, tc := range testCases {
  204. vid, _ := needle.NewVolumeId(tc.vid)
  205. allEcNodes, _ := collectEcVolumeServersByDc(tc.topology, "")
  206. ecb := &ecBalancer{
  207. ecNodes: allEcNodes,
  208. }
  209. // Resolve target node by name
  210. var ecNode *EcNode
  211. for _, n := range allEcNodes {
  212. if n.info.Id == tc.nodeId {
  213. ecNode = n
  214. break
  215. }
  216. }
  217. got, gotErr := ecb.pickEcNodeToBalanceShardsInto(vid, ecNode, allEcNodes)
  218. if err := errorCheck(gotErr, tc.wantErr); err != nil {
  219. t.Errorf("node %q, volume %q: %s", tc.nodeId, tc.vid, err.Error())
  220. continue
  221. }
  222. if got == nil {
  223. if len(tc.wantOneOf) == 0 {
  224. continue
  225. }
  226. t.Errorf("node %q, volume %q: got no node, want %q", tc.nodeId, tc.vid, tc.wantOneOf)
  227. continue
  228. }
  229. found := false
  230. for _, want := range tc.wantOneOf {
  231. if got := got.info.Id; got == want {
  232. found = true
  233. break
  234. }
  235. }
  236. if !(found) {
  237. t.Errorf("expected one of %v for volume %q, got %q", tc.wantOneOf, tc.vid, got.info.Id)
  238. }
  239. }
  240. }
  241. func TestCountFreeShardSlots(t *testing.T) {
  242. testCases := []struct {
  243. name string
  244. topology *master_pb.TopologyInfo
  245. diskType types.DiskType
  246. want map[string]int
  247. }{
  248. {
  249. name: "topology #1, free HDD shards",
  250. topology: topology1,
  251. diskType: types.HardDriveType,
  252. want: map[string]int{
  253. "192.168.1.1:8080": 17330,
  254. "192.168.1.2:8080": 1540,
  255. "192.168.1.4:8080": 1900,
  256. "192.168.1.5:8080": 27010,
  257. "192.168.1.6:8080": 17420,
  258. },
  259. },
  260. {
  261. name: "topology #1, no free SSD shards available",
  262. topology: topology1,
  263. diskType: types.SsdType,
  264. want: map[string]int{
  265. "192.168.1.1:8080": 0,
  266. "192.168.1.2:8080": 0,
  267. "192.168.1.4:8080": 0,
  268. "192.168.1.5:8080": 0,
  269. "192.168.1.6:8080": 0,
  270. },
  271. },
  272. {
  273. name: "topology #2, no negative free HDD shards",
  274. topology: topology2,
  275. diskType: types.HardDriveType,
  276. want: map[string]int{
  277. "172.19.0.3:8708": 0,
  278. "172.19.0.4:8707": 8,
  279. "172.19.0.5:8705": 58,
  280. "172.19.0.6:8713": 39,
  281. "172.19.0.8:8709": 8,
  282. "172.19.0.9:8712": 0,
  283. "172.19.0.10:8702": 0,
  284. "172.19.0.13:8701": 0,
  285. "172.19.0.14:8711": 0,
  286. "172.19.0.16:8704": 89,
  287. "172.19.0.17:8703": 0,
  288. "172.19.0.19:8700": 9,
  289. "172.19.0.20:8706": 0,
  290. "172.19.0.21:8710": 9,
  291. },
  292. },
  293. {
  294. name: "topology #2, no free SSD shards available",
  295. topology: topology2,
  296. diskType: types.SsdType,
  297. want: map[string]int{
  298. "172.19.0.10:8702": 0,
  299. "172.19.0.13:8701": 0,
  300. "172.19.0.14:8711": 0,
  301. "172.19.0.16:8704": 0,
  302. "172.19.0.17:8703": 0,
  303. "172.19.0.19:8700": 0,
  304. "172.19.0.20:8706": 0,
  305. "172.19.0.21:8710": 0,
  306. "172.19.0.3:8708": 0,
  307. "172.19.0.4:8707": 0,
  308. "172.19.0.5:8705": 0,
  309. "172.19.0.6:8713": 0,
  310. "172.19.0.8:8709": 0,
  311. "172.19.0.9:8712": 0,
  312. },
  313. },
  314. }
  315. for _, tc := range testCases {
  316. t.Run(tc.name, func(t *testing.T) {
  317. got := map[string]int{}
  318. eachDataNode(tc.topology, func(dc DataCenterId, rack RackId, dn *master_pb.DataNodeInfo) {
  319. got[dn.Id] = countFreeShardSlots(dn, tc.diskType)
  320. })
  321. if !reflect.DeepEqual(got, tc.want) {
  322. t.Errorf("got %v, want %v", got, tc.want)
  323. }
  324. })
  325. }
  326. }