You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

288 lines
8.3 KiB

9 months ago
3 years ago
9 months ago
4 years ago
3 years ago
9 months ago
3 years ago
9 months ago
7 months ago
9 months ago
  1. package shell
  2. import (
  3. "fmt"
  4. "github.com/seaweedfs/seaweedfs/weed/storage/types"
  5. "github.com/stretchr/testify/assert"
  6. "testing"
  7. "github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
  8. "github.com/seaweedfs/seaweedfs/weed/storage/super_block"
  9. )
// testMoveCase describes one scenario for isGoodMove: given a replica
// placement policy and the current replica locations, should moving a
// replica from sourceLocation to targetLocation be allowed?
type testMoveCase struct {
	name           string           // human-readable description of the scenario
	replication    string           // placement policy string, e.g. "100", "010", "011"
	replicas       []*VolumeReplica // current replica locations of the volume
	sourceLocation location         // where the replica would move from
	targetLocation location         // where the replica would move to
	expected       bool             // whether isGoodMove should approve the move
}
  18. func TestIsGoodMove(t *testing.T) {
  19. var tests = []testMoveCase{
  20. {
  21. name: "test 100 move to wrong data centers",
  22. replication: "100",
  23. replicas: []*VolumeReplica{
  24. {
  25. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  26. },
  27. {
  28. location: &location{"dc2", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
  29. },
  30. },
  31. sourceLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  32. targetLocation: location{"dc2", "r3", &master_pb.DataNodeInfo{Id: "dn3"}},
  33. expected: false,
  34. },
  35. {
  36. name: "test 100 move to spread into proper data centers",
  37. replication: "100",
  38. replicas: []*VolumeReplica{
  39. {
  40. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  41. },
  42. {
  43. location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
  44. },
  45. },
  46. sourceLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
  47. targetLocation: location{"dc2", "r2", &master_pb.DataNodeInfo{Id: "dn3"}},
  48. expected: true,
  49. },
  50. {
  51. name: "test move to the same node",
  52. replication: "001",
  53. replicas: []*VolumeReplica{
  54. {
  55. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  56. },
  57. {
  58. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
  59. },
  60. },
  61. sourceLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
  62. targetLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
  63. expected: false,
  64. },
  65. {
  66. name: "test move to the same rack, but existing node",
  67. replication: "001",
  68. replicas: []*VolumeReplica{
  69. {
  70. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  71. },
  72. {
  73. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
  74. },
  75. },
  76. sourceLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
  77. targetLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  78. expected: false,
  79. },
  80. {
  81. name: "test move to the same rack, a new node",
  82. replication: "001",
  83. replicas: []*VolumeReplica{
  84. {
  85. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  86. },
  87. {
  88. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
  89. },
  90. },
  91. sourceLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
  92. targetLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn3"}},
  93. expected: true,
  94. },
  95. {
  96. name: "test 010 move all to the same rack",
  97. replication: "010",
  98. replicas: []*VolumeReplica{
  99. {
  100. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  101. },
  102. {
  103. location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
  104. },
  105. },
  106. sourceLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
  107. targetLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn3"}},
  108. expected: false,
  109. },
  110. {
  111. name: "test 010 move to spread racks",
  112. replication: "010",
  113. replicas: []*VolumeReplica{
  114. {
  115. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  116. },
  117. {
  118. location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
  119. },
  120. },
  121. sourceLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
  122. targetLocation: location{"dc1", "r3", &master_pb.DataNodeInfo{Id: "dn3"}},
  123. expected: true,
  124. },
  125. {
  126. name: "test 010 move to spread racks",
  127. replication: "010",
  128. replicas: []*VolumeReplica{
  129. {
  130. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  131. },
  132. {
  133. location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
  134. },
  135. },
  136. sourceLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
  137. targetLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn3"}},
  138. expected: true,
  139. },
  140. {
  141. name: "test 011 switch which rack has more replicas",
  142. replication: "011",
  143. replicas: []*VolumeReplica{
  144. {
  145. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  146. },
  147. {
  148. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
  149. },
  150. {
  151. location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn3"}},
  152. },
  153. },
  154. sourceLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  155. targetLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn4"}},
  156. expected: true,
  157. },
  158. {
  159. name: "test 011 move the lonely replica to another racks",
  160. replication: "011",
  161. replicas: []*VolumeReplica{
  162. {
  163. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  164. },
  165. {
  166. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
  167. },
  168. {
  169. location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn3"}},
  170. },
  171. },
  172. sourceLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn3"}},
  173. targetLocation: location{"dc1", "r3", &master_pb.DataNodeInfo{Id: "dn4"}},
  174. expected: true,
  175. },
  176. {
  177. name: "test 011 move to wrong racks",
  178. replication: "011",
  179. replicas: []*VolumeReplica{
  180. {
  181. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  182. },
  183. {
  184. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
  185. },
  186. {
  187. location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn3"}},
  188. },
  189. },
  190. sourceLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  191. targetLocation: location{"dc1", "r3", &master_pb.DataNodeInfo{Id: "dn4"}},
  192. expected: false,
  193. },
  194. {
  195. name: "test 011 move all to the same rack",
  196. replication: "011",
  197. replicas: []*VolumeReplica{
  198. {
  199. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  200. },
  201. {
  202. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
  203. },
  204. {
  205. location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn3"}},
  206. },
  207. },
  208. sourceLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn3"}},
  209. targetLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn4"}},
  210. expected: false,
  211. },
  212. }
  213. for _, tt := range tests {
  214. replicaPlacement, _ := super_block.NewReplicaPlacementFromString(tt.replication)
  215. println("replication:", tt.replication, "expected", tt.expected, "name:", tt.name)
  216. sourceNode := &Node{
  217. info: tt.sourceLocation.dataNode,
  218. dc: tt.sourceLocation.dc,
  219. rack: tt.sourceLocation.rack,
  220. }
  221. targetNode := &Node{
  222. info: tt.targetLocation.dataNode,
  223. dc: tt.targetLocation.dc,
  224. rack: tt.targetLocation.rack,
  225. }
  226. if isGoodMove(replicaPlacement, tt.replicas, sourceNode, targetNode) != tt.expected {
  227. t.Errorf("%s: expect %v move from %v to %s, replication:%v",
  228. tt.name, tt.expected, tt.sourceLocation, tt.targetLocation, tt.replication)
  229. }
  230. }
  231. }
  232. func TestBalance(t *testing.T) {
  233. topologyInfo := parseOutput(topoData)
  234. volumeServers := collectVolumeServersByDc(topologyInfo, "")
  235. volumeReplicas, _ := collectVolumeReplicaLocations(topologyInfo)
  236. diskTypes := collectVolumeDiskTypes(topologyInfo)
  237. if err := balanceVolumeServers(nil, diskTypes, volumeReplicas, volumeServers, "ALL_COLLECTIONS", false); err != nil {
  238. t.Errorf("balance: %v", err)
  239. }
  240. }
  241. func TestVolumeSelection(t *testing.T) {
  242. topologyInfo := parseOutput(topoData)
  243. vids, err := collectVolumeIdsForTierChange(topologyInfo, 1000, types.ToDiskType("hdd"), "", 20.0, 0)
  244. if err != nil {
  245. t.Errorf("collectVolumeIdsForTierChange: %v", err)
  246. }
  247. assert.Equal(t, 378, len(vids))
  248. }
  249. func TestDeleteEmptySelection(t *testing.T) {
  250. topologyInfo := parseOutput(topoData)
  251. eachDataNode(topologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
  252. for _, diskInfo := range dn.DiskInfos {
  253. for _, v := range diskInfo.VolumeInfos {
  254. if v.Size <= super_block.SuperBlockSize && v.ModifiedAtSecond > 0 {
  255. fmt.Printf("empty volume %d from %s\n", v.Id, dn.Id)
  256. }
  257. }
  258. }
  259. })
  260. }