You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

299 lines
8.6 KiB

9 months ago
3 years ago
9 months ago
4 years ago
3 years ago
10 months ago
3 years ago
9 months ago
8 months ago
9 months ago
package shell

import (
	"fmt"
	"sync"
	"testing"

	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
	"github.com/seaweedfs/seaweedfs/weed/storage/super_block"
	"github.com/seaweedfs/seaweedfs/weed/storage/types"
	"github.com/stretchr/testify/assert"
)
// testMoveCase describes one isGoodMove scenario: the volume's current
// replicas, a proposed move (source -> target), and whether the move should
// be accepted under the given replication setting.
type testMoveCase struct {
	name           string           // human-readable scenario description
	replication    string           // replica placement string, e.g. "100", "010", "011"
	replicas       []*VolumeReplica // current replica locations of the volume
	sourceLocation location         // where the replica would move from
	targetLocation location         // where the replica would move to
	expected       bool             // expected isGoodMove verdict
}
  19. func TestIsGoodMove(t *testing.T) {
  20. var tests = []testMoveCase{
  21. {
  22. name: "test 100 move to wrong data centers",
  23. replication: "100",
  24. replicas: []*VolumeReplica{
  25. {
  26. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  27. },
  28. {
  29. location: &location{"dc2", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
  30. },
  31. },
  32. sourceLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  33. targetLocation: location{"dc2", "r3", &master_pb.DataNodeInfo{Id: "dn3"}},
  34. expected: false,
  35. },
  36. {
  37. name: "test 100 move to spread into proper data centers",
  38. replication: "100",
  39. replicas: []*VolumeReplica{
  40. {
  41. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  42. },
  43. {
  44. location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
  45. },
  46. },
  47. sourceLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
  48. targetLocation: location{"dc2", "r2", &master_pb.DataNodeInfo{Id: "dn3"}},
  49. expected: true,
  50. },
  51. {
  52. name: "test move to the same node",
  53. replication: "001",
  54. replicas: []*VolumeReplica{
  55. {
  56. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  57. },
  58. {
  59. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
  60. },
  61. },
  62. sourceLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
  63. targetLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
  64. expected: false,
  65. },
  66. {
  67. name: "test move to the same rack, but existing node",
  68. replication: "001",
  69. replicas: []*VolumeReplica{
  70. {
  71. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  72. },
  73. {
  74. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
  75. },
  76. },
  77. sourceLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
  78. targetLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  79. expected: false,
  80. },
  81. {
  82. name: "test move to the same rack, a new node",
  83. replication: "001",
  84. replicas: []*VolumeReplica{
  85. {
  86. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  87. },
  88. {
  89. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
  90. },
  91. },
  92. sourceLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
  93. targetLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn3"}},
  94. expected: true,
  95. },
  96. {
  97. name: "test 010 move all to the same rack",
  98. replication: "010",
  99. replicas: []*VolumeReplica{
  100. {
  101. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  102. },
  103. {
  104. location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
  105. },
  106. },
  107. sourceLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
  108. targetLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn3"}},
  109. expected: false,
  110. },
  111. {
  112. name: "test 010 move to spread racks",
  113. replication: "010",
  114. replicas: []*VolumeReplica{
  115. {
  116. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  117. },
  118. {
  119. location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
  120. },
  121. },
  122. sourceLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
  123. targetLocation: location{"dc1", "r3", &master_pb.DataNodeInfo{Id: "dn3"}},
  124. expected: true,
  125. },
  126. {
  127. name: "test 010 move to spread racks",
  128. replication: "010",
  129. replicas: []*VolumeReplica{
  130. {
  131. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  132. },
  133. {
  134. location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
  135. },
  136. },
  137. sourceLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
  138. targetLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn3"}},
  139. expected: true,
  140. },
  141. {
  142. name: "test 011 switch which rack has more replicas",
  143. replication: "011",
  144. replicas: []*VolumeReplica{
  145. {
  146. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  147. },
  148. {
  149. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
  150. },
  151. {
  152. location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn3"}},
  153. },
  154. },
  155. sourceLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  156. targetLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn4"}},
  157. expected: true,
  158. },
  159. {
  160. name: "test 011 move the lonely replica to another racks",
  161. replication: "011",
  162. replicas: []*VolumeReplica{
  163. {
  164. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  165. },
  166. {
  167. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
  168. },
  169. {
  170. location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn3"}},
  171. },
  172. },
  173. sourceLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn3"}},
  174. targetLocation: location{"dc1", "r3", &master_pb.DataNodeInfo{Id: "dn4"}},
  175. expected: true,
  176. },
  177. {
  178. name: "test 011 move to wrong racks",
  179. replication: "011",
  180. replicas: []*VolumeReplica{
  181. {
  182. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  183. },
  184. {
  185. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
  186. },
  187. {
  188. location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn3"}},
  189. },
  190. },
  191. sourceLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  192. targetLocation: location{"dc1", "r3", &master_pb.DataNodeInfo{Id: "dn4"}},
  193. expected: false,
  194. },
  195. {
  196. name: "test 011 move all to the same rack",
  197. replication: "011",
  198. replicas: []*VolumeReplica{
  199. {
  200. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
  201. },
  202. {
  203. location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
  204. },
  205. {
  206. location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn3"}},
  207. },
  208. },
  209. sourceLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn3"}},
  210. targetLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn4"}},
  211. expected: false,
  212. },
  213. }
  214. for _, tt := range tests {
  215. replicaPlacement, _ := super_block.NewReplicaPlacementFromString(tt.replication)
  216. println("replication:", tt.replication, "expected", tt.expected, "name:", tt.name)
  217. sourceNode := &Node{
  218. info: tt.sourceLocation.dataNode,
  219. dc: tt.sourceLocation.dc,
  220. rack: tt.sourceLocation.rack,
  221. }
  222. targetNode := &Node{
  223. info: tt.targetLocation.dataNode,
  224. dc: tt.targetLocation.dc,
  225. rack: tt.targetLocation.rack,
  226. }
  227. if isGoodMove(replicaPlacement, tt.replicas, sourceNode, targetNode) != tt.expected {
  228. t.Errorf("%s: expect %v move from %v to %s, replication:%v",
  229. tt.name, tt.expected, tt.sourceLocation, tt.targetLocation, tt.replication)
  230. }
  231. }
  232. }
  233. func TestBalance(t *testing.T) {
  234. topologyInfo := parseOutput(topoData)
  235. volumeServers := collectVolumeServersByDcRackNode(topologyInfo, "", "", "")
  236. volumeReplicas, _ := collectVolumeReplicaLocations(topologyInfo)
  237. diskTypes := collectVolumeDiskTypes(topologyInfo)
  238. applyBalancing := false
  239. parallelBalancing := false
  240. c := commandVolumeBalance{
  241. commandEnv: nil,
  242. lock: sync.RWMutex{},
  243. parallelBalancing: &parallelBalancing,
  244. applyBalancing: &applyBalancing,
  245. diskTypes: diskTypes,
  246. volumeServers: volumeServers,
  247. volumeReplicas: volumeReplicas,
  248. }
  249. if err := c.balanceVolumeServers("ALL_COLLECTIONS"); err != nil {
  250. t.Errorf("balance: %v", err)
  251. }
  252. }
  253. func TestVolumeSelection(t *testing.T) {
  254. topologyInfo := parseOutput(topoData)
  255. vids, err := collectVolumeIdsForTierChange(topologyInfo, 1000, types.ToDiskType("hdd"), "", 20.0, 0)
  256. if err != nil {
  257. t.Errorf("collectVolumeIdsForTierChange: %v", err)
  258. }
  259. assert.Equal(t, 378, len(vids))
  260. }
  261. func TestDeleteEmptySelection(t *testing.T) {
  262. topologyInfo := parseOutput(topoData)
  263. eachDataNode(topologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
  264. for _, diskInfo := range dn.DiskInfos {
  265. for _, v := range diskInfo.VolumeInfos {
  266. if v.Size <= super_block.SuperBlockSize && v.ModifiedAtSecond > 0 {
  267. fmt.Printf("empty volume %d from %s\n", v.Id, dn.Id)
  268. }
  269. }
  270. }
  271. })
  272. }