package shell

import (
	"context"
	"fmt"
	"math"
	"sort"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/operation"
	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
	"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
	"github.com/chrislusf/seaweedfs/weed/storage/needle"
	"google.golang.org/grpc"
)
// moveMountedShardToEcNode moves one mounted EC shard from its existing location to
// the destination EC node. When applyBalancing is false, only the in-memory shard
// bookkeeping on the two EcNode structs is updated.
func moveMountedShardToEcNode(ctx context.Context, commandEnv *CommandEnv, existingLocation *EcNode, collection string, vid needle.VolumeId, shardId erasure_coding.ShardId, destinationEcNode *EcNode, applyBalancing bool) (err error) {

	copiedShardIds := []uint32{uint32(shardId)}

	if applyBalancing {

		// ask the destination node to copy the shard and the ecx file from the source node, and mount it
		copiedShardIds, err = oneServerCopyAndMountEcShardsFromSource(ctx, commandEnv.option.GrpcDialOption, destinationEcNode, []uint32{uint32(shardId)}, vid, collection, existingLocation.info.Id)
		if err != nil {
			return err
		}

		// unmount the shards that are about to be deleted
		err = unmountEcShards(ctx, commandEnv.option.GrpcDialOption, vid, existingLocation.info.Id, copiedShardIds)
		if err != nil {
			return err
		}

		// ask the source node to delete the shard, and maybe the ecx file
		err = sourceServerDeleteEcShards(ctx, commandEnv.option.GrpcDialOption, collection, vid, existingLocation.info.Id, copiedShardIds)
		if err != nil {
			return err
		}

		fmt.Printf("moved ec shard %d.%d %s => %s\n", vid, shardId, existingLocation.info.Id, destinationEcNode.info.Id)

	}

	destinationEcNode.addEcVolumeShards(vid, collection, copiedShardIds)
	existingLocation.deleteEcVolumeShards(vid, copiedShardIds)

	return nil
}
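// A minimal call sketch (hypothetical values, not taken from this file): move shard 3
// of volume 7 in collection "pictures" from sourceNode to destNode and apply the change:
//
//	err := moveMountedShardToEcNode(ctx, commandEnv, sourceNode, "pictures",
//		needle.VolumeId(7), erasure_coding.ShardId(3), destNode, true)
//
// The copy -> unmount -> delete order above keeps at least one mounted copy of the
// shard available at every point of the move.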
// oneServerCopyAndMountEcShardsFromSource asks the target volume server to copy the
// given shards (plus the .ecx/.ecj files) from the source location and mount them.
// It returns the shard ids that were actually copied from another server.
func oneServerCopyAndMountEcShardsFromSource(ctx context.Context, grpcDialOption grpc.DialOption,
	targetServer *EcNode, shardIdsToCopy []uint32,
	volumeId needle.VolumeId, collection string, existingLocation string) (copiedShardIds []uint32, err error) {

	fmt.Printf("allocate %d.%v %s => %s\n", volumeId, shardIdsToCopy, existingLocation, targetServer.info.Id)

	err = operation.WithVolumeServerClient(targetServer.info.Id, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {

		if targetServer.info.Id != existingLocation {

			fmt.Printf("copy %d.%v %s => %s\n", volumeId, shardIdsToCopy, existingLocation, targetServer.info.Id)
			_, copyErr := volumeServerClient.VolumeEcShardsCopy(ctx, &volume_server_pb.VolumeEcShardsCopyRequest{
				VolumeId:       uint32(volumeId),
				Collection:     collection,
				ShardIds:       shardIdsToCopy,
				CopyEcxFile:    true,
				CopyEcjFile:    true,
				SourceDataNode: existingLocation,
			})
			if copyErr != nil {
				return fmt.Errorf("copy %d.%v %s => %s : %v", volumeId, shardIdsToCopy, existingLocation, targetServer.info.Id, copyErr)
			}
		}

		fmt.Printf("mount %d.%v on %s\n", volumeId, shardIdsToCopy, targetServer.info.Id)
		_, mountErr := volumeServerClient.VolumeEcShardsMount(ctx, &volume_server_pb.VolumeEcShardsMountRequest{
			VolumeId:   uint32(volumeId),
			Collection: collection,
			ShardIds:   shardIdsToCopy,
		})
		if mountErr != nil {
			return fmt.Errorf("mount %d.%v on %s : %v", volumeId, shardIdsToCopy, targetServer.info.Id, mountErr)
		}

		if targetServer.info.Id != existingLocation {
			copiedShardIds = shardIdsToCopy
			glog.V(0).Infof("%s ec volume %d deletes shards %+v", existingLocation, volumeId, copiedShardIds)
		}

		return nil

	})

	if err != nil {
		return
	}

	return
}
func eachDataNode(topo *master_pb.TopologyInfo, fn func(dc string, rack RackId, dn *master_pb.DataNodeInfo)) {
	for _, dc := range topo.DataCenterInfos {
		for _, rack := range dc.RackInfos {
			for _, dn := range rack.DataNodeInfos {
				fn(dc.Id, RackId(rack.Id), dn)
			}
		}
	}
}

func sortEcNodesByFreeslotsDecending(ecNodes []*EcNode) {
	sort.Slice(ecNodes, func(i, j int) bool {
		return ecNodes[i].freeEcSlot > ecNodes[j].freeEcSlot
	})
}

func sortEcNodesByFreeslotsAscending(ecNodes []*EcNode) {
	sort.Slice(ecNodes, func(i, j int) bool {
		return ecNodes[i].freeEcSlot < ecNodes[j].freeEcSlot
	})
}
type CandidateEcNode struct {
	ecNode     *EcNode
	shardCount int
}

// ensureSortedEcNodes restores the ordering defined by lessThan after the element at
// the given index has changed (for example after its shard count or freeEcSlot was
// updated), by bubbling that single element up or down as needed.
func ensureSortedEcNodes(data []*CandidateEcNode, index int, lessThan func(i, j int) bool) {
	for i := index - 1; i >= 0; i-- {
		if lessThan(i+1, i) {
			swap(data, i, i+1)
		} else {
			break
		}
	}
	for i := index + 1; i < len(data); i++ {
		if lessThan(i, i-1) {
			swap(data, i, i-1)
		} else {
			break
		}
	}
}

func swap(data []*CandidateEcNode, i, j int) {
	t := data[i]
	data[i] = data[j]
	data[j] = t
}
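// Usage sketch (assumed, not from the original source): after bumping the shard count
// of the element at position idx, one call re-establishes a descending order without
// re-sorting the whole slice:
//
//	candidates[idx].shardCount++
//	ensureSortedEcNodes(candidates, idx, func(i, j int) bool {
//		return candidates[i].shardCount > candidates[j].shardCount
//	})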
func countShards(ecShardInfos []*master_pb.VolumeEcShardInformationMessage) (count int) {
	for _, ecShardInfo := range ecShardInfos {
		shardBits := erasure_coding.ShardBits(ecShardInfo.EcIndexBits)
		count += shardBits.ShardIdCount()
	}
	return
}

func countFreeShardSlots(dn *master_pb.DataNodeInfo) (count int) {
	return int(dn.MaxVolumeCount-dn.ActiveVolumeCount)*erasure_coding.DataShardsCount - countShards(dn.EcShardInfos)
}
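// Worked example (assuming the default of 10 data shards per EC volume): a node with
// MaxVolumeCount 100, ActiveVolumeCount 40 and 10 EC shards already stored has
// (100-40)*10 - 10 = 590 free EC shard slots.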
type RackId string
type EcNodeId string

type EcNode struct {
	info       *master_pb.DataNodeInfo
	dc         string
	rack       RackId
	freeEcSlot int
}

type EcRack struct {
	ecNodes    map[EcNodeId]*EcNode
	freeEcSlot int
}
// collectEcNodes lists all volume servers (optionally restricted to one data center)
// together with their free EC shard slots, sorted by free slots in descending order.
func collectEcNodes(ctx context.Context, commandEnv *CommandEnv, selectedDataCenter string) (ecNodes []*EcNode, totalFreeEcSlots int, err error) {

	// list all possible locations
	var resp *master_pb.VolumeListResponse
	err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error {
		resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{})
		return err
	})
	if err != nil {
		return nil, 0, err
	}

	// find all volume servers with free EC shard slots
	eachDataNode(resp.TopologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
		if selectedDataCenter != "" && selectedDataCenter != dc {
			return
		}

		freeEcSlots := countFreeShardSlots(dn)
		ecNodes = append(ecNodes, &EcNode{
			info:       dn,
			dc:         dc,
			rack:       rack,
			freeEcSlot: freeEcSlots,
		})
		totalFreeEcSlots += freeEcSlots
	})

	sortEcNodesByFreeslotsDecending(ecNodes)

	return
}
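// Hypothetical usage sketch: an ec.balance-style command would typically start from
// this call, with an empty string meaning "all data centers":
//
//	ecNodes, totalFreeEcSlots, err := collectEcNodes(ctx, commandEnv, "")
//	if err != nil {
//		return err
//	}
//	// ecNodes is now sorted by free EC shard slots, most free first.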
// sourceServerDeleteEcShards asks the source volume server to delete the given shards.
func sourceServerDeleteEcShards(ctx context.Context, grpcDialOption grpc.DialOption,
	collection string, volumeId needle.VolumeId, sourceLocation string, toBeDeletedShardIds []uint32) error {

	fmt.Printf("delete %d.%v from %s\n", volumeId, toBeDeletedShardIds, sourceLocation)

	return operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
		_, deleteErr := volumeServerClient.VolumeEcShardsDelete(ctx, &volume_server_pb.VolumeEcShardsDeleteRequest{
			VolumeId:   uint32(volumeId),
			Collection: collection,
			ShardIds:   toBeDeletedShardIds,
		})
		return deleteErr
	})
}

// unmountEcShards asks the volume server at sourceLocation to unmount the given shards.
func unmountEcShards(ctx context.Context, grpcDialOption grpc.DialOption,
	volumeId needle.VolumeId, sourceLocation string, toBeUnmountedShardIds []uint32) error {

	fmt.Printf("unmount %d.%v from %s\n", volumeId, toBeUnmountedShardIds, sourceLocation)

	return operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
		_, unmountErr := volumeServerClient.VolumeEcShardsUnmount(ctx, &volume_server_pb.VolumeEcShardsUnmountRequest{
			VolumeId: uint32(volumeId),
			ShardIds: toBeUnmountedShardIds,
		})
		return unmountErr
	})
}
// mountEcShards asks the volume server at sourceLocation to mount the given shards.
func mountEcShards(ctx context.Context, grpcDialOption grpc.DialOption,
	collection string, volumeId needle.VolumeId, sourceLocation string, toBeMountedShardIds []uint32) error {

	fmt.Printf("mount %d.%v on %s\n", volumeId, toBeMountedShardIds, sourceLocation)

	return operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
		_, mountErr := volumeServerClient.VolumeEcShardsMount(ctx, &volume_server_pb.VolumeEcShardsMountRequest{
			VolumeId:   uint32(volumeId),
			Collection: collection,
			ShardIds:   toBeMountedShardIds,
		})
		return mountErr
	})
}

// ceilDivide returns total divided by n, rounded up.
func ceilDivide(total, n int) int {
	return int(math.Ceil(float64(total) / float64(n)))
}
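// Example: ceilDivide(14, 4) == 4, so spreading 14 shards over 4 racks allows at most
// 4 shards per rack.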
func findEcVolumeShards(ecNode *EcNode, vid needle.VolumeId) erasure_coding.ShardBits {
	for _, shardInfo := range ecNode.info.EcShardInfos {
		if needle.VolumeId(shardInfo.Id) == vid {
			return erasure_coding.ShardBits(shardInfo.EcIndexBits)
		}
	}
	return 0
}
func (ecNode *EcNode) addEcVolumeShards(vid needle.VolumeId, collection string, shardIds []uint32) *EcNode {

	foundVolume := false
	for _, shardInfo := range ecNode.info.EcShardInfos {
		if needle.VolumeId(shardInfo.Id) == vid {
			oldShardBits := erasure_coding.ShardBits(shardInfo.EcIndexBits)
			newShardBits := oldShardBits
			for _, shardId := range shardIds {
				newShardBits = newShardBits.AddShardId(erasure_coding.ShardId(shardId))
			}
			shardInfo.EcIndexBits = uint32(newShardBits)
			ecNode.freeEcSlot -= newShardBits.ShardIdCount() - oldShardBits.ShardIdCount()
			foundVolume = true
			break
		}
	}

	if !foundVolume {
		var newShardBits erasure_coding.ShardBits
		for _, shardId := range shardIds {
			newShardBits = newShardBits.AddShardId(erasure_coding.ShardId(shardId))
		}
		ecNode.info.EcShardInfos = append(ecNode.info.EcShardInfos, &master_pb.VolumeEcShardInformationMessage{
			Id:          uint32(vid),
			Collection:  collection,
			EcIndexBits: uint32(newShardBits),
		})
		ecNode.freeEcSlot -= len(shardIds)
	}

	return ecNode
}
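// Note: ShardBits is a bit set, so adding a shard id that is already present does not
// change the bit count, and freeEcSlot above is only reduced by the number of shards
// that are actually new to this node.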
func (ecNode *EcNode) deleteEcVolumeShards(vid needle.VolumeId, shardIds []uint32) *EcNode {

	for _, shardInfo := range ecNode.info.EcShardInfos {
		if needle.VolumeId(shardInfo.Id) == vid {
			oldShardBits := erasure_coding.ShardBits(shardInfo.EcIndexBits)
			newShardBits := oldShardBits
			for _, shardId := range shardIds {
				newShardBits = newShardBits.RemoveShardId(erasure_coding.ShardId(shardId))
			}
			shardInfo.EcIndexBits = uint32(newShardBits)
			ecNode.freeEcSlot -= newShardBits.ShardIdCount() - oldShardBits.ShardIdCount()
		}
	}

	return ecNode
}
func groupByCount(data []*EcNode, identifierFn func(*EcNode) (id string, count int)) map[string]int {
	countMap := make(map[string]int)
	for _, d := range data {
		id, count := identifierFn(d)
		countMap[id] += count
	}
	return countMap
}

func groupBy(data []*EcNode, identifierFn func(*EcNode) (id string)) map[string][]*EcNode {
	groupMap := make(map[string][]*EcNode)
	for _, d := range data {
		id := identifierFn(d)
		groupMap[id] = append(groupMap[id], d)
	}
	return groupMap
}