You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

220 lines
7.2 KiB

4 years ago
4 years ago
  1. package shell
  2. import (
  3. "context"
  4. "flag"
  5. "fmt"
  6. "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
  7. "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
  8. "github.com/chrislusf/seaweedfs/weed/storage/needle"
  9. "github.com/chrislusf/seaweedfs/weed/storage/super_block"
  10. "github.com/chrislusf/seaweedfs/weed/storage/types"
  11. "io"
  12. "os"
  13. "sort"
  14. )
  15. func init() {
  16. Commands = append(Commands, &commandVolumeServerEvacuate{})
  17. }
// commandVolumeServerEvacuate implements the "volumeServer.evacuate" shell
// command, which moves all normal and erasure-coded volumes off one volume
// server so it can be shut down or upgraded. It is stateless; all inputs
// come from the command arguments.
type commandVolumeServerEvacuate struct {
}
  20. func (c *commandVolumeServerEvacuate) Name() string {
  21. return "volumeServer.evacuate"
  22. }
// Help returns the usage text displayed by the shell's help system.
// The string is user-facing output and is kept verbatim.
func (c *commandVolumeServerEvacuate) Help() string {
	return `move out all data on a volume server
volumeServer.evacuate -node <host:port>
This command moves all data away from the volume server.
The volumes on the volume servers will be redistributed.
Usually this is used to prepare to shutdown or upgrade the volume server.
Sometimes a volume can not be moved because there are no
good destination to meet the replication requirement.
E.g. a volume replication 001 in a cluster with 2 volume servers can not be moved.
You can use "-skipNonMoveable" to move the rest volumes.
`
}
// Do parses the command flags and evacuates all data from the volume server
// given by -node.
//
// Flags:
//	-node            <host>:<port> of the volume server to drain (required)
//	-skipNonMoveable skip volumes that cannot satisfy their replication
//	                 placement elsewhere, instead of failing
//	-force           actually apply the moves (otherwise a dry run)
func (c *commandVolumeServerEvacuate) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
	// require the exclusive shell lock before mutating cluster topology
	if err = commandEnv.confirmIsLocked(); err != nil {
		return
	}
	vsEvacuateCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
	volumeServer := vsEvacuateCommand.String("node", "", "<host>:<port> of the volume server")
	skipNonMoveable := vsEvacuateCommand.Bool("skipNonMoveable", false, "skip volumes that can not be moved")
	applyChange := vsEvacuateCommand.Bool("force", false, "actually apply the changes")
	// NOTE(review): a parse error is deliberately swallowed (returns nil) —
	// ContinueOnError already printed the usage to the user; confirm this
	// matches the other shell commands' convention.
	if err = vsEvacuateCommand.Parse(args); err != nil {
		return nil
	}
	if *volumeServer == "" {
		return fmt.Errorf("need to specify volume server by -node=<host>:<port>")
	}
	return volumeServerEvacuate(commandEnv, *volumeServer, *skipNonMoveable, *applyChange, writer)
}
  51. func volumeServerEvacuate(commandEnv *CommandEnv, volumeServer string, skipNonMoveable, applyChange bool, writer io.Writer) (err error) {
  52. // 1. confirm the volume server is part of the cluster
  53. // 2. collect all other volume servers, sort by empty slots
  54. // 3. move to any other volume server as long as it satisfy the replication requirements
  55. // list all the volumes
  56. var resp *master_pb.VolumeListResponse
  57. err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
  58. resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
  59. return err
  60. })
  61. if err != nil {
  62. return err
  63. }
  64. if err := evacuateNormalVolumes(commandEnv, resp, volumeServer, skipNonMoveable, applyChange, writer); err != nil {
  65. return err
  66. }
  67. if err := evacuateEcVolumes(commandEnv, resp, volumeServer, skipNonMoveable, applyChange, writer); err != nil {
  68. return err
  69. }
  70. return nil
  71. }
  72. func evacuateNormalVolumes(commandEnv *CommandEnv, resp *master_pb.VolumeListResponse, volumeServer string, skipNonMoveable, applyChange bool, writer io.Writer) error {
  73. // find this volume server
  74. volumeServers := collectVolumeServersByDc(resp.TopologyInfo, "")
  75. thisNode, otherNodes := nodesOtherThan(volumeServers, volumeServer)
  76. if thisNode == nil {
  77. return fmt.Errorf("%s is not found in this cluster", volumeServer)
  78. }
  79. // move away normal volumes
  80. volumeReplicas, _ := collectVolumeReplicaLocations(resp)
  81. for _, diskInfo := range thisNode.info.DiskInfos {
  82. for _, vol := range diskInfo.VolumeInfos {
  83. hasMoved, err := moveAwayOneNormalVolume(commandEnv, volumeReplicas, vol, thisNode, otherNodes, applyChange)
  84. if err != nil {
  85. return fmt.Errorf("move away volume %d from %s: %v", vol.Id, volumeServer, err)
  86. }
  87. if !hasMoved {
  88. if skipNonMoveable {
  89. replicaPlacement, _ := super_block.NewReplicaPlacementFromByte(byte(vol.ReplicaPlacement))
  90. fmt.Fprintf(writer, "skipping non moveable volume %d replication:%s\n", vol.Id, replicaPlacement.String())
  91. } else {
  92. return fmt.Errorf("failed to move volume %d from %s", vol.Id, volumeServer)
  93. }
  94. }
  95. }
  96. }
  97. return nil
  98. }
  99. func evacuateEcVolumes(commandEnv *CommandEnv, resp *master_pb.VolumeListResponse, volumeServer string, skipNonMoveable, applyChange bool, writer io.Writer) error {
  100. // find this ec volume server
  101. ecNodes, _ := collectEcVolumeServersByDc(resp.TopologyInfo, "")
  102. thisNode, otherNodes := ecNodesOtherThan(ecNodes, volumeServer)
  103. if thisNode == nil {
  104. return fmt.Errorf("%s is not found in this cluster\n", volumeServer)
  105. }
  106. // move away ec volumes
  107. for _, diskInfo := range thisNode.info.DiskInfos {
  108. for _, ecShardInfo := range diskInfo.EcShardInfos {
  109. hasMoved, err := moveAwayOneEcVolume(commandEnv, ecShardInfo, thisNode, otherNodes, applyChange)
  110. if err != nil {
  111. return fmt.Errorf("move away volume %d from %s: %v", ecShardInfo.Id, volumeServer, err)
  112. }
  113. if !hasMoved {
  114. if skipNonMoveable {
  115. fmt.Fprintf(writer, "failed to move away ec volume %d from %s\n", ecShardInfo.Id, volumeServer)
  116. } else {
  117. return fmt.Errorf("failed to move away ec volume %d from %s", ecShardInfo.Id, volumeServer)
  118. }
  119. }
  120. }
  121. }
  122. return nil
  123. }
// moveAwayOneEcVolume relocates all shards of one EC volume held on thisNode
// onto other nodes. For each shard it re-sorts the candidates so the node
// currently holding the fewest shards of this volume is tried first, which
// spreads the shards. Returns hasMoved=false (with nil err) as soon as one
// shard has no viable destination; returns a non-nil err if a move fails.
func moveAwayOneEcVolume(commandEnv *CommandEnv, ecShardInfo *master_pb.VolumeEcShardInformationMessage, thisNode *EcNode, otherNodes []*EcNode, applyChange bool) (hasMoved bool, err error) {
	for _, shardId := range erasure_coding.ShardBits(ecShardInfo.EcIndexBits).ShardIds() {
		// re-sort per shard: earlier moves change each node's shard count
		sort.Slice(otherNodes, func(i, j int) bool {
			return otherNodes[i].localShardIdCount(ecShardInfo.Id) < otherNodes[j].localShardIdCount(ecShardInfo.Id)
		})
		for i := 0; i < len(otherNodes); i++ {
			emptyNode := otherNodes[i]
			collectionPrefix := ""
			if ecShardInfo.Collection != "" {
				collectionPrefix = ecShardInfo.Collection + "_"
			}
			// NOTE(review): progress is printed to os.Stdout rather than the
			// shell's writer (this function has no writer parameter) —
			// inconsistent with the callers; confirm whether intentional.
			fmt.Fprintf(os.Stdout, "moving ec volume %s%d.%d %s => %s\n", collectionPrefix, ecShardInfo.Id, shardId, thisNode.info.Id, emptyNode.info.Id)
			err = moveMountedShardToEcNode(commandEnv, thisNode, ecShardInfo.Collection, needle.VolumeId(ecShardInfo.Id), shardId, emptyNode, applyChange)
			if err != nil {
				return
			} else {
				// first successful destination wins; move on to the next shard
				hasMoved = true
				break
			}
		}
		// one shard could not be placed anywhere: give up on this volume
		if !hasMoved {
			return
		}
	}
	return
}
  150. func moveAwayOneNormalVolume(commandEnv *CommandEnv, volumeReplicas map[uint32][]*VolumeReplica, vol *master_pb.VolumeInformationMessage, thisNode *Node, otherNodes []*Node, applyChange bool) (hasMoved bool, err error) {
  151. fn := capacityByFreeVolumeCount(types.ToDiskType(vol.DiskType))
  152. sort.Slice(otherNodes, func(i, j int) bool {
  153. return otherNodes[i].localVolumeRatio(fn) > otherNodes[j].localVolumeRatio(fn)
  154. })
  155. for i := 0; i < len(otherNodes); i++ {
  156. emptyNode := otherNodes[i]
  157. hasMoved, err = maybeMoveOneVolume(commandEnv, volumeReplicas, thisNode, vol, emptyNode, applyChange)
  158. if err != nil {
  159. return
  160. }
  161. if hasMoved {
  162. break
  163. }
  164. }
  165. return
  166. }
  167. func nodesOtherThan(volumeServers []*Node, thisServer string) (thisNode *Node, otherNodes []*Node) {
  168. for _, node := range volumeServers {
  169. if node.info.Id == thisServer {
  170. thisNode = node
  171. continue
  172. }
  173. otherNodes = append(otherNodes, node)
  174. }
  175. return
  176. }
  177. func ecNodesOtherThan(volumeServers []*EcNode, thisServer string) (thisNode *EcNode, otherNodes []*EcNode) {
  178. for _, node := range volumeServers {
  179. if node.info.Id == thisServer {
  180. thisNode = node
  181. continue
  182. }
  183. otherNodes = append(otherNodes, node)
  184. }
  185. return
  186. }