package shell

import (
	"context"
	"flag"
	"fmt"
	"io"
	"time"

	"google.golang.org/grpc"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/operation"
	"github.com/seaweedfs/seaweedfs/weed/pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
	"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
)

func init() {
	Commands = append(Commands, &commandEcEncode{})
}
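
// commandEcEncode implements the "ec.encode" shell command, which converts
// regular volumes into erasure-coded (EC) shards.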
type commandEcEncode struct {
}

func (c *commandEcEncode) Name() string {
	return "ec.encode"
}

func (c *commandEcEncode) Help() string {
	return `apply erasure coding to a volume

	ec.encode [-collection=""] [-fullPercent=95 -quietFor=1h]
	ec.encode [-collection=""] [-volumeId=<volume_id>]

	This command will:

	1. freeze one volume
	2. apply erasure coding to the volume
	3. (optionally) re-balance encoded shards across multiple volume servers

	The erasure coding scheme is 10+4: 10 data shards plus 4 parity shards. So ideally you have
	more than 14 volume servers, and you can afford to lose 4 volume servers.

	If the number of volume servers is small, the worst case is that you have only 4 volume
	servers, and the shards are spread as 4,4,3,3, respectively. You can afford to lose one
	volume server.

	If you have fewer than 4 volume servers, erasure coding still lets you tolerate up to
	4 corrupted shard files.

	Re-balancing algorithm:
` + ecBalanceAlgorithmDescription
}

func (c *commandEcEncode) HasTag(CommandTag) bool {
	return false
}
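
// Do selects the target volumes, marks each one read-only, generates EC
// shards on the server holding the volume's first replica, and finally
// re-balances the shards across the cluster (changes are applied only when
// -rebalance is set).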
func (c *commandEcEncode) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {

	encodeCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
	volumeId := encodeCommand.Int("volumeId", 0, "the volume id")
	collection := encodeCommand.String("collection", "", "the collection name")
	fullPercentage := encodeCommand.Float64("fullPercent", 95, "the volume reaches the percentage of max volume size")
	quietPeriod := encodeCommand.Duration("quietFor", time.Hour, "select volumes without writes for this period")
	// TODO: Add concurrency support to EcBalance and reenable this switch?
	//parallelCopy := encodeCommand.Bool("parallelCopy", true, "copy shards in parallel")
	forceChanges := encodeCommand.Bool("force", false, "force the encoding even if the cluster has fewer than the recommended 4 nodes")
	shardReplicaPlacement := encodeCommand.String("shardReplicaPlacement", "", "replica placement for EC shards, or master default if empty")
	applyBalancing := encodeCommand.Bool("rebalance", false, "re-balance EC shards after creation")
	if err = encodeCommand.Parse(args); err != nil {
		return nil
	}

	if err = commandEnv.confirmIsLocked(args); err != nil {
		return
	}

	rp, err := parseReplicaPlacementArg(commandEnv, *shardReplicaPlacement)
	if err != nil {
		return err
	}

	// collect topology information
	topologyInfo, _, err := collectTopologyInfo(commandEnv, 0)
	if err != nil {
		return err
	}
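
	// Unless -force is set, refuse to encode when the cluster has fewer data
	// nodes than EC parity shards; losing a single node could then cost more
	// shards than the encoding can recover.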
	if !*forceChanges {
		var nodeCount int
		eachDataNode(topologyInfo, func(dc DataCenterId, rack RackId, dn *master_pb.DataNodeInfo) {
			nodeCount++
		})
		if nodeCount < erasure_coding.ParityShardsCount {
			glog.V(0).Infof("skip erasure coding with %d nodes, less than recommended %d nodes", nodeCount, erasure_coding.ParityShardsCount)
			return nil
		}
	}

	var volumeIds []needle.VolumeId
	if vid := needle.VolumeId(*volumeId); vid != 0 {
		// volumeId is provided
		volumeIds = append(volumeIds, vid)
	} else {
		// apply to all volumes in the collection
		volumeIds, err = collectVolumeIdsForEcEncode(commandEnv, *collection, *fullPercentage, *quietPeriod)
		if err != nil {
			return err
		}
	}

	var collections []string
	if *collection != "" {
		collections = []string{*collection}
	} else {
		collections = collectCollectionsForVolumeIds(topologyInfo, volumeIds)
	}

	// encode all requested volumes...
	for _, vid := range volumeIds {
		if err = doEcEncode(commandEnv, *collection, vid); err != nil {
			return fmt.Errorf("ec encode for volume %d: %v", vid, err)
		}
	}
	// ...then re-balance ec shards.
	if err := EcBalance(commandEnv, collections, "", rp, *applyBalancing); err != nil {
		return fmt.Errorf("re-balance ec shards for collection(s) %v: %v", collections, err)
	}

	return nil
}
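
// doEcEncode marks all replicas of the given volume read-only, then asks the
// volume server holding the first replica to generate EC shards for it.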
func doEcEncode(commandEnv *CommandEnv, collection string, vid needle.VolumeId) error {
	if !commandEnv.isLocked() {
		return fmt.Errorf("lock is lost")
	}

	// find volume location
	locations, found := commandEnv.MasterClient.GetLocationsClone(uint32(vid))
	if !found {
		return fmt.Errorf("volume %d not found", vid)
	}

	// fmt.Printf("found ec %d shards on %v\n", vid, locations)

	// mark the volume as readonly
	if err := markVolumeReplicasWritable(commandEnv.option.GrpcDialOption, vid, locations, false, false); err != nil {
		return fmt.Errorf("mark volume %d as readonly on %s: %v", vid, locations[0].Url, err)
	}

	// generate ec shards
	if err := generateEcShards(commandEnv.option.GrpcDialOption, vid, collection, locations[0].ServerAddress()); err != nil {
		return fmt.Errorf("generate ec shards for volume %d on %s: %v", vid, locations[0].Url, err)
	}

	return nil
}
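
// generateEcShards issues a VolumeEcShardsGenerate gRPC call to the source
// volume server, which produces the EC shard files for the given volume.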
func generateEcShards(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, collection string, sourceVolumeServer pb.ServerAddress) error {

	fmt.Printf("generateEcShards %s %d on %s ...\n", collection, volumeId, sourceVolumeServer)

	err := operation.WithVolumeServerClient(false, sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
		_, genErr := volumeServerClient.VolumeEcShardsGenerate(context.Background(), &volume_server_pb.VolumeEcShardsGenerateRequest{
			VolumeId:   uint32(volumeId),
			Collection: collection,
		})
		return genErr
	})

	return err
}
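
// collectVolumeIdsForEcEncode returns the ids of volumes in the given
// collection that are at least fullPercentage full, have seen no writes for
// quietPeriod, and whose replicas all sit on disks with enough free volume
// slots (at least 2); remote-tier volumes are skipped.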
func collectVolumeIdsForEcEncode(commandEnv *CommandEnv, selectedCollection string, fullPercentage float64, quietPeriod time.Duration) (vids []needle.VolumeId, err error) {
	// collect topology information
	topologyInfo, volumeSizeLimitMb, err := collectTopologyInfo(commandEnv, 0)
	if err != nil {
		return
	}

	quietSeconds := int64(quietPeriod / time.Second)
	nowUnixSeconds := time.Now().Unix()

	fmt.Printf("collect volumes quiet for: %d seconds and %.1f%% full\n", quietSeconds, fullPercentage)

	vidMap := make(map[uint32]bool)
	eachDataNode(topologyInfo, func(dc DataCenterId, rack RackId, dn *master_pb.DataNodeInfo) {
		for _, diskInfo := range dn.DiskInfos {
			for _, v := range diskInfo.VolumeInfos {
				// ignore remote volumes
				if v.RemoteStorageName != "" && v.RemoteStorageKey != "" {
					continue
				}
				if v.Collection == selectedCollection && v.ModifiedAtSecond+quietSeconds < nowUnixSeconds {
					if float64(v.Size) > fullPercentage/100*float64(volumeSizeLimitMb)*1024*1024 {
						if good, found := vidMap[v.Id]; found {
							if good {
								if diskInfo.FreeVolumeCount < 2 {
									glog.V(0).Infof("skip %s %d on %s, no free disk", v.Collection, v.Id, dn.Id)
									vidMap[v.Id] = false
								}
							}
						} else {
							if diskInfo.FreeVolumeCount < 2 {
								glog.V(0).Infof("skip %s %d on %s, no free disk", v.Collection, v.Id, dn.Id)
								vidMap[v.Id] = false
							} else {
								vidMap[v.Id] = true
							}
						}
					}
				}
			}
		}
	})

	for vid, good := range vidMap {
		if good {
			vids = append(vids, needle.VolumeId(vid))
		}
	}

	return
}