package shell

import (
	"context"
	"flag"
	"fmt"
	"io"
	"time"

	"google.golang.org/grpc"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/operation"
	"github.com/seaweedfs/seaweedfs/weed/pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
	"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
	"github.com/seaweedfs/seaweedfs/weed/storage/types"
)
func init() {
	Commands = append(Commands, &commandEcDecode{})
}

type commandEcDecode struct {
}

func (c *commandEcDecode) Name() string {
	return "ec.decode"
}

func (c *commandEcDecode) Help() string {
	return `decode an erasure coded volume into a normal volume

	ec.decode [-collection=""] [-volumeId=<volume_id>]

`
}
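// Do parses the command flags, refuses to run on clusters smaller than
// the parity shard count unless -force is given, and then decodes either
// the single volume named by -volumeId or every EC volume found in the
// selected collection.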
func (c *commandEcDecode) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {

	decodeCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
	volumeId := decodeCommand.Int("volumeId", 0, "the volume id")
	collection := decodeCommand.String("collection", "", "the collection name")
	forceChanges := decodeCommand.Bool("force", false, "force the decoding even if the cluster has fewer than the recommended 4 nodes")
	if err = decodeCommand.Parse(args); err != nil {
		return nil
	}
	infoAboutSimulationMode(writer, *forceChanges, "-force")

	if err = commandEnv.confirmIsLocked(args); err != nil {
		return
	}

	vid := needle.VolumeId(*volumeId)

	// collect topology information
	topologyInfo, _, err := collectTopologyInfo(commandEnv, 0)
	if err != nil {
		return err
	}

	if !*forceChanges {
		var nodeCount int
		eachDataNode(topologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
			nodeCount++
		})
		if nodeCount < erasure_coding.ParityShardsCount {
			glog.V(0).Infof("skip erasure decoding with %d nodes, fewer than the recommended %d nodes", nodeCount, erasure_coding.ParityShardsCount)
			return nil
		}
	}

	// volumeId is provided
	if vid != 0 {
		return doEcDecode(commandEnv, topologyInfo, *collection, vid)
	}

	// apply to all volumes in the collection
	volumeIds := collectEcShardIds(topologyInfo, *collection)
	fmt.Printf("ec decode volumes: %v\n", volumeIds)
	for _, vid := range volumeIds {
		if err = doEcDecode(commandEnv, topologyInfo, *collection, vid); err != nil {
			return err
		}
	}

	return nil
}
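// doEcDecode converts one EC volume back into a normal volume in three
// steps: gather the data shards onto the server that already holds the
// most of them, rebuild a normal volume there, then mount it and remove
// the now-redundant EC shards from every server.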
func doEcDecode(commandEnv *CommandEnv, topoInfo *master_pb.TopologyInfo, collection string, vid needle.VolumeId) (err error) {
	if !commandEnv.isLocked() {
		return fmt.Errorf("lock is lost")
	}

	// find volume location
	nodeToEcIndexBits := collectEcNodeShardBits(topoInfo, vid)

	fmt.Printf("ec volume %d shard locations: %+v\n", vid, nodeToEcIndexBits)

	// collect ec shards to the server with most space
	targetNodeLocation, err := collectEcShards(commandEnv, nodeToEcIndexBits, collection, vid)
	if err != nil {
		return fmt.Errorf("collectEcShards for volume %d: %v", vid, err)
	}

	// generate a normal volume
	err = generateNormalVolume(commandEnv.option.GrpcDialOption, vid, collection, targetNodeLocation)
	if err != nil {
		return fmt.Errorf("generate normal volume %d on %s: %v", vid, targetNodeLocation, err)
	}

	// delete the previous ec shards
	err = mountVolumeAndDeleteEcShards(commandEnv.option.GrpcDialOption, collection, targetNodeLocation, nodeToEcIndexBits, vid)
	if err != nil {
		return fmt.Errorf("delete ec shards for volume %d: %v", vid, err)
	}

	return nil
}
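// mountVolumeAndDeleteEcShards mounts the freshly generated normal volume
// on the target server, then unmounts and deletes the EC shards on every
// server that still holds any.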
func mountVolumeAndDeleteEcShards(grpcDialOption grpc.DialOption, collection string, targetNodeLocation pb.ServerAddress, nodeToEcIndexBits map[pb.ServerAddress]erasure_coding.ShardBits, vid needle.VolumeId) error {

	// mount volume
	if err := operation.WithVolumeServerClient(false, targetNodeLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
		_, mountErr := volumeServerClient.VolumeMount(context.Background(), &volume_server_pb.VolumeMountRequest{
			VolumeId: uint32(vid),
		})
		return mountErr
	}); err != nil {
		return fmt.Errorf("mountVolumeAndDeleteEcShards mount volume %d on %s: %v", vid, targetNodeLocation, err)
	}

	// unmount ec shards
	for location, ecIndexBits := range nodeToEcIndexBits {
		fmt.Printf("unmount ec volume %d on %s has shards: %+v\n", vid, location, ecIndexBits.ShardIds())
		err := unmountEcShards(grpcDialOption, vid, location, ecIndexBits.ToUint32Slice())
		if err != nil {
			return fmt.Errorf("mountVolumeAndDeleteEcShards unmount ec volume %d on %s: %v", vid, location, err)
		}
	}

	// delete ec shards
	for location, ecIndexBits := range nodeToEcIndexBits {
		fmt.Printf("delete ec volume %d on %s has shards: %+v\n", vid, location, ecIndexBits.ShardIds())
		err := sourceServerDeleteEcShards(grpcDialOption, collection, vid, location, ecIndexBits.ToUint32Slice())
		if err != nil {
			return fmt.Errorf("mountVolumeAndDeleteEcShards delete ec volume %d on %s: %v", vid, location, err)
		}
	}

	return nil
}
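// generateNormalVolume asks the volume server that now holds the data
// shards to reconstruct a normal volume from them via the
// VolumeEcShardsToVolume gRPC call.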
func generateNormalVolume(grpcDialOption grpc.DialOption, vid needle.VolumeId, collection string, sourceVolumeServer pb.ServerAddress) error {

	fmt.Printf("generateNormalVolume from ec volume %d on %s\n", vid, sourceVolumeServer)

	err := operation.WithVolumeServerClient(false, sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
		_, genErr := volumeServerClient.VolumeEcShardsToVolume(context.Background(), &volume_server_pb.VolumeEcShardsToVolumeRequest{
			VolumeId:   uint32(vid),
			Collection: collection,
		})
		return genErr
	})

	return err
}
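// collectEcShards picks as copy target the server that already holds the
// most data shards (parity shards are excluded, since decoding only needs
// the data shards), then copies the missing data shards to it from the
// other servers. On success, nodeToEcIndexBits is updated so the target's
// entry includes the shards copied over.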
func collectEcShards(commandEnv *CommandEnv, nodeToEcIndexBits map[pb.ServerAddress]erasure_coding.ShardBits, collection string, vid needle.VolumeId) (targetNodeLocation pb.ServerAddress, err error) {

	maxShardCount := 0
	var existingEcIndexBits erasure_coding.ShardBits
	for loc, ecIndexBits := range nodeToEcIndexBits {
		toBeCopiedShardCount := ecIndexBits.MinusParityShards().ShardIdCount()
		if toBeCopiedShardCount > maxShardCount {
			maxShardCount = toBeCopiedShardCount
			targetNodeLocation = loc
			existingEcIndexBits = ecIndexBits
		}
	}

	fmt.Printf("collectEcShards: ec volume %d collect shards to %s from: %+v\n", vid, targetNodeLocation, nodeToEcIndexBits)

	var copiedEcIndexBits erasure_coding.ShardBits
	for loc, ecIndexBits := range nodeToEcIndexBits {
		if loc == targetNodeLocation {
			continue
		}

		needToCopyEcIndexBits := ecIndexBits.Minus(existingEcIndexBits).MinusParityShards()
		if needToCopyEcIndexBits.ShardIdCount() == 0 {
			continue
		}

		err = operation.WithVolumeServerClient(false, targetNodeLocation, commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {

			fmt.Printf("copy %d.%v %s => %s\n", vid, needToCopyEcIndexBits.ShardIds(), loc, targetNodeLocation)

			_, copyErr := volumeServerClient.VolumeEcShardsCopy(context.Background(), &volume_server_pb.VolumeEcShardsCopyRequest{
				VolumeId:       uint32(vid),
				Collection:     collection,
				ShardIds:       needToCopyEcIndexBits.ToUint32Slice(),
				CopyEcxFile:    false,
				CopyEcjFile:    true,
				CopyVifFile:    true,
				SourceDataNode: string(loc),
			})
			if copyErr != nil {
				return fmt.Errorf("copy %d.%v %s => %s : %v", vid, needToCopyEcIndexBits.ShardIds(), loc, targetNodeLocation, copyErr)
			}

			return nil
		})

		if err != nil {
			break
		}

		copiedEcIndexBits = copiedEcIndexBits.Plus(needToCopyEcIndexBits)
	}

	nodeToEcIndexBits[targetNodeLocation] = existingEcIndexBits.Plus(copiedEcIndexBits)

	return targetNodeLocation, err
}
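// lookupVolumeIds asks the master for the current locations of the given
// volume ids.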
func lookupVolumeIds(commandEnv *CommandEnv, volumeIds []string) (volumeIdLocations []*master_pb.LookupVolumeResponse_VolumeIdLocation, err error) {
	var resp *master_pb.LookupVolumeResponse
	err = commandEnv.MasterClient.WithClient(false, func(client master_pb.SeaweedClient) error {
		resp, err = client.LookupVolume(context.Background(), &master_pb.LookupVolumeRequest{VolumeOrFileIds: volumeIds})
		return err
	})
	if err != nil {
		return nil, err
	}

	return resp.VolumeIdLocations, nil
}
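// collectTopologyInfo fetches the cluster topology and the volume size
// limit from the master, optionally sleeping first so that recent changes
// have time to settle.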
func collectTopologyInfo(commandEnv *CommandEnv, delayBeforeCollecting time.Duration) (topoInfo *master_pb.TopologyInfo, volumeSizeLimitMb uint64, err error) {

	if delayBeforeCollecting > 0 {
		time.Sleep(delayBeforeCollecting)
	}

	var resp *master_pb.VolumeListResponse
	err = commandEnv.MasterClient.WithClient(false, func(client master_pb.SeaweedClient) error {
		resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
		return err
	})
	if err != nil {
		return
	}

	return resp.TopologyInfo, resp.VolumeSizeLimitMb, nil
}
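// collectEcShardIds walks the topology and returns the ids of all EC
// volumes in the selected collection, de-duplicated across data nodes.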
func collectEcShardIds(topoInfo *master_pb.TopologyInfo, selectedCollection string) (vids []needle.VolumeId) {

	vidMap := make(map[uint32]bool)
	eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
		if diskInfo, found := dn.DiskInfos[string(types.HardDriveType)]; found {
			for _, v := range diskInfo.EcShardInfos {
				if v.Collection == selectedCollection {
					vidMap[v.Id] = true
				}
			}
		}
	})

	for vid := range vidMap {
		vids = append(vids, needle.VolumeId(vid))
	}

	return
}
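// collectEcNodeShardBits maps each volume server to the bitmask of EC
// shards it holds for the given volume id.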
func collectEcNodeShardBits(topoInfo *master_pb.TopologyInfo, vid needle.VolumeId) map[pb.ServerAddress]erasure_coding.ShardBits {

	nodeToEcIndexBits := make(map[pb.ServerAddress]erasure_coding.ShardBits)
	eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
		if diskInfo, found := dn.DiskInfos[string(types.HardDriveType)]; found {
			for _, v := range diskInfo.EcShardInfos {
				if v.Id == uint32(vid) {
					nodeToEcIndexBits[pb.NewServerAddressFromDataNode(dn)] = erasure_coding.ShardBits(v.EcIndexBits)
				}
			}
		}
	})

	return nodeToEcIndexBits
}