package shell

import (
	"context"
	"flag"
	"fmt"
	"io"

	"google.golang.org/grpc"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/operation"
	"github.com/chrislusf/seaweedfs/weed/pb"
	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
	"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
	"github.com/chrislusf/seaweedfs/weed/storage/needle"
	"github.com/chrislusf/seaweedfs/weed/storage/types"
)

func init() {
	Commands = append(Commands, &commandEcDecode{})
}
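// commandEcDecode implements the "ec.decode" shell command, which converts
// an erasure coded volume back into a normal volume.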
type commandEcDecode struct {
}

func (c *commandEcDecode) Name() string {
	return "ec.decode"
}

func (c *commandEcDecode) Help() string {
	return `decode an erasure coded volume into a normal volume

	ec.decode [-collection=""] [-volumeId=<volume_id>]

`
}
func (c *commandEcDecode) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {

	decodeCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
	volumeId := decodeCommand.Int("volumeId", 0, "the volume id")
	collection := decodeCommand.String("collection", "", "the collection name")
	forceChanges := decodeCommand.Bool("force", false, "force the decoding even if the cluster has less than recommended 4 nodes")
	if err = decodeCommand.Parse(args); err != nil {
		return nil
	}

	if err = commandEnv.confirmIsLocked(args); err != nil {
		return
	}

	vid := needle.VolumeId(*volumeId)

	// collect topology information
	topologyInfo, _, err := collectTopologyInfo(commandEnv)
	if err != nil {
		return err
	}

	if !*forceChanges {
		var nodeCount int
		eachDataNode(topologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
			nodeCount++
		})
		if nodeCount < erasure_coding.ParityShardsCount {
			glog.V(0).Infof("skip erasure decoding with %d nodes, less than recommended %d nodes", nodeCount, erasure_coding.ParityShardsCount)
			return nil
		}
	}

	// volumeId is provided
	if vid != 0 {
		return doEcDecode(commandEnv, topologyInfo, *collection, vid)
	}

	// apply to all ec volumes in the collection
	volumeIds := collectEcShardIds(topologyInfo, *collection)
	fmt.Printf("ec decode volumes: %v\n", volumeIds)
	for _, vid := range volumeIds {
		if err = doEcDecode(commandEnv, topologyInfo, *collection, vid); err != nil {
			return err
		}
	}

	return nil
}
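// doEcDecode decodes one ec volume in three steps: gather enough ec shards
// onto a single target server, rebuild the normal volume from them there,
// and then unmount and delete the now-redundant ec shards across the cluster.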
func doEcDecode(commandEnv *CommandEnv, topoInfo *master_pb.TopologyInfo, collection string, vid needle.VolumeId) (err error) {
	// find volume location
	nodeToEcIndexBits := collectEcNodeShardBits(topoInfo, vid)

	fmt.Printf("ec volume %d shard locations: %+v\n", vid, nodeToEcIndexBits)

	// collect ec shards to the server that already holds the most shards
	targetNodeLocation, err := collectEcShards(commandEnv, nodeToEcIndexBits, collection, vid)
	if err != nil {
		return fmt.Errorf("collectEcShards for volume %d: %v", vid, err)
	}

	// generate a normal volume
	err = generateNormalVolume(commandEnv.option.GrpcDialOption, vid, collection, targetNodeLocation)
	if err != nil {
		return fmt.Errorf("generate normal volume %d on %s: %v", vid, targetNodeLocation, err)
	}

	// delete the previous ec shards
	err = mountVolumeAndDeleteEcShards(commandEnv.option.GrpcDialOption, collection, targetNodeLocation, nodeToEcIndexBits, vid)
	if err != nil {
		return fmt.Errorf("delete ec shards for volume %d: %v", vid, err)
	}

	return nil
}
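// mountVolumeAndDeleteEcShards mounts the freshly generated normal volume on
// the target server, then unmounts and deletes the ec shards on every server
// that held them.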
func mountVolumeAndDeleteEcShards(grpcDialOption grpc.DialOption, collection string, targetNodeLocation pb.ServerAddress, nodeToEcIndexBits map[pb.ServerAddress]erasure_coding.ShardBits, vid needle.VolumeId) error {

	// mount volume
	if err := operation.WithVolumeServerClient(targetNodeLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
		_, mountErr := volumeServerClient.VolumeMount(context.Background(), &volume_server_pb.VolumeMountRequest{
			VolumeId: uint32(vid),
		})
		return mountErr
	}); err != nil {
		return fmt.Errorf("mountVolumeAndDeleteEcShards mount volume %d on %s: %v", vid, targetNodeLocation, err)
	}

	// unmount ec shards
	for location, ecIndexBits := range nodeToEcIndexBits {
		fmt.Printf("unmount ec volume %d on %s has shards: %+v\n", vid, location, ecIndexBits.ShardIds())
		err := unmountEcShards(grpcDialOption, vid, location, ecIndexBits.ToUint32Slice())
		if err != nil {
			return fmt.Errorf("mountVolumeAndDeleteEcShards unmount ec volume %d on %s: %v", vid, location, err)
		}
	}

	// delete ec shards
	for location, ecIndexBits := range nodeToEcIndexBits {
		fmt.Printf("delete ec volume %d on %s has shards: %+v\n", vid, location, ecIndexBits.ShardIds())
		err := sourceServerDeleteEcShards(grpcDialOption, collection, vid, location, ecIndexBits.ToUint32Slice())
		if err != nil {
			return fmt.Errorf("mountVolumeAndDeleteEcShards delete ec volume %d on %s: %v", vid, location, err)
		}
	}

	return nil
}
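// generateNormalVolume asks the volume server holding the collected ec shards
// to rebuild the normal volume from them via VolumeEcShardsToVolume.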
func generateNormalVolume(grpcDialOption grpc.DialOption, vid needle.VolumeId, collection string, sourceVolumeServer pb.ServerAddress) error {

	fmt.Printf("generateNormalVolume from ec volume %d on %s\n", vid, sourceVolumeServer)

	err := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
		_, genErr := volumeServerClient.VolumeEcShardsToVolume(context.Background(), &volume_server_pb.VolumeEcShardsToVolumeRequest{
			VolumeId:   uint32(vid),
			Collection: collection,
		})
		return genErr
	})

	return err
}
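// collectEcShards picks the server that already holds the most data shards as
// the copy target, then copies each missing data shard (parity shards are
// skipped) from the other servers onto it, so the target ends up with enough
// shards to reconstruct the volume locally.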
func collectEcShards(commandEnv *CommandEnv, nodeToEcIndexBits map[pb.ServerAddress]erasure_coding.ShardBits, collection string, vid needle.VolumeId) (targetNodeLocation pb.ServerAddress, err error) {

	maxShardCount := 0
	var existingEcIndexBits erasure_coding.ShardBits
	for loc, ecIndexBits := range nodeToEcIndexBits {
		toBeCopiedShardCount := ecIndexBits.MinusParityShards().ShardIdCount()
		if toBeCopiedShardCount > maxShardCount {
			maxShardCount = toBeCopiedShardCount
			targetNodeLocation = loc
			existingEcIndexBits = ecIndexBits
		}
	}

	fmt.Printf("collectEcShards: ec volume %d collect shards to %s from: %+v\n", vid, targetNodeLocation, nodeToEcIndexBits)

	var copiedEcIndexBits erasure_coding.ShardBits
	for loc, ecIndexBits := range nodeToEcIndexBits {
		if loc == targetNodeLocation {
			continue
		}

		needToCopyEcIndexBits := ecIndexBits.Minus(existingEcIndexBits).MinusParityShards()
		if needToCopyEcIndexBits.ShardIdCount() == 0 {
			continue
		}

		err = operation.WithVolumeServerClient(targetNodeLocation, commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {

			fmt.Printf("copy %d.%v %s => %s\n", vid, needToCopyEcIndexBits.ShardIds(), loc, targetNodeLocation)

			_, copyErr := volumeServerClient.VolumeEcShardsCopy(context.Background(), &volume_server_pb.VolumeEcShardsCopyRequest{
				VolumeId:       uint32(vid),
				Collection:     collection,
				ShardIds:       needToCopyEcIndexBits.ToUint32Slice(),
				CopyEcxFile:    false,
				CopyEcjFile:    true,
				CopyVifFile:    true,
				SourceDataNode: string(loc),
			})
			if copyErr != nil {
				return fmt.Errorf("copy %d.%v %s => %s : %v", vid, needToCopyEcIndexBits.ShardIds(), loc, targetNodeLocation, copyErr)
			}

			return nil
		})

		if err != nil {
			break
		}

		copiedEcIndexBits = copiedEcIndexBits.Plus(needToCopyEcIndexBits)
	}

	nodeToEcIndexBits[targetNodeLocation] = existingEcIndexBits.Plus(copiedEcIndexBits)

	return targetNodeLocation, err
}
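// lookupVolumeIds asks the master for the current locations of the given
// volume or file ids.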
func lookupVolumeIds(commandEnv *CommandEnv, volumeIds []string) (volumeIdLocations []*master_pb.LookupVolumeResponse_VolumeIdLocation, err error) {

	var resp *master_pb.LookupVolumeResponse
	err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
		resp, err = client.LookupVolume(context.Background(), &master_pb.LookupVolumeRequest{VolumeOrFileIds: volumeIds})
		return err
	})
	if err != nil {
		return nil, err
	}

	return resp.VolumeIdLocations, nil
}
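// collectTopologyInfo fetches the cluster topology and the volume size limit
// from the master via the VolumeList call.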
func collectTopologyInfo(commandEnv *CommandEnv) (topoInfo *master_pb.TopologyInfo, volumeSizeLimitMb uint64, err error) {

	var resp *master_pb.VolumeListResponse
	err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
		resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
		return err
	})
	if err != nil {
		return
	}

	return resp.TopologyInfo, resp.VolumeSizeLimitMb, nil
}
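// collectEcShardIds walks the topology and returns the distinct volume ids of
// all ec shards that belong to the selected collection.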
func collectEcShardIds(topoInfo *master_pb.TopologyInfo, selectedCollection string) (vids []needle.VolumeId) {

	vidMap := make(map[uint32]bool)
	eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
		if diskInfo, found := dn.DiskInfos[string(types.HardDriveType)]; found {
			for _, v := range diskInfo.EcShardInfos {
				if v.Collection == selectedCollection {
					vidMap[v.Id] = true
				}
			}
		}
	})

	for vid := range vidMap {
		vids = append(vids, needle.VolumeId(vid))
	}

	return
}
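// collectEcNodeShardBits maps each volume server to the bitmask of ec shards
// it holds for the given volume id.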
func collectEcNodeShardBits(topoInfo *master_pb.TopologyInfo, vid needle.VolumeId) map[pb.ServerAddress]erasure_coding.ShardBits {

	nodeToEcIndexBits := make(map[pb.ServerAddress]erasure_coding.ShardBits)
	eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
		if diskInfo, found := dn.DiskInfos[string(types.HardDriveType)]; found {
			for _, v := range diskInfo.EcShardInfos {
				if v.Id == uint32(vid) {
					nodeToEcIndexBits[pb.NewServerAddressFromDataNode(dn)] = erasure_coding.ShardBits(v.EcIndexBits)
				}
			}
		}
	})

	return nodeToEcIndexBits
}