package shell

import (
	"context"
	"flag"
	"fmt"
	"io"
	"time"

	"google.golang.org/grpc"

	"github.com/seaweedfs/seaweedfs/weed/operation"
	"github.com/seaweedfs/seaweedfs/weed/pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
	"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
	"github.com/seaweedfs/seaweedfs/weed/storage/types"
)

func init() {
	Commands = append(Commands, &commandEcDecode{})
}

type commandEcDecode struct {
}

func (c *commandEcDecode) Name() string {
	return "ec.decode"
}

func (c *commandEcDecode) Help() string {
	return `decode an erasure-coded volume into a normal volume

	ec.decode [-collection=""] [-volumeId=<volume_id>]
`
}

func (c *commandEcDecode) HasTag(CommandTag) bool {
	return false
}
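
// Do parses the command flags and decodes either the single volume given by
// -volumeId or every erasure-coded volume in the collection. A minimal
// illustrative session in `weed shell` (the collection and volume id below
// are made up; the shell lock must be acquired first):
//
//	lock
//	ec.decode -collection=pictures -volumeId=27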
func (c *commandEcDecode) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {

	decodeCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
	volumeId := decodeCommand.Int("volumeId", 0, "the volume id")
	collection := decodeCommand.String("collection", "", "the collection name")
	if err = decodeCommand.Parse(args); err != nil {
		return nil
	}

	if err = commandEnv.confirmIsLocked(args); err != nil {
		return
	}

	vid := needle.VolumeId(*volumeId)

	// collect topology information
	topologyInfo, _, err := collectTopologyInfo(commandEnv, 0)
	if err != nil {
		return err
	}

	// volumeId is provided
	if vid != 0 {
		return doEcDecode(commandEnv, topologyInfo, *collection, vid)
	}

	// apply to all volumes in the collection
	volumeIds := collectEcShardIds(topologyInfo, *collection)
	fmt.Printf("ec decode volumes: %v\n", volumeIds)
	for _, vid := range volumeIds {
		if err = doEcDecode(commandEnv, topologyInfo, *collection, vid); err != nil {
			return err
		}
	}

	return nil
}
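
// doEcDecode converts a single erasure-coded volume back into a normal
// volume: it gathers enough shards onto one volume server, rebuilds the
// normal volume there, and finally removes the now-obsolete ec shards.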
func doEcDecode(commandEnv *CommandEnv, topoInfo *master_pb.TopologyInfo, collection string, vid needle.VolumeId) (err error) {
	if !commandEnv.isLocked() {
		return fmt.Errorf("lock is lost")
	}

	// find volume location
	nodeToEcIndexBits := collectEcNodeShardBits(topoInfo, vid)

	fmt.Printf("ec volume %d shard locations: %+v\n", vid, nodeToEcIndexBits)

	// collect ec shards to the server with most space
	targetNodeLocation, err := collectEcShards(commandEnv, nodeToEcIndexBits, collection, vid)
	if err != nil {
		return fmt.Errorf("collectEcShards for volume %d: %v", vid, err)
	}

	// generate a normal volume
	err = generateNormalVolume(commandEnv.option.GrpcDialOption, vid, collection, targetNodeLocation)
	if err != nil {
		return fmt.Errorf("generate normal volume %d on %s: %v", vid, targetNodeLocation, err)
	}

	// delete the previous ec shards
	err = mountVolumeAndDeleteEcShards(commandEnv.option.GrpcDialOption, collection, targetNodeLocation, nodeToEcIndexBits, vid)
	if err != nil {
		return fmt.Errorf("delete ec shards for volume %d: %v", vid, err)
	}

	return nil
}
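
// mountVolumeAndDeleteEcShards mounts the freshly rebuilt normal volume on
// the target server, then unmounts and deletes the ec shards on every
// server that still holds them.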
func mountVolumeAndDeleteEcShards(grpcDialOption grpc.DialOption, collection string, targetNodeLocation pb.ServerAddress, nodeToEcIndexBits map[pb.ServerAddress]erasure_coding.ShardBits, vid needle.VolumeId) error {

	// mount volume
	if err := operation.WithVolumeServerClient(false, targetNodeLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
		_, mountErr := volumeServerClient.VolumeMount(context.Background(), &volume_server_pb.VolumeMountRequest{
			VolumeId: uint32(vid),
		})
		return mountErr
	}); err != nil {
		return fmt.Errorf("mountVolumeAndDeleteEcShards mount volume %d on %s: %v", vid, targetNodeLocation, err)
	}

	// unmount ec shards
	for location, ecIndexBits := range nodeToEcIndexBits {
		fmt.Printf("unmount ec volume %d on %s has shards: %+v\n", vid, location, ecIndexBits.ShardIds())
		err := unmountEcShards(grpcDialOption, vid, location, ecIndexBits.ToUint32Slice())
		if err != nil {
			return fmt.Errorf("mountVolumeAndDeleteEcShards unmount ec volume %d on %s: %v", vid, location, err)
		}
	}

	// delete ec shards
	for location, ecIndexBits := range nodeToEcIndexBits {
		fmt.Printf("delete ec volume %d on %s has shards: %+v\n", vid, location, ecIndexBits.ShardIds())
		err := sourceServerDeleteEcShards(grpcDialOption, collection, vid, location, ecIndexBits.ToUint32Slice())
		if err != nil {
			return fmt.Errorf("mountVolumeAndDeleteEcShards delete ec volume %d on %s: %v", vid, location, err)
		}
	}

	return nil
}
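
// generateNormalVolume asks the volume server that now holds the shards to
// rebuild the normal volume files from them via VolumeEcShardsToVolume.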
func generateNormalVolume(grpcDialOption grpc.DialOption, vid needle.VolumeId, collection string, sourceVolumeServer pb.ServerAddress) error {

	fmt.Printf("generateNormalVolume from ec volume %d on %s\n", vid, sourceVolumeServer)

	err := operation.WithVolumeServerClient(false, sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
		_, genErr := volumeServerClient.VolumeEcShardsToVolume(context.Background(), &volume_server_pb.VolumeEcShardsToVolumeRequest{
			VolumeId:   uint32(vid),
			Collection: collection,
		})
		return genErr
	})

	return err
}
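
// collectEcShards chooses the server that already holds the most data shards
// as the target (parity shards are skipped, since this rebuild path only
// reads the data shards; in SeaweedFS's default scheme that is 10 data + 4
// parity), then copies any missing data shards from the other servers to the
// target and mounts them there.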
func collectEcShards(commandEnv *CommandEnv, nodeToEcIndexBits map[pb.ServerAddress]erasure_coding.ShardBits, collection string, vid needle.VolumeId) (targetNodeLocation pb.ServerAddress, err error) {

	maxShardCount := 0
	var existingEcIndexBits erasure_coding.ShardBits
	for loc, ecIndexBits := range nodeToEcIndexBits {
		toBeCopiedShardCount := ecIndexBits.MinusParityShards().ShardIdCount()
		if toBeCopiedShardCount > maxShardCount {
			maxShardCount = toBeCopiedShardCount
			targetNodeLocation = loc
			existingEcIndexBits = ecIndexBits
		}
	}

	fmt.Printf("collectEcShards: ec volume %d collect shards to %s from: %+v\n", vid, targetNodeLocation, nodeToEcIndexBits)

	var copiedEcIndexBits erasure_coding.ShardBits
	for loc, ecIndexBits := range nodeToEcIndexBits {
		if loc == targetNodeLocation {
			continue
		}

		needToCopyEcIndexBits := ecIndexBits.Minus(existingEcIndexBits).MinusParityShards()
		if needToCopyEcIndexBits.ShardIdCount() == 0 {
			continue
		}

		err = operation.WithVolumeServerClient(false, targetNodeLocation, commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {

			fmt.Printf("copy %d.%v %s => %s\n", vid, needToCopyEcIndexBits.ShardIds(), loc, targetNodeLocation)

			_, copyErr := volumeServerClient.VolumeEcShardsCopy(context.Background(), &volume_server_pb.VolumeEcShardsCopyRequest{
				VolumeId:       uint32(vid),
				Collection:     collection,
				ShardIds:       needToCopyEcIndexBits.ToUint32Slice(),
				CopyEcxFile:    false,
				CopyEcjFile:    true,
				CopyVifFile:    true,
				SourceDataNode: string(loc),
			})
			if copyErr != nil {
				return fmt.Errorf("copy %d.%v %s => %s : %v", vid, needToCopyEcIndexBits.ShardIds(), loc, targetNodeLocation, copyErr)
			}

			fmt.Printf("mount %d.%v on %s\n", vid, needToCopyEcIndexBits.ShardIds(), targetNodeLocation)

			_, mountErr := volumeServerClient.VolumeEcShardsMount(context.Background(), &volume_server_pb.VolumeEcShardsMountRequest{
				VolumeId:   uint32(vid),
				Collection: collection,
				ShardIds:   needToCopyEcIndexBits.ToUint32Slice(),
			})
			if mountErr != nil {
				return fmt.Errorf("mount %d.%v on %s : %v", vid, needToCopyEcIndexBits.ShardIds(), targetNodeLocation, mountErr)
			}

			return nil
		})

		if err != nil {
			break
		}

		copiedEcIndexBits = copiedEcIndexBits.Plus(needToCopyEcIndexBits)
	}

	nodeToEcIndexBits[targetNodeLocation] = existingEcIndexBits.Plus(copiedEcIndexBits)

	return targetNodeLocation, err
}
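
// lookupVolumeIds asks the master for the current locations of the given
// volume or file ids.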
func lookupVolumeIds(commandEnv *CommandEnv, volumeIds []string) (volumeIdLocations []*master_pb.LookupVolumeResponse_VolumeIdLocation, err error) {
	var resp *master_pb.LookupVolumeResponse
	err = commandEnv.MasterClient.WithClient(false, func(client master_pb.SeaweedClient) error {
		resp, err = client.LookupVolume(context.Background(), &master_pb.LookupVolumeRequest{VolumeOrFileIds: volumeIds})
		return err
	})
	if err != nil {
		return nil, err
	}
	return resp.VolumeIdLocations, nil
}
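
// collectTopologyInfo fetches the cluster topology and the volume size limit
// from the master, optionally sleeping first so that in-flight changes can
// settle before the snapshot is taken.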
func collectTopologyInfo(commandEnv *CommandEnv, delayBeforeCollecting time.Duration) (topoInfo *master_pb.TopologyInfo, volumeSizeLimitMb uint64, err error) {

	if delayBeforeCollecting > 0 {
		time.Sleep(delayBeforeCollecting)
	}

	var resp *master_pb.VolumeListResponse
	err = commandEnv.MasterClient.WithClient(false, func(client master_pb.SeaweedClient) error {
		resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
		return err
	})
	if err != nil {
		return
	}

	return resp.TopologyInfo, resp.VolumeSizeLimitMb, nil
}
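
// collectEcShardIds walks the topology and returns the ids of all
// erasure-coded volumes on hard-drive storage that belong to the selected
// collection.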
func collectEcShardIds(topoInfo *master_pb.TopologyInfo, selectedCollection string) (vids []needle.VolumeId) {

	vidMap := make(map[uint32]bool)
	eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
		if diskInfo, found := dn.DiskInfos[string(types.HardDriveType)]; found {
			for _, v := range diskInfo.EcShardInfos {
				if v.Collection == selectedCollection {
					vidMap[v.Id] = true
				}
			}
		}
	})

	for vid := range vidMap {
		vids = append(vids, needle.VolumeId(vid))
	}

	return
}
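
// collectEcNodeShardBits maps each volume server to the set of shards it
// holds for the given ec volume, encoded as a ShardBits bitmap.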
func collectEcNodeShardBits(topoInfo *master_pb.TopologyInfo, vid needle.VolumeId) map[pb.ServerAddress]erasure_coding.ShardBits {

	nodeToEcIndexBits := make(map[pb.ServerAddress]erasure_coding.ShardBits)
	eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
		if diskInfo, found := dn.DiskInfos[string(types.HardDriveType)]; found {
			for _, v := range diskInfo.EcShardInfos {
				if v.Id == uint32(vid) {
					nodeToEcIndexBits[pb.NewServerAddressFromDataNode(dn)] = erasure_coding.ShardBits(v.EcIndexBits)
				}
			}
		}
	})

	return nodeToEcIndexBits
}
  233. }