package weed_server

import (
	"context"
	"fmt"
	"math"
	"os"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/operation"
	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
	"github.com/chrislusf/seaweedfs/weed/storage"
	"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
	"github.com/chrislusf/seaweedfs/weed/storage/needle"
)

/*
Steps to apply erasure coding to .dat and .idx files:

0. ensure the volume is read-only
1. client calls VolumeEcShardsGenerate to generate the .ecx and .ec01~.ec14 files
2. client asks the master for servers to hold the ec files, at least 4 servers
3. client calls VolumeEcShardsCopy on the target servers to copy the ec files from the source server
4. target servers report the new ec files to the master
5. master stores vid -> [14]*DataNode
6. client checks the master; once all 14 slices are ready, delete the original .dat and .idx files
*/
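
// exampleApplyErasureCoding is a hypothetical client-side sketch, not part of
// the original file: it walks steps 1 and 3 above using only the RPCs defined
// here. Target selection (step 2) and master bookkeeping (steps 4-6) go
// through the master API and are elided; assigning one shard per target
// server is an assumption for illustration only.
func (vs *VolumeServer) exampleApplyErasureCoding(ctx context.Context, sourceServer string, targetServers []string, volumeId uint32, collection string) error {

	// step 1: generate the .ecx and .ec01 ~ .ec14 files on the source server
	err := operation.WithVolumeServerClient(sourceServer, vs.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
		_, genErr := client.VolumeEcShardsGenerate(ctx, &volume_server_pb.VolumeEcShardsGenerateRequest{
			VolumeId:   volumeId,
			Collection: collection,
		})
		return genErr
	})
	if err != nil {
		return fmt.Errorf("generate ec shards for volume %d: %v", volumeId, err)
	}

	// step 3: have each target server pull its ec files from the source server
	for i, target := range targetServers {
		ecIndex := uint32(i + 1) // assumption: shard i+1 goes to target i
		err = operation.WithVolumeServerClient(target, vs.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
			_, copyErr := client.VolumeEcShardsCopy(ctx, &volume_server_pb.VolumeEcShardsCopyRequest{
				VolumeId:       volumeId,
				Collection:     collection,
				EcIndexes:      []uint32{ecIndex},
				SourceDataNode: sourceServer,
			})
			return copyErr
		})
		if err != nil {
			return fmt.Errorf("copy ec shard %d to %s: %v", ecIndex, target, err)
		}
	}

	return nil
}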

// VolumeEcShardsGenerate generates the .ecx and .ec01 ~ .ec14 files
func (vs *VolumeServer) VolumeEcShardsGenerate(ctx context.Context, req *volume_server_pb.VolumeEcShardsGenerateRequest) (*volume_server_pb.VolumeEcShardsGenerateResponse, error) {

	v := vs.store.GetVolume(needle.VolumeId(req.VolumeId))
	if v == nil {
		return nil, fmt.Errorf("volume %d not found", req.VolumeId)
	}
	baseFileName := v.FileName()

	if v.Collection != req.Collection {
		return nil, fmt.Errorf("existing collection:%v unexpected input: %v", v.Collection, req.Collection)
	}

	// write .ecx file
	if err := erasure_coding.WriteSortedEcxFile(baseFileName); err != nil {
		return nil, fmt.Errorf("WriteSortedEcxFile %s: %v", baseFileName, err)
	}

	// write .ec01 ~ .ec14 files
	if err := erasure_coding.WriteEcFiles(baseFileName); err != nil {
		return nil, fmt.Errorf("WriteEcFiles %s: %v", baseFileName, err)
	}

	return &volume_server_pb.VolumeEcShardsGenerateResponse{}, nil
}

// VolumeEcShardsCopy copies the .ecx file and some ec data slices from the source server
func (vs *VolumeServer) VolumeEcShardsCopy(ctx context.Context, req *volume_server_pb.VolumeEcShardsCopyRequest) (*volume_server_pb.VolumeEcShardsCopyResponse, error) {

	location := vs.store.FindFreeLocation()
	if location == nil {
		return nil, fmt.Errorf("no space left")
	}

	baseFileName := storage.VolumeFileName(req.Collection, location.Directory, int(req.VolumeId))

	err := operation.WithVolumeServerClient(req.SourceDataNode, vs.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {

		// copy ecx file
		if err := vs.doCopyFile(ctx, client, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".ecx"); err != nil {
			return err
		}

		// copy ec data slices
		for _, ecIndex := range req.EcIndexes {
			if err := vs.doCopyFile(ctx, client, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, erasure_coding.ToExt(int(ecIndex))); err != nil {
				return err
			}
		}

		return nil
	})
	if err != nil {
		return nil, fmt.Errorf("VolumeEcShardsCopy volume %d: %v", req.VolumeId, err)
	}

	return &volume_server_pb.VolumeEcShardsCopyResponse{}, nil
}

// VolumeEcShardsDelete deletes the local .ecx file and some ec data slices that are no longer needed, assuming the current server has the source volume
func (vs *VolumeServer) VolumeEcShardsDelete(ctx context.Context, req *volume_server_pb.VolumeEcShardsDeleteRequest) (*volume_server_pb.VolumeEcShardsDeleteResponse, error) {

	v := vs.store.GetVolume(needle.VolumeId(req.VolumeId))
	if v == nil {
		return nil, fmt.Errorf("volume %d not found", req.VolumeId)
	}
	baseFileName := v.FileName()

	for _, shardIndex := range req.EcIndexes {
		if err := os.Remove(baseFileName + erasure_coding.ToExt(int(shardIndex))); err != nil {
			return nil, err
		}
	}

	if req.ShouldDeleteEcx {
		if err := os.Remove(baseFileName + ".ecx"); err != nil {
			return nil, err
		}
	}

	return &volume_server_pb.VolumeEcShardsDeleteResponse{}, nil
}
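
// exampleDropSourceEcFiles is a hypothetical sketch, not part of the original
// file: once all shards are copied and mounted elsewhere, a client could ask
// the source server to remove the ec files it no longer needs to hold.
func (vs *VolumeServer) exampleDropSourceEcFiles(ctx context.Context, sourceServer string, volumeId uint32, unneededIndexes []uint32) error {
	return operation.WithVolumeServerClient(sourceServer, vs.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
		_, err := client.VolumeEcShardsDelete(ctx, &volume_server_pb.VolumeEcShardsDeleteRequest{
			VolumeId:        volumeId,
			EcIndexes:       unneededIndexes,
			ShouldDeleteEcx: true, // assumption: also drop the .ecx once no shard remains locally
		})
		return err
	})
}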

// VolumeEcShardsMount mounts the given ec shards so this volume server can serve them
func (vs *VolumeServer) VolumeEcShardsMount(ctx context.Context, req *volume_server_pb.VolumeEcShardsMountRequest) (*volume_server_pb.VolumeEcShardsMountResponse, error) {

	for _, shardId := range req.EcIndexes {
		err := vs.store.MountEcShards(req.Collection, needle.VolumeId(req.VolumeId), erasure_coding.ShardId(shardId))
		if err != nil {
			glog.Errorf("ec shard mount %v: %v", req, err)
			return nil, fmt.Errorf("mount %d.%d: %v", req.VolumeId, shardId, err)
		}
		glog.V(2).Infof("ec shard mount %v", req)
	}

	return &volume_server_pb.VolumeEcShardsMountResponse{}, nil
}

// VolumeEcShardsUnmount unmounts the given ec shards so this volume server stops serving them
func (vs *VolumeServer) VolumeEcShardsUnmount(ctx context.Context, req *volume_server_pb.VolumeEcShardsUnmountRequest) (*volume_server_pb.VolumeEcShardsUnmountResponse, error) {

	for _, shardId := range req.EcIndexes {
		err := vs.store.UnmountEcShards(needle.VolumeId(req.VolumeId), erasure_coding.ShardId(shardId))
		if err != nil {
			glog.Errorf("ec shard unmount %v: %v", req, err)
			return nil, fmt.Errorf("unmount %d.%d: %v", req.VolumeId, shardId, err)
		}
		glog.V(2).Infof("ec shard unmount %v", req)
	}

	return &volume_server_pb.VolumeEcShardsUnmountResponse{}, nil
}
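
// exampleMountCopiedShards is a hypothetical sketch, not part of the original
// file: after VolumeEcShardsCopy succeeds on a target server, the client asks
// that server to mount the freshly copied shards so they start serving reads.
func (vs *VolumeServer) exampleMountCopiedShards(ctx context.Context, targetServer string, volumeId uint32, collection string, ecIndexes []uint32) error {
	return operation.WithVolumeServerClient(targetServer, vs.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
		_, err := client.VolumeEcShardsMount(ctx, &volume_server_pb.VolumeEcShardsMountRequest{
			VolumeId:   volumeId,
			Collection: collection,
			EcIndexes:  ecIndexes,
		})
		return err
	})
}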