You cannot select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

196 lines
6.2 KiB

6 years ago
6 years ago
  1. package weed_server
  2. import (
  3. "context"
  4. "fmt"
  5. "io"
  6. "math"
  7. "os"
  8. "github.com/chrislusf/seaweedfs/weed/glog"
  9. "github.com/chrislusf/seaweedfs/weed/operation"
  10. "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
  11. "github.com/chrislusf/seaweedfs/weed/storage"
  12. "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
  13. "github.com/chrislusf/seaweedfs/weed/storage/needle"
  14. )
  15. /*
  16. Steps to apply erasure coding to .dat .idx files
  17. 0. ensure the volume is readonly
  18. 1. client call VolumeEcShardsGenerate to generate the .ecx and .ec01~.ec14 files
  19. 2. client ask master for possible servers to hold the ec files, at least 4 servers
  20. 3. client call VolumeEcShardsCopy on above target servers to copy ec files from the source server
  21. 4. target servers report the new ec files to the master
  22. 5. master stores vid -> [14]*DataNode
  23. 6. client checks master. If all 14 slices are ready, delete the original .dat, .idx files
  24. */
  25. // VolumeEcShardsGenerate generates the .ecx and .ec01 ~ .ec14 files
  26. func (vs *VolumeServer) VolumeEcShardsGenerate(ctx context.Context, req *volume_server_pb.VolumeEcShardsGenerateRequest) (*volume_server_pb.VolumeEcShardsGenerateResponse, error) {
  27. v := vs.store.GetVolume(needle.VolumeId(req.VolumeId))
  28. if v == nil {
  29. return nil, fmt.Errorf("volume %d not found", req.VolumeId)
  30. }
  31. baseFileName := v.FileName()
  32. if v.Collection != req.Collection {
  33. return nil, fmt.Errorf("existing collection:%v unexpected input: %v", v.Collection, req.Collection)
  34. }
  35. // write .ecx file
  36. if err := erasure_coding.WriteSortedEcxFile(baseFileName); err != nil {
  37. return nil, fmt.Errorf("WriteSortedEcxFile %s: %v", baseFileName, err)
  38. }
  39. // write .ec01 ~ .ec14 files
  40. if err := erasure_coding.WriteEcFiles(baseFileName); err != nil {
  41. return nil, fmt.Errorf("WriteEcFiles %s: %v", baseFileName, err)
  42. }
  43. return &volume_server_pb.VolumeEcShardsGenerateResponse{}, nil
  44. }
  45. // VolumeEcShardsCopy copy the .ecx and some ec data slices
  46. func (vs *VolumeServer) VolumeEcShardsCopy(ctx context.Context, req *volume_server_pb.VolumeEcShardsCopyRequest) (*volume_server_pb.VolumeEcShardsCopyResponse, error) {
  47. location := vs.store.FindFreeLocation()
  48. if location == nil {
  49. return nil, fmt.Errorf("no space left")
  50. }
  51. baseFileName := storage.VolumeFileName(req.Collection, location.Directory, int(req.VolumeId))
  52. err := operation.WithVolumeServerClient(req.SourceDataNode, vs.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
  53. // copy ecx file
  54. if err := vs.doCopyFile(ctx, client, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".ecx"); err != nil {
  55. return err
  56. }
  57. // copy ec data slices
  58. for _, shardId := range req.ShardIds {
  59. if err := vs.doCopyFile(ctx, client, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, erasure_coding.ToExt(int(shardId))); err != nil {
  60. return err
  61. }
  62. }
  63. return nil
  64. })
  65. if err != nil {
  66. return nil, fmt.Errorf("VolumeEcShardsCopy volume %d: %v", req.VolumeId, err)
  67. }
  68. return &volume_server_pb.VolumeEcShardsCopyResponse{}, nil
  69. }
  70. // VolumeEcShardsDelete local delete the .ecx and some ec data slices if not needed, assuming current server has the source volume
  71. func (vs *VolumeServer) VolumeEcShardsDelete(ctx context.Context, req *volume_server_pb.VolumeEcShardsDeleteRequest) (*volume_server_pb.VolumeEcShardsDeleteResponse, error) {
  72. v := vs.store.GetVolume(needle.VolumeId(req.VolumeId))
  73. if v == nil {
  74. return nil, fmt.Errorf("volume %d not found", req.VolumeId)
  75. }
  76. baseFileName := v.FileName()
  77. for _, shardId := range req.ShardIds {
  78. if err := os.Remove(baseFileName + erasure_coding.ToExt(int(shardId))); err != nil {
  79. return nil, err
  80. }
  81. }
  82. if req.ShouldDeleteEcx {
  83. if err := os.Remove(baseFileName + ".ecx"); err != nil {
  84. return nil, err
  85. }
  86. }
  87. return &volume_server_pb.VolumeEcShardsDeleteResponse{}, nil
  88. }
  89. func (vs *VolumeServer) VolumeEcShardsMount(ctx context.Context, req *volume_server_pb.VolumeEcShardsMountRequest) (*volume_server_pb.VolumeEcShardsMountResponse, error) {
  90. for _, shardId := range req.ShardIds {
  91. err := vs.store.MountEcShards(req.Collection, needle.VolumeId(req.VolumeId), erasure_coding.ShardId(shardId))
  92. if err != nil {
  93. glog.Errorf("ec shard mount %v: %v", req, err)
  94. } else {
  95. glog.V(2).Infof("ec shard mount %v", req)
  96. }
  97. if err != nil {
  98. return nil, fmt.Errorf("mount %d.%d: %v", req.VolumeId, shardId, err)
  99. }
  100. }
  101. return &volume_server_pb.VolumeEcShardsMountResponse{}, nil
  102. }
  103. func (vs *VolumeServer) VolumeEcShardsUnmount(ctx context.Context, req *volume_server_pb.VolumeEcShardsUnmountRequest) (*volume_server_pb.VolumeEcShardsUnmountResponse, error) {
  104. for _, shardId := range req.ShardIds {
  105. err := vs.store.UnmountEcShards(needle.VolumeId(req.VolumeId), erasure_coding.ShardId(shardId))
  106. if err != nil {
  107. glog.Errorf("ec shard unmount %v: %v", req, err)
  108. } else {
  109. glog.V(2).Infof("ec shard unmount %v", req)
  110. }
  111. if err != nil {
  112. return nil, fmt.Errorf("unmount %d.%d: %v", req.VolumeId, shardId, err)
  113. }
  114. }
  115. return &volume_server_pb.VolumeEcShardsUnmountResponse{}, nil
  116. }
  117. func (vs *VolumeServer) VolumeEcShardRead(req *volume_server_pb.VolumeEcShardReadRequest, stream volume_server_pb.VolumeServer_VolumeEcShardReadServer) error {
  118. ecVolume, found := vs.store.FindEcVolume(needle.VolumeId(req.VolumeId))
  119. if !found {
  120. return fmt.Errorf("not found ec volume id %d", req.VolumeId)
  121. }
  122. ecShard, found := ecVolume.FindEcVolumeShard(erasure_coding.ShardId(req.ShardId))
  123. if !found {
  124. return fmt.Errorf("not found ec shard %d.%d", req.VolumeId, req.ShardId)
  125. }
  126. buffer := make([]byte, BufferSizeLimit)
  127. startOffset, bytesToRead := req.Offset, req.Size
  128. for bytesToRead > 0 {
  129. bytesread, err := ecShard.ReadAt(buffer, startOffset)
  130. // println(fileName, "read", bytesread, "bytes, with target", bytesToRead)
  131. if err != nil {
  132. if err != io.EOF {
  133. return err
  134. }
  135. // println(fileName, "read", bytesread, "bytes, with target", bytesToRead, "err", err.Error())
  136. break
  137. }
  138. if int64(bytesread) > bytesToRead {
  139. bytesread = int(bytesToRead)
  140. }
  141. err = stream.Send(&volume_server_pb.VolumeEcShardReadResponse{
  142. Data: buffer[:bytesread],
  143. })
  144. if err != nil {
  145. // println("sending", bytesread, "bytes err", err.Error())
  146. return err
  147. }
  148. bytesToRead -= int64(bytesread)
  149. }
  150. return nil
  151. }