package weed_server

import (
	"context"
	"fmt"
	"reflect"
	"strings"
	"sync"
	"time"

	"github.com/seaweedfs/raft"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
	"github.com/seaweedfs/seaweedfs/weed/security"
	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
	"github.com/seaweedfs/seaweedfs/weed/storage/super_block"
	"github.com/seaweedfs/seaweedfs/weed/storage/types"
)
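
// ProcessGrowRequest starts a background goroutine that consumes the volume
// growth request channel: requests received while this master is not the
// leader are discarded, requests identical to one already in flight are
// skipped, and the remaining ones trigger an automatic volume grow whose new
// volume locations are broadcast to all connected clients.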
func (ms *MasterServer) ProcessGrowRequest() {
	go func() {
		filter := sync.Map{}
		for {
			req, ok := <-ms.volumeGrowthRequestChan
			if !ok {
				break
			}

			option := req.Option
			vl := ms.Topo.GetVolumeLayout(option.Collection, option.ReplicaPlacement, option.Ttl, option.DiskType)

			if !ms.Topo.IsLeader() {
				//discard buffered requests
				time.Sleep(time.Second * 1)
				vl.DoneGrowRequest()
				continue
			}

			// filter out identical requests being processed
			found := false
			filter.Range(func(k, v interface{}) bool {
				if reflect.DeepEqual(k, req) {
					found = true
				}
				return !found
			})

			// not atomic but it's okay
			if !found && vl.ShouldGrowVolumes(option) {
				filter.Store(req, nil)
				// we have lock called inside vg
				go func() {
					glog.V(1).Infoln("starting automatic volume grow")
					start := time.Now()
					newVidLocations, err := ms.vg.AutomaticGrowByType(req.Option, ms.grpcDialOption, ms.Topo, req.Count)
					glog.V(1).Infoln("finished automatic volume grow, cost ", time.Now().Sub(start))
					if err == nil {
						for _, newVidLocation := range newVidLocations {
							ms.broadcastToClients(&master_pb.KeepConnectedResponse{VolumeLocation: newVidLocation})
						}
					} else {
						glog.V(1).Infof("automatic volume grow failed: %+v", err)
					}
					vl.DoneGrowRequest()

					filter.Delete(req)
				}()
			} else {
				glog.V(4).Infoln("discard volume grow request")
				time.Sleep(time.Millisecond * 211)
				vl.DoneGrowRequest()
			}
		}
	}()
}
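
// LookupVolume resolves each requested volume or file id to its current
// volume server locations. For file ids (those containing a comma) a
// volume-server JWT is attached so the caller can be authorized.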
func (ms *MasterServer) LookupVolume(ctx context.Context, req *master_pb.LookupVolumeRequest) (*master_pb.LookupVolumeResponse, error) {
	resp := &master_pb.LookupVolumeResponse{}
	volumeLocations := ms.lookupVolumeId(req.VolumeOrFileIds, req.Collection)

	for _, volumeOrFileId := range req.VolumeOrFileIds {
		vid := volumeOrFileId
		commaSep := strings.Index(vid, ",")
		if commaSep > 0 {
			vid = vid[0:commaSep]
		}
		if result, found := volumeLocations[vid]; found {
			var locations []*master_pb.Location
			for _, loc := range result.Locations {
				locations = append(locations, &master_pb.Location{
					Url:        loc.Url,
					PublicUrl:  loc.PublicUrl,
					DataCenter: loc.DataCenter,
				})
			}
			var auth string
			if commaSep > 0 { // this is a file id
				auth = string(security.GenJwtForVolumeServer(ms.guard.SigningKey, ms.guard.ExpiresAfterSec, result.VolumeOrFileId))
			}
			resp.VolumeIdLocations = append(resp.VolumeIdLocations, &master_pb.LookupVolumeResponse_VolumeIdLocation{
				VolumeOrFileId: result.VolumeOrFileId,
				Locations:      locations,
				Error:          result.Error,
				Auth:           auth,
			})
		}
	}

	return resp, nil
}
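
// Statistics reports capacity numbers for the volume layout matching the
// requested collection, replication, ttl, and disk type. TotalSize is derived
// from the topology-wide max volume count times the configured volume size
// limit, while UsedSize and FileCount come from the matching layout. Only the
// leader answers; followers return raft.NotLeaderError.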
func (ms *MasterServer) Statistics(ctx context.Context, req *master_pb.StatisticsRequest) (*master_pb.StatisticsResponse, error) {
	if !ms.Topo.IsLeader() {
		return nil, raft.NotLeaderError
	}

	if req.Replication == "" {
		req.Replication = ms.option.DefaultReplicaPlacement
	}
	replicaPlacement, err := super_block.NewReplicaPlacementFromString(req.Replication)
	if err != nil {
		return nil, err
	}
	ttl, err := needle.ReadTTL(req.Ttl)
	if err != nil {
		return nil, err
	}

	volumeLayout := ms.Topo.GetVolumeLayout(req.Collection, replicaPlacement, ttl, types.ToDiskType(req.DiskType))
	stats := volumeLayout.Stats()
	totalSize := ms.Topo.GetDiskUsages().GetMaxVolumeCount() * int64(ms.option.VolumeSizeLimitMB) * 1024 * 1024

	resp := &master_pb.StatisticsResponse{
		TotalSize: uint64(totalSize),
		UsedSize:  stats.UsedSize,
		FileCount: stats.FileCount,
	}

	return resp, nil
}
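
// VolumeList returns the full topology tree along with the configured volume
// size limit. Only the leader serves this request.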
func (ms *MasterServer) VolumeList(ctx context.Context, req *master_pb.VolumeListRequest) (*master_pb.VolumeListResponse, error) {
	if !ms.Topo.IsLeader() {
		return nil, raft.NotLeaderError
	}

	resp := &master_pb.VolumeListResponse{
		TopologyInfo:      ms.Topo.ToTopologyInfo(),
		VolumeSizeLimitMb: uint64(ms.option.VolumeSizeLimitMB),
	}

	return resp, nil
}
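
// LookupEcVolume looks up the shard locations of an erasure-coded volume and
// returns one location list per shard id.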
func (ms *MasterServer) LookupEcVolume(ctx context.Context, req *master_pb.LookupEcVolumeRequest) (*master_pb.LookupEcVolumeResponse, error) {
	if !ms.Topo.IsLeader() {
		return nil, raft.NotLeaderError
	}

	resp := &master_pb.LookupEcVolumeResponse{}

	ecLocations, found := ms.Topo.LookupEcShards(needle.VolumeId(req.VolumeId))
	if !found {
		return resp, fmt.Errorf("ec volume %d not found", req.VolumeId)
	}

	resp.VolumeId = req.VolumeId

	for shardId, shardLocations := range ecLocations.Locations {
		var locations []*master_pb.Location
		for _, dn := range shardLocations {
			locations = append(locations, &master_pb.Location{
				Url:        string(dn.Id()),
				PublicUrl:  dn.PublicUrl,
				DataCenter: dn.GetDataCenterId(),
			})
		}
		resp.ShardIdLocations = append(resp.ShardIdLocations, &master_pb.LookupEcVolumeResponse_EcShardIdLocation{
			ShardId:   uint32(shardId),
			Locations: locations,
		})
	}

	return resp, nil
}
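
// VacuumVolume triggers a vacuum (compaction) pass on the topology using the
// given garbage threshold, optionally scoped to a volume id and collection.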
func (ms *MasterServer) VacuumVolume(ctx context.Context, req *master_pb.VacuumVolumeRequest) (*master_pb.VacuumVolumeResponse, error) {
	if !ms.Topo.IsLeader() {
		return nil, raft.NotLeaderError
	}

	resp := &master_pb.VacuumVolumeResponse{}

	ms.Topo.Vacuum(ms.grpcDialOption, float64(req.GarbageThreshold), req.VolumeId, req.Collection, ms.preallocateSize)

	return resp, nil
}
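
// DisableVacuum turns off vacuuming on the topology.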
func (ms *MasterServer) DisableVacuum(ctx context.Context, req *master_pb.DisableVacuumRequest) (*master_pb.DisableVacuumResponse, error) {
	ms.Topo.DisableVacuum()
	resp := &master_pb.DisableVacuumResponse{}
	return resp, nil
}
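
// EnableVacuum re-enables vacuuming on the topology.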
func (ms *MasterServer) EnableVacuum(ctx context.Context, req *master_pb.EnableVacuumRequest) (*master_pb.EnableVacuumResponse, error) {
	ms.Topo.EnableVacuum()
	resp := &master_pb.EnableVacuumResponse{}
	return resp, nil
}
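
// VolumeMarkReadonly marks a volume replica on the specified data node as
// read-only, or writable again when IsReadonly is false. Only the leader
// serves this request.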
func (ms *MasterServer) VolumeMarkReadonly(ctx context.Context, req *master_pb.VolumeMarkReadonlyRequest) (*master_pb.VolumeMarkReadonlyResponse, error) {
	if !ms.Topo.IsLeader() {
		return nil, raft.NotLeaderError
	}

	resp := &master_pb.VolumeMarkReadonlyResponse{}

	replicaPlacement, _ := super_block.NewReplicaPlacementFromByte(byte(req.ReplicaPlacement))
	vl := ms.Topo.GetVolumeLayout(req.Collection, replicaPlacement, needle.LoadTTLFromUint32(req.Ttl), types.ToDiskType(req.DiskType))
	dataNodes := ms.Topo.Lookup(req.Collection, needle.VolumeId(req.VolumeId))
	for _, dn := range dataNodes {
		if dn.Ip == req.Ip && dn.Port == int(req.Port) {
			if req.IsReadonly {
				vl.SetVolumeReadOnly(dn, needle.VolumeId(req.VolumeId))
			} else {
				vl.SetVolumeWritable(dn, needle.VolumeId(req.VolumeId))
			}
		}
	}

	return resp, nil
}
  197. }