package weed_server

import (
    "context"
    "fmt"
    "path/filepath"
    "time"

    "github.com/seaweedfs/seaweedfs/weed/cluster"
    "github.com/seaweedfs/seaweedfs/weed/glog"
    "github.com/seaweedfs/seaweedfs/weed/pb"
    "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
    "github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
    "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
    "github.com/seaweedfs/seaweedfs/weed/stats"
    "github.com/seaweedfs/seaweedfs/weed/storage"
    "github.com/seaweedfs/seaweedfs/weed/storage/needle"
    "github.com/seaweedfs/seaweedfs/weed/storage/super_block"
    "github.com/seaweedfs/seaweedfs/weed/storage/types"
    "github.com/seaweedfs/seaweedfs/weed/util"
)
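
// DeleteCollection removes all volumes of the given collection from this volume server's store.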
func (vs *VolumeServer) DeleteCollection(ctx context.Context, req *volume_server_pb.DeleteCollectionRequest) (*volume_server_pb.DeleteCollectionResponse, error) {
    resp := &volume_server_pb.DeleteCollectionResponse{}

    err := vs.store.DeleteCollection(req.Collection)
    if err != nil {
        glog.Errorf("delete collection %s: %v", req.Collection, err)
    } else {
        glog.V(2).Infof("delete collection %v", req)
    }

    return resp, err
}
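
// AllocateVolume creates a new volume on this server with the requested
// collection, replication, TTL, preallocation, and disk type settings.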
func (vs *VolumeServer) AllocateVolume(ctx context.Context, req *volume_server_pb.AllocateVolumeRequest) (*volume_server_pb.AllocateVolumeResponse, error) {
    resp := &volume_server_pb.AllocateVolumeResponse{}

    err := vs.store.AddVolume(
        needle.VolumeId(req.VolumeId),
        req.Collection,
        vs.needleMapKind,
        req.Replication,
        req.Ttl,
        req.Preallocate,
        req.MemoryMapMaxSizeMb,
        types.ToDiskType(req.DiskType),
    )
    if err != nil {
        glog.Errorf("assign volume %v: %v", req, err)
    } else {
        glog.V(2).Infof("assign volume %v", req)
    }

    return resp, err
}
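
// VolumeMount loads an existing volume from disk and brings it back into service.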
func (vs *VolumeServer) VolumeMount(ctx context.Context, req *volume_server_pb.VolumeMountRequest) (*volume_server_pb.VolumeMountResponse, error) {
    resp := &volume_server_pb.VolumeMountResponse{}

    err := vs.store.MountVolume(needle.VolumeId(req.VolumeId))
    if err != nil {
        glog.Errorf("volume mount %v: %v", req, err)
    } else {
        glog.V(2).Infof("volume mount %v", req)
    }

    return resp, err
}
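
// VolumeUnmount takes a volume out of service without deleting its data.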
func (vs *VolumeServer) VolumeUnmount(ctx context.Context, req *volume_server_pb.VolumeUnmountRequest) (*volume_server_pb.VolumeUnmountResponse, error) {
    resp := &volume_server_pb.VolumeUnmountResponse{}

    err := vs.store.UnmountVolume(needle.VolumeId(req.VolumeId))
    if err != nil {
        glog.Errorf("volume unmount %v: %v", req, err)
    } else {
        glog.V(2).Infof("volume unmount %v", req)
    }

    return resp, err
}
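
// VolumeDelete removes a volume and its underlying files from this server.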
func (vs *VolumeServer) VolumeDelete(ctx context.Context, req *volume_server_pb.VolumeDeleteRequest) (*volume_server_pb.VolumeDeleteResponse, error) {
    resp := &volume_server_pb.VolumeDeleteResponse{}

    err := vs.store.DeleteVolume(needle.VolumeId(req.VolumeId))
    if err != nil {
        glog.Errorf("volume delete %v: %v", req, err)
    } else {
        glog.V(2).Infof("volume delete %v", req)
    }

    return resp, err
}
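
// VolumeConfigure changes the replica placement of a volume by unmounting it,
// rewriting its volume info file, and mounting it again. Failures are reported
// in resp.Error rather than as a gRPC error, so the caller always gets a response.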
func (vs *VolumeServer) VolumeConfigure(ctx context.Context, req *volume_server_pb.VolumeConfigureRequest) (*volume_server_pb.VolumeConfigureResponse, error) {
    resp := &volume_server_pb.VolumeConfigureResponse{}

    // check replication format
    if _, err := super_block.NewReplicaPlacementFromString(req.Replication); err != nil {
        resp.Error = fmt.Sprintf("volume configure replication %v: %v", req, err)
        return resp, nil
    }

    // unmount
    if err := vs.store.UnmountVolume(needle.VolumeId(req.VolumeId)); err != nil {
        glog.Errorf("volume configure unmount %v: %v", req, err)
        resp.Error = fmt.Sprintf("volume configure unmount %v: %v", req, err)
        return resp, nil
    }

    // modify the volume info file
    if err := vs.store.ConfigureVolume(needle.VolumeId(req.VolumeId), req.Replication); err != nil {
        glog.Errorf("volume configure %v: %v", req, err)
        resp.Error = fmt.Sprintf("volume configure %v: %v", req, err)
        return resp, nil
    }

    // mount
    if err := vs.store.MountVolume(needle.VolumeId(req.VolumeId)); err != nil {
        glog.Errorf("volume configure mount %v: %v", req, err)
        resp.Error = fmt.Sprintf("volume configure mount %v: %v", req, err)
        return resp, nil
    }

    return resp, nil
}
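
// VolumeMarkReadonly marks a volume read-only on this server and notifies the
// master so it stops directing writes here.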
func (vs *VolumeServer) VolumeMarkReadonly(ctx context.Context, req *volume_server_pb.VolumeMarkReadonlyRequest) (*volume_server_pb.VolumeMarkReadonlyResponse, error) {
    resp := &volume_server_pb.VolumeMarkReadonlyResponse{}

    v := vs.store.GetVolume(needle.VolumeId(req.VolumeId))
    if v == nil {
        return nil, fmt.Errorf("volume %d not found", req.VolumeId)
    }

    // step 1: stop the master from redirecting traffic here
    if err := vs.notifyMasterVolumeReadonly(v, true); err != nil {
        return resp, err
    }

    // rare case 1.5: a heartbeat sent between step 1 and step 2 could re-advertise the volume as writable

    // step 2: mark the local volume as readonly
    err := vs.store.MarkVolumeReadonly(needle.VolumeId(req.VolumeId))
    if err != nil {
        glog.Errorf("volume mark readonly %v: %v", req, err)
    } else {
        glog.V(2).Infof("volume mark readonly %v", req)
    }

    // step 3: notify the master again to stop redirecting traffic here, covering rare case 1.5
    if err := vs.notifyMasterVolumeReadonly(v, true); err != nil {
        return resp, err
    }

    return resp, err
}
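
// notifyMasterVolumeReadonly reports the volume's read-only state to the current master over gRPC.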
func (vs *VolumeServer) notifyMasterVolumeReadonly(v *storage.Volume, isReadOnly bool) error {
    if grpcErr := pb.WithMasterClient(false, vs.GetMaster(), vs.grpcDialOption, false, func(client master_pb.SeaweedClient) error {
        _, err := client.VolumeMarkReadonly(context.Background(), &master_pb.VolumeMarkReadonlyRequest{
            Ip:               vs.store.Ip,
            Port:             uint32(vs.store.Port),
            VolumeId:         uint32(v.Id),
            Collection:       v.Collection,
            ReplicaPlacement: uint32(v.ReplicaPlacement.Byte()),
            Ttl:              v.Ttl.ToUint32(),
            DiskType:         string(v.DiskType()),
            IsReadonly:       isReadOnly,
        })
        if err != nil {
            return fmt.Errorf("set volume %d to read only on master: %v", v.Id, err)
        }
        return nil
    }); grpcErr != nil {
        glog.V(0).Infof("connect to %s: %v", vs.GetMaster(), grpcErr)
        return fmt.Errorf("grpc VolumeMarkReadonly with master %s: %v", vs.GetMaster(), grpcErr)
    }
    return nil
}
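
// VolumeMarkWritable marks a volume writable again locally and notifies the
// master that it can resume directing writes here.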
func (vs *VolumeServer) VolumeMarkWritable(ctx context.Context, req *volume_server_pb.VolumeMarkWritableRequest) (*volume_server_pb.VolumeMarkWritableResponse, error) {
    resp := &volume_server_pb.VolumeMarkWritableResponse{}

    v := vs.store.GetVolume(needle.VolumeId(req.VolumeId))
    if v == nil {
        return nil, fmt.Errorf("volume %d not found", req.VolumeId)
    }

    err := vs.store.MarkVolumeWritable(needle.VolumeId(req.VolumeId))
    if err != nil {
        glog.Errorf("volume mark writable %v: %v", req, err)
    } else {
        glog.V(2).Infof("volume mark writable %v", req)
    }

    // enable master to redirect traffic here
    if err := vs.notifyMasterVolumeReadonly(v, false); err != nil {
        return resp, err
    }

    return resp, err
}
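
// VolumeStatus reports whether the given volume is read-only.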
func (vs *VolumeServer) VolumeStatus(ctx context.Context, req *volume_server_pb.VolumeStatusRequest) (*volume_server_pb.VolumeStatusResponse, error) {
    resp := &volume_server_pb.VolumeStatusResponse{}

    v := vs.store.GetVolume(needle.VolumeId(req.VolumeId))
    if v == nil {
        return nil, fmt.Errorf("not found volume id %d", req.VolumeId)
    }

    resp.IsReadOnly = v.IsReadOnly()

    return resp, nil
}
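
// VolumeServerStatus reports this server's memory usage, version, data center,
// rack, and the disk status of each configured storage directory.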
func (vs *VolumeServer) VolumeServerStatus(ctx context.Context, req *volume_server_pb.VolumeServerStatusRequest) (*volume_server_pb.VolumeServerStatusResponse, error) {
    resp := &volume_server_pb.VolumeServerStatusResponse{
        MemoryStatus: stats.MemStat(),
        Version:      util.Version(),
        DataCenter:   vs.dataCenter,
        Rack:         vs.rack,
    }

    for _, loc := range vs.store.Locations {
        if dir, e := filepath.Abs(loc.Directory); e == nil {
            resp.DiskStatuses = append(resp.DiskStatuses, stats.NewDiskStatus(dir))
        }
    }

    return resp, nil
}
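
// VolumeServerLeave stops this server's heartbeat to the master so it drops out
// of the cluster topology.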
func (vs *VolumeServer) VolumeServerLeave(ctx context.Context, req *volume_server_pb.VolumeServerLeaveRequest) (*volume_server_pb.VolumeServerLeaveResponse, error) {
    resp := &volume_server_pb.VolumeServerLeaveResponse{}

    vs.StopHeartbeat()

    return resp, nil
}
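
// VolumeNeedleStatus looks up a single needle in a regular or erasure-coded
// volume and returns its id, cookie, size, last-modified time, CRC, and TTL.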
func (vs *VolumeServer) VolumeNeedleStatus(ctx context.Context, req *volume_server_pb.VolumeNeedleStatusRequest) (*volume_server_pb.VolumeNeedleStatusResponse, error) {
    resp := &volume_server_pb.VolumeNeedleStatusResponse{}

    volumeId := needle.VolumeId(req.VolumeId)

    n := &needle.Needle{
        Id: types.NeedleId(req.NeedleId),
    }

    var count int
    var err error
    hasVolume := vs.store.HasVolume(volumeId)
    if !hasVolume {
        _, hasEcVolume := vs.store.FindEcVolume(volumeId)
        if !hasEcVolume {
            return nil, fmt.Errorf("volume not found %d", req.VolumeId)
        }
        count, err = vs.store.ReadEcShardNeedle(volumeId, n, nil)
    } else {
        count, err = vs.store.ReadVolumeNeedle(volumeId, n, nil, nil)
    }
    if err != nil {
        return nil, err
    }
    if count < 0 {
        return nil, fmt.Errorf("needle not found %d", n.Id)
    }

    resp.NeedleId = uint64(n.Id)
    resp.Cookie = uint32(n.Cookie)
    resp.Size = uint32(n.Size)
    resp.LastModified = n.LastModified
    resp.Crc = n.Checksum.Value()
    if n.HasTtl() {
        resp.Ttl = n.Ttl.String()
    }
    return resp, nil
}
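
// Ping records the local start and stop times and, depending on the target
// type, pings the given filer, volume server, or master to capture its remote time.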
func (vs *VolumeServer) Ping(ctx context.Context, req *volume_server_pb.PingRequest) (resp *volume_server_pb.PingResponse, pingErr error) {
    resp = &volume_server_pb.PingResponse{
        StartTimeNs: time.Now().UnixNano(),
    }
    if req.TargetType == cluster.FilerType {
        pingErr = pb.WithFilerClient(false, pb.ServerAddress(req.Target), vs.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
            pingResp, err := client.Ping(ctx, &filer_pb.PingRequest{})
            if pingResp != nil {
                resp.RemoteTimeNs = pingResp.StartTimeNs
            }
            return err
        })
    }
    if req.TargetType == cluster.VolumeServerType {
        pingErr = pb.WithVolumeServerClient(false, pb.ServerAddress(req.Target), vs.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
            pingResp, err := client.Ping(ctx, &volume_server_pb.PingRequest{})
            if pingResp != nil {
                resp.RemoteTimeNs = pingResp.StartTimeNs
            }
            return err
        })
    }
    if req.TargetType == cluster.MasterType {
        pingErr = pb.WithMasterClient(false, pb.ServerAddress(req.Target), vs.grpcDialOption, false, func(client master_pb.SeaweedClient) error {
            pingResp, err := client.Ping(ctx, &master_pb.PingRequest{})
            if pingResp != nil {
                resp.RemoteTimeNs = pingResp.StartTimeNs
            }
            return err
        })
    }
    if pingErr != nil {
        pingErr = fmt.Errorf("ping %s %s: %v", req.TargetType, req.Target, pingErr)
    }
    resp.StopTimeNs = time.Now().UnixNano()
    return
}