package weed_server

import (
	"context"
	"errors"
	"fmt"
	"net"
	"sort"
	"time"

	"github.com/seaweedfs/raft"
	"google.golang.org/grpc/peer"

	"github.com/seaweedfs/seaweedfs/weed/cluster"
	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
	"github.com/seaweedfs/seaweedfs/weed/stats"
	"github.com/seaweedfs/seaweedfs/weed/storage/backend"
	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
	"github.com/seaweedfs/seaweedfs/weed/storage/topology"
	"github.com/seaweedfs/seaweedfs/weed/util"
)
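// RegisterUuids records the volume-directory uuids reported in a heartbeat under the
// reporting volume server's ip:port key, and returns any uuids that are already
// registered by another volume server (i.e. the same directory loaded twice).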
func (ms *MasterServer) RegisterUuids(heartbeat *master_pb.Heartbeat) (duplicated_uuids []string, err error) {
	ms.Topo.UuidAccessLock.Lock()
	defer ms.Topo.UuidAccessLock.Unlock()
	key := fmt.Sprintf("%s:%d", heartbeat.Ip, heartbeat.Port)
	if ms.Topo.UuidMap == nil {
		ms.Topo.UuidMap = make(map[string][]string)
	}
	// find whether new uuid exists
	for k, v := range ms.Topo.UuidMap {
		sort.Strings(v)
		for _, id := range heartbeat.LocationUuids {
			index := sort.SearchStrings(v, id)
			if index < len(v) && v[index] == id {
				duplicated_uuids = append(duplicated_uuids, id)
				glog.Errorf("directory of %s on %s has been loaded", id, k)
			}
		}
	}
	if len(duplicated_uuids) > 0 {
		return duplicated_uuids, errors.New("volume: Duplicated volume directories were loaded")
	}

	ms.Topo.UuidMap[key] = heartbeat.LocationUuids
	glog.V(0).Infof("found new uuid:%v %v, %v", key, heartbeat.LocationUuids, ms.Topo.UuidMap)
	return nil, nil
}
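// UnRegisterUuids removes the uuid registration for the volume server at ip:port,
// typically when that server disconnects.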
func (ms *MasterServer) UnRegisterUuids(ip string, port int) {
	ms.Topo.UuidAccessLock.Lock()
	defer ms.Topo.UuidAccessLock.Unlock()
	key := fmt.Sprintf("%s:%d", ip, port)
	delete(ms.Topo.UuidMap, key)
	glog.V(0).Infof("remove volume server %v, online volume server: %v", key, ms.Topo.UuidMap)
}
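// SendHeartbeat handles the volume server heartbeat stream: it registers the data node
// on the first heartbeat, applies volume and EC shard changes to the topology, and
// broadcasts the resulting volume location changes to connected clients.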
func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServer) error {
	var dn *topology.DataNode

	defer func() {
		if dn != nil {
			dn.Counter--
			if dn.Counter > 0 {
				glog.V(0).Infof("disconnect phantom volume server %s:%d remaining %d", dn.Ip, dn.Port, dn.Counter)
				return
			}
			// if the volume server disconnects and reconnects quickly
			// the unregister and register can race with each other
			ms.Topo.UnRegisterDataNode(dn)
			glog.V(0).Infof("unregister disconnected volume server %s:%d", dn.Ip, dn.Port)
			ms.UnRegisterUuids(dn.Ip, dn.Port)

			message := &master_pb.VolumeLocation{
				Url:       dn.Url(),
				PublicUrl: dn.PublicUrl,
			}
			for _, v := range dn.GetVolumes() {
				message.DeletedVids = append(message.DeletedVids, uint32(v.Id))
			}
			for _, s := range dn.GetEcShards() {
				message.DeletedVids = append(message.DeletedVids, uint32(s.VolumeId))
			}

			if len(message.DeletedVids) > 0 {
				ms.broadcastToClients(&master_pb.KeepConnectedResponse{VolumeLocation: message})
			}
		}
	}()
	for {
		heartbeat, err := stream.Recv()
		if err != nil {
			if dn != nil {
				glog.Warningf("SendHeartbeat.Recv server %s:%d : %v", dn.Ip, dn.Port, err)
			} else {
				glog.Warningf("SendHeartbeat.Recv: %v", err)
			}
			stats.MasterReceivedHeartbeatCounter.WithLabelValues("error").Inc()
			return err
		}

		ms.Topo.Sequence.SetMax(heartbeat.MaxFileKey)

		if dn == nil {
			// first heartbeat on this stream: locate and register the data node
			dcName, rackName := ms.Topo.Configuration.Locate(heartbeat.Ip, heartbeat.DataCenter, heartbeat.Rack)
			dc := ms.Topo.GetOrCreateDataCenter(dcName)
			rack := dc.GetOrCreateRack(rackName)
			dn = rack.GetOrCreateDataNode(heartbeat.Ip, int(heartbeat.Port), int(heartbeat.GrpcPort), heartbeat.PublicUrl, heartbeat.MaxVolumeCounts)
			glog.V(0).Infof("added volume server %d: %v:%d %v", dn.Counter, heartbeat.GetIp(), heartbeat.GetPort(), heartbeat.LocationUuids)
			uuidlist, err := ms.RegisterUuids(heartbeat)
			if err != nil {
				if stream_err := stream.Send(&master_pb.HeartbeatResponse{
					DuplicatedUuids: uuidlist,
				}); stream_err != nil {
					glog.Warningf("SendHeartbeat.Send DuplicatedDirectory response to %s:%d %v", dn.Ip, dn.Port, stream_err)
					return stream_err
				}
				return err
			}

			if err := stream.Send(&master_pb.HeartbeatResponse{
				VolumeSizeLimit: uint64(ms.option.VolumeSizeLimitMB) * 1024 * 1024,
			}); err != nil {
				glog.Warningf("SendHeartbeat.Send volume size to %s:%d %v", dn.Ip, dn.Port, err)
				return err
			}
			stats.MasterReceivedHeartbeatCounter.WithLabelValues("dataNode").Inc()
			dn.Counter++
		}
		dn.AdjustMaxVolumeCounts(heartbeat.MaxVolumeCounts)

		glog.V(4).Infof("master received heartbeat %s", heartbeat.String())
		stats.MasterReceivedHeartbeatCounter.WithLabelValues("total").Inc()

		var dataCenter string
		if dc := dn.GetDataCenter(); dc != nil {
			dataCenter = string(dc.Id())
		}
		message := &master_pb.VolumeLocation{
			Url:        dn.Url(),
			PublicUrl:  dn.PublicUrl,
			DataCenter: dataCenter,
		}

		if len(heartbeat.NewVolumes) > 0 {
			stats.FilerRequestCounter.WithLabelValues("newVolumes").Inc()
		}
		if len(heartbeat.DeletedVolumes) > 0 {
			stats.FilerRequestCounter.WithLabelValues("deletedVolumes").Inc()
		}
		if len(heartbeat.NewVolumes) > 0 || len(heartbeat.DeletedVolumes) > 0 {
			// process delta volume ids if they exist, for fast volume id updates
			for _, volInfo := range heartbeat.NewVolumes {
				message.NewVids = append(message.NewVids, volInfo.Id)
			}
			for _, volInfo := range heartbeat.DeletedVolumes {
				message.DeletedVids = append(message.DeletedVids, volInfo.Id)
			}
			// update master internal volume layouts
			ms.Topo.IncrementalSyncDataNodeRegistration(heartbeat.NewVolumes, heartbeat.DeletedVolumes, dn)
		}
		if len(heartbeat.Volumes) > 0 || heartbeat.HasNoVolumes {
			dcName, rackName := ms.Topo.Configuration.Locate(heartbeat.Ip, heartbeat.DataCenter, heartbeat.Rack)
			ms.Topo.DataNodeRegistration(dcName, rackName, dn)

			// process heartbeat.Volumes
			stats.MasterReceivedHeartbeatCounter.WithLabelValues("Volumes").Inc()
			newVolumes, deletedVolumes := ms.Topo.SyncDataNodeRegistration(heartbeat.Volumes, dn)

			for _, v := range newVolumes {
				glog.V(0).Infof("master see new volume %d from %s", uint32(v.Id), dn.Url())
				message.NewVids = append(message.NewVids, uint32(v.Id))
			}
			for _, v := range deletedVolumes {
				glog.V(0).Infof("master see deleted volume %d from %s", uint32(v.Id), dn.Url())
				message.DeletedVids = append(message.DeletedVids, uint32(v.Id))
			}
		}

		if len(heartbeat.NewEcShards) > 0 || len(heartbeat.DeletedEcShards) > 0 {
			stats.MasterReceivedHeartbeatCounter.WithLabelValues("newEcShards").Inc()
			// update master internal volume layouts
			ms.Topo.IncrementalSyncDataNodeEcShards(heartbeat.NewEcShards, heartbeat.DeletedEcShards, dn)

			for _, s := range heartbeat.NewEcShards {
				message.NewEcVids = append(message.NewEcVids, s.Id)
			}
			for _, s := range heartbeat.DeletedEcShards {
				if dn.HasEcShards(needle.VolumeId(s.Id)) {
					continue
				}
				message.DeletedEcVids = append(message.DeletedEcVids, s.Id)
			}
		}
		if len(heartbeat.EcShards) > 0 || heartbeat.HasNoEcShards {
			stats.MasterReceivedHeartbeatCounter.WithLabelValues("ecShards").Inc()
			glog.V(4).Infof("master received ec shards from %s: %+v", dn.Url(), heartbeat.EcShards)
			newShards, deletedShards := ms.Topo.SyncDataNodeEcShards(heartbeat.EcShards, dn)

			// broadcast the ec vid changes to master clients
			for _, s := range newShards {
				message.NewEcVids = append(message.NewEcVids, uint32(s.VolumeId))
			}
			for _, s := range deletedShards {
				if dn.HasVolumesById(s.VolumeId) {
					continue
				}
				message.DeletedEcVids = append(message.DeletedEcVids, uint32(s.VolumeId))
			}
		}

		if len(message.NewVids) > 0 || len(message.DeletedVids) > 0 || len(message.NewEcVids) > 0 || len(message.DeletedEcVids) > 0 {
			ms.broadcastToClients(&master_pb.KeepConnectedResponse{VolumeLocation: message})
		}

		// tell the volume servers about the leader
		newLeader, err := ms.Topo.Leader()
		if err != nil {
			glog.Warningf("SendHeartbeat find leader: %v", err)
			return err
		}
		if err := stream.Send(&master_pb.HeartbeatResponse{
			Leader: string(newLeader),
		}); err != nil {
			glog.Warningf("SendHeartbeat.Send response to %s:%d %v", dn.Ip, dn.Port, err)
			return err
		}
	}
}
// KeepConnected keeps a streaming gRPC connection open to the master. Clients use it to
// know that the master is up, and to receive the up-to-date list of volume locations.
func (ms *MasterServer) KeepConnected(stream master_pb.Seaweed_KeepConnectedServer) error {

	req, recvErr := stream.Recv()
	if recvErr != nil {
		return recvErr
	}

	if !ms.Topo.IsLeader() {
		return ms.informNewLeader(stream)
	}

	peerAddress := pb.ServerAddress(req.ClientAddress)

	// buffer by 1 so we don't end up getting stuck writing to stopChan forever
	stopChan := make(chan bool, 1)

	clientName, messageChan := ms.addClient(req.FilerGroup, req.ClientType, peerAddress)
	for _, update := range ms.Cluster.AddClusterNode(req.FilerGroup, req.ClientType, cluster.DataCenter(req.DataCenter), cluster.Rack(req.Rack), peerAddress, req.Version) {
		ms.broadcastToClients(update)
	}

	defer func() {
		for _, update := range ms.Cluster.RemoveClusterNode(req.FilerGroup, req.ClientType, peerAddress) {
			ms.broadcastToClients(update)
		}
		ms.deleteClient(clientName)
	}()

	for i, message := range ms.Topo.ToVolumeLocations() {
		if i == 0 {
			if leader, err := ms.Topo.Leader(); err == nil {
				message.Leader = string(leader)
			}
		}
		if sendErr := stream.Send(&master_pb.KeepConnectedResponse{VolumeLocation: message}); sendErr != nil {
			return sendErr
		}
	}

	go func() {
		for {
			_, err := stream.Recv()
			if err != nil {
				glog.V(2).Infof("- client %v: %v", clientName, err)
				close(stopChan)
				return
			}
		}
	}()

	ticker := time.NewTicker(5 * time.Second)
	for {
		select {
		case message := <-messageChan:
			if err := stream.Send(message); err != nil {
				glog.V(0).Infof("=> client %v: %+v", clientName, message)
				return err
			}
		case <-ticker.C:
			if !ms.Topo.IsLeader() {
				stats.MasterRaftIsleader.Set(0)
				return ms.informNewLeader(stream)
			} else {
				stats.MasterRaftIsleader.Set(1)
			}
		case <-stopChan:
			return nil
		}
	}
}
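// broadcastToClients delivers a message to every connected KeepConnected client channel.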
func (ms *MasterServer) broadcastToClients(message *master_pb.KeepConnectedResponse) {
	ms.clientChansLock.RLock()
	for _, ch := range ms.clientChans {
		ch <- message
	}
	ms.clientChansLock.RUnlock()
}
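// informNewLeader tells the client which master is the current leader, or returns
// raft.NotLeaderError if no leader is known.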
func (ms *MasterServer) informNewLeader(stream master_pb.Seaweed_KeepConnectedServer) error {
	leader, err := ms.Topo.Leader()
	if err != nil {
		glog.Errorf("topo leader: %v", err)
		return raft.NotLeaderError
	}
	if err := stream.Send(&master_pb.KeepConnectedResponse{
		VolumeLocation: &master_pb.VolumeLocation{
			Leader: string(leader),
		},
	}); err != nil {
		return err
	}
	return nil
}
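// addClient registers a buffered message channel for a client so that broadcasts can
// be delivered to it, and returns the client's name and channel.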
func (ms *MasterServer) addClient(filerGroup, clientType string, clientAddress pb.ServerAddress) (clientName string, messageChan chan *master_pb.KeepConnectedResponse) {
	clientName = filerGroup + "." + clientType + "@" + string(clientAddress)
	glog.V(0).Infof("+ client %v", clientName)

	// we buffer this because otherwise we end up in a potential deadlock where
	// the KeepConnected loop is no longer listening on this channel but we're
	// trying to send to it in SendHeartbeat and so we can't lock the
	// clientChansLock to remove the channel and we're stuck writing to it
	// 100 is probably overkill
	messageChan = make(chan *master_pb.KeepConnectedResponse, 100)

	ms.clientChansLock.Lock()
	ms.clientChans[clientName] = messageChan
	ms.clientChansLock.Unlock()
	return
}
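// deleteClient removes a client's message channel registered by addClient.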
func (ms *MasterServer) deleteClient(clientName string) {
	glog.V(0).Infof("- client %v", clientName)
	ms.clientChansLock.Lock()
	delete(ms.clientChans, clientName)
	ms.clientChansLock.Unlock()
}
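// findClientAddress extracts the caller's address from the gRPC context; if grpcPort
// is non-zero, the peer's source port is replaced with that port.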
func findClientAddress(ctx context.Context, grpcPort uint32) string {
	// fmt.Printf("FromContext %+v\n", ctx)
	pr, ok := peer.FromContext(ctx)
	if !ok {
		glog.Error("failed to get peer from ctx")
		return ""
	}
	if pr.Addr == net.Addr(nil) {
		glog.Error("failed to get peer address")
		return ""
	}
	if grpcPort == 0 {
		return pr.Addr.String()
	}
	if tcpAddr, ok := pr.Addr.(*net.TCPAddr); ok {
		externalIP := tcpAddr.IP
		return util.JoinHostPort(externalIP.String(), int(grpcPort))
	}
	return pr.Addr.String()
}
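// GetMasterConfiguration returns the master's runtime configuration, including metrics
// settings, storage backends, default replication, volume size limit, and the current leader.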
func (ms *MasterServer) GetMasterConfiguration(ctx context.Context, req *master_pb.GetMasterConfigurationRequest) (*master_pb.GetMasterConfigurationResponse, error) {

	// tell the volume servers about the leader
	leader, _ := ms.Topo.Leader()

	resp := &master_pb.GetMasterConfigurationResponse{
		MetricsAddress:         ms.option.MetricsAddress,
		MetricsIntervalSeconds: uint32(ms.option.MetricsIntervalSec),
		StorageBackends:        backend.ToPbStorageBackends(),
		DefaultReplication:     ms.option.DefaultReplicaPlacement,
		VolumeSizeLimitMB:      uint32(ms.option.VolumeSizeLimitMB),
		VolumePreallocate:      ms.option.VolumePreallocate,
		Leader:                 string(leader),
	}

	return resp, nil
}