package wdclient

import (
	"context"
	"fmt"
	"math/rand"
	"sync"
	"time"

	"google.golang.org/grpc"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
	"github.com/seaweedfs/seaweedfs/weed/stats"
	"github.com/seaweedfs/seaweedfs/weed/util"
)
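
// MasterClient maintains a long-lived connection to the master cluster and keeps
// a cached vidMap of volume id → server locations for this client.
//
// Rough usage sketch (the exact wiring depends on the caller):
//
//	mc := NewMasterClient(grpcDialOption, filerGroup, clientType, clientHost, dataCenter, rack, masters)
//	go mc.KeepConnectedToMaster()
//	lookup := mc.GetLookupFileIdFunction()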
type MasterClient struct {
	FilerGroup string
	clientType string
	clientHost pb.ServerAddress
	rack       string

	currentMaster     pb.ServerAddress
	currentMasterLock sync.RWMutex

	masters        map[string]pb.ServerAddress
	grpcDialOption grpc.DialOption

	*vidMap
	vidMapCacheSize int

	OnPeerUpdate     func(update *master_pb.ClusterNodeUpdate, startFrom time.Time)
	OnPeerUpdateLock sync.RWMutex
}
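
// NewMasterClient creates a MasterClient that is not yet connected to any master;
// start KeepConnectedToMaster to establish and maintain the connection.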
func NewMasterClient(grpcDialOption grpc.DialOption, filerGroup string, clientType string, clientHost pb.ServerAddress, clientDataCenter string, rack string, masters map[string]pb.ServerAddress) *MasterClient {
	return &MasterClient{
		FilerGroup:      filerGroup,
		clientType:      clientType,
		clientHost:      clientHost,
		rack:            rack,
		masters:         masters,
		grpcDialOption:  grpcDialOption,
		vidMap:          newVidMap(clientDataCenter),
		vidMapCacheSize: 5,
	}
}
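
// SetOnPeerUpdateFn registers a callback that is invoked whenever the master
// reports a cluster membership change for this client's filer group.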
func (mc *MasterClient) SetOnPeerUpdateFn(onPeerUpdate func(update *master_pb.ClusterNodeUpdate, startFrom time.Time)) {
	mc.OnPeerUpdateLock.Lock()
	mc.OnPeerUpdate = onPeerUpdate
	mc.OnPeerUpdateLock.Unlock()
}

func (mc *MasterClient) GetLookupFileIdFunction() LookupFileIdFunctionType {
	return mc.LookupFileIdWithFallback
}
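
// LookupFileIdWithFallback first resolves the file id against the locally cached
// vidMap; on a miss it asks the current master, caches the returned locations,
// and prefers URLs in the client's own data center.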
func (mc *MasterClient) LookupFileIdWithFallback(fileId string) (fullUrls []string, err error) {
	fullUrls, err = mc.vidMap.LookupFileId(fileId)
	if err == nil && len(fullUrls) > 0 {
		return
	}
	err = pb.WithMasterClient(false, mc.GetMaster(), mc.grpcDialOption, false, func(client master_pb.SeaweedClient) error {
		resp, err := client.LookupVolume(context.Background(), &master_pb.LookupVolumeRequest{
			VolumeOrFileIds: []string{fileId},
		})
		if err != nil {
			return fmt.Errorf("LookupVolume %s failed: %v", fileId, err)
		}
		for vid, vidLocation := range resp.VolumeIdLocations {
			for _, vidLoc := range vidLocation.Locations {
				loc := Location{
					Url:        vidLoc.Url,
					PublicUrl:  vidLoc.PublicUrl,
					GrpcPort:   int(vidLoc.GrpcPort),
					DataCenter: vidLoc.DataCenter,
				}
				mc.vidMap.addLocation(uint32(vid), loc)
				httpUrl := "http://" + loc.Url + "/" + fileId
				// Prefer same data center
				if mc.DataCenter != "" && mc.DataCenter == loc.DataCenter {
					fullUrls = append([]string{httpUrl}, fullUrls...)
				} else {
					fullUrls = append(fullUrls, httpUrl)
				}
			}
		}
		return nil
	})
	return
}

func (mc *MasterClient) getCurrentMaster() pb.ServerAddress {
	mc.currentMasterLock.RLock()
	defer mc.currentMasterLock.RUnlock()
	return mc.currentMaster
}

func (mc *MasterClient) setCurrentMaster(master pb.ServerAddress) {
	mc.currentMasterLock.Lock()
	mc.currentMaster = master
	mc.currentMasterLock.Unlock()
}

func (mc *MasterClient) GetMaster() pb.ServerAddress {
	mc.WaitUntilConnected()
	return mc.getCurrentMaster()
}

func (mc *MasterClient) GetMasters() map[string]pb.ServerAddress {
	mc.WaitUntilConnected()
	return mc.masters
}
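
// WaitUntilConnected blocks until a current master has been set, polling with a
// small randomized sleep between checks.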
func (mc *MasterClient) WaitUntilConnected() {
	for {
		if mc.getCurrentMaster() != "" {
			return
		}
		time.Sleep(time.Duration(rand.Int31n(200)) * time.Millisecond)
	}
}
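
// KeepConnectedToMaster loops forever, cycling through the configured masters and
// retrying after a short pause whenever the connection is lost.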
func (mc *MasterClient) KeepConnectedToMaster() {
	glog.V(1).Infof("%s.%s masterClient bootstraps with masters %v", mc.FilerGroup, mc.clientType, mc.masters)
	for {
		mc.tryAllMasters()
		time.Sleep(time.Second)
	}
}
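
// FindLeaderFromOtherPeers queries the other configured masters for their view of
// the current leader and returns the first non-empty answer, or "" if none respond.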
func (mc *MasterClient) FindLeaderFromOtherPeers(myMasterAddress pb.ServerAddress) (leader string) {
	for _, master := range mc.masters {
		if master == myMasterAddress {
			continue
		}
		if grpcErr := pb.WithMasterClient(false, master, mc.grpcDialOption, false, func(client master_pb.SeaweedClient) error {
			ctx, cancel := context.WithTimeout(context.Background(), 120*time.Millisecond)
			defer cancel()
			resp, err := client.GetMasterConfiguration(ctx, &master_pb.GetMasterConfigurationRequest{})
			if err != nil {
				return err
			}
			leader = resp.Leader
			return nil
		}); grpcErr != nil {
			glog.V(0).Infof("connect to %s: %v", master, grpcErr)
		}
		if leader != "" {
			glog.V(0).Infof("existing leader is %s", leader)
			return
		}
	}
	glog.V(0).Infof("No existing leader found!")
	return
}
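
// tryAllMasters attempts each configured master in turn, following the leader hints
// returned by tryConnectToMaster until no further redirect is given.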
func (mc *MasterClient) tryAllMasters() {
	var nextHintedLeader pb.ServerAddress
	for _, master := range mc.masters {
		nextHintedLeader = mc.tryConnectToMaster(master)
		for nextHintedLeader != "" {
			nextHintedLeader = mc.tryConnectToMaster(nextHintedLeader)
		}
		mc.setCurrentMaster("")
	}
}
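
// tryConnectToMaster opens a KeepConnected stream to the given master, applies the
// streamed volume location and cluster node updates, and returns a hinted leader
// address when the master redirects this client to the actual leader.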
func (mc *MasterClient) tryConnectToMaster(master pb.ServerAddress) (nextHintedLeader pb.ServerAddress) {
	glog.V(1).Infof("%s.%s masterClient Connecting to master %v", mc.FilerGroup, mc.clientType, master)
	stats.MasterClientConnectCounter.WithLabelValues("total").Inc()
	grpcErr := pb.WithMasterClient(true, master, mc.grpcDialOption, false, func(client master_pb.SeaweedClient) error {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		stream, err := client.KeepConnected(ctx)
		if err != nil {
			glog.V(1).Infof("%s.%s masterClient failed to keep connected to %s: %v", mc.FilerGroup, mc.clientType, master, err)
			stats.MasterClientConnectCounter.WithLabelValues(stats.FailedToKeepConnected).Inc()
			return err
		}
		if err = stream.Send(&master_pb.KeepConnectedRequest{
			FilerGroup:    mc.FilerGroup,
			DataCenter:    mc.DataCenter,
			Rack:          mc.rack,
			ClientType:    mc.clientType,
			ClientAddress: string(mc.clientHost),
			Version:       util.Version(),
		}); err != nil {
			glog.V(0).Infof("%s.%s masterClient failed to send to %s: %v", mc.FilerGroup, mc.clientType, master, err)
			stats.MasterClientConnectCounter.WithLabelValues(stats.FailedToSend).Inc()
			return err
		}
		glog.V(1).Infof("%s.%s masterClient Connected to %v", mc.FilerGroup, mc.clientType, master)

		resp, err := stream.Recv()
		if err != nil {
			glog.V(0).Infof("%s.%s masterClient failed to receive from %s: %v", mc.FilerGroup, mc.clientType, master, err)
			stats.MasterClientConnectCounter.WithLabelValues(stats.FailedToReceive).Inc()
			return err
		}

		// check if it is the leader to determine whether to reset the vidMap
		if resp.VolumeLocation != nil {
			if resp.VolumeLocation.Leader != "" && string(master) != resp.VolumeLocation.Leader {
				glog.V(0).Infof("master %v redirected to leader %v", master, resp.VolumeLocation.Leader)
				nextHintedLeader = pb.ServerAddress(resp.VolumeLocation.Leader)
				stats.MasterClientConnectCounter.WithLabelValues(stats.RedirectedToLeader).Inc()
				return nil
			}
			mc.resetVidMap()
			mc.updateVidMap(resp)
		} else {
			mc.resetVidMap()
		}
		mc.setCurrentMaster(master)

		for {
			resp, err := stream.Recv()
			if err != nil {
				glog.V(0).Infof("%s.%s masterClient failed to receive from %s: %v", mc.FilerGroup, mc.clientType, master, err)
				stats.MasterClientConnectCounter.WithLabelValues(stats.FailedToReceive).Inc()
				return err
			}

			if resp.VolumeLocation != nil {
				// maybe the leader is changed
				if resp.VolumeLocation.Leader != "" && string(mc.GetMaster()) != resp.VolumeLocation.Leader {
					glog.V(0).Infof("currentMaster %v redirected to leader %v", mc.GetMaster(), resp.VolumeLocation.Leader)
					nextHintedLeader = pb.ServerAddress(resp.VolumeLocation.Leader)
					stats.MasterClientConnectCounter.WithLabelValues(stats.RedirectedToLeader).Inc()
					return nil
				}
				mc.updateVidMap(resp)
			}

			if resp.ClusterNodeUpdate != nil {
				update := resp.ClusterNodeUpdate
				mc.OnPeerUpdateLock.RLock()
				if mc.OnPeerUpdate != nil {
					if update.FilerGroup == mc.FilerGroup {
						if update.IsAdd {
							glog.V(0).Infof("+ %s.%s %s leader:%v\n", update.FilerGroup, update.NodeType, update.Address, update.IsLeader)
						} else {
							glog.V(0).Infof("- %s.%s %s leader:%v\n", update.FilerGroup, update.NodeType, update.Address, update.IsLeader)
						}
						stats.MasterClientConnectCounter.WithLabelValues(stats.OnPeerUpdate).Inc()
						mc.OnPeerUpdate(update, time.Now())
					}
				}
				mc.OnPeerUpdateLock.RUnlock()
			}
		}
	})
	if grpcErr != nil {
		stats.MasterClientConnectCounter.WithLabelValues(stats.Failed).Inc()
		glog.V(1).Infof("%s.%s masterClient failed to connect with master %v: %v", mc.FilerGroup, mc.clientType, master, grpcErr)
	}
	return
}
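
// updateVidMap applies the volume and EC volume add/delete events carried by a
// KeepConnected response to the cached vidMap, skipping short heartbeats.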
func (mc *MasterClient) updateVidMap(resp *master_pb.KeepConnectedResponse) {
	if resp.VolumeLocation.IsEmptyUrl() {
		glog.V(0).Infof("updateVidMap ignore short heartbeat: %+v", resp)
		return
	}
	// process new volume location
	loc := Location{
		Url:        resp.VolumeLocation.Url,
		PublicUrl:  resp.VolumeLocation.PublicUrl,
		DataCenter: resp.VolumeLocation.DataCenter,
		GrpcPort:   int(resp.VolumeLocation.GrpcPort),
	}
	for _, newVid := range resp.VolumeLocation.NewVids {
		glog.V(2).Infof("%s.%s: %s masterClient adds volume %d", mc.FilerGroup, mc.clientType, loc.Url, newVid)
		mc.addLocation(newVid, loc)
	}
	for _, deletedVid := range resp.VolumeLocation.DeletedVids {
		glog.V(2).Infof("%s.%s: %s masterClient removes volume %d", mc.FilerGroup, mc.clientType, loc.Url, deletedVid)
		mc.deleteLocation(deletedVid, loc)
	}
	for _, newEcVid := range resp.VolumeLocation.NewEcVids {
		glog.V(2).Infof("%s.%s: %s masterClient adds ec volume %d", mc.FilerGroup, mc.clientType, loc.Url, newEcVid)
		mc.addEcLocation(newEcVid, loc)
	}
	for _, deletedEcVid := range resp.VolumeLocation.DeletedEcVids {
		glog.V(2).Infof("%s.%s: %s masterClient removes ec volume %d", mc.FilerGroup, mc.clientType, loc.Url, deletedEcVid)
		mc.deleteEcLocation(deletedEcVid, loc)
	}
	glog.V(1).Infof("updateVidMap(%s) %s.%s: %s volume add: %d, del: %d, add ec: %d del ec: %d",
		resp.VolumeLocation.DataCenter, mc.FilerGroup, mc.clientType, loc.Url,
		len(resp.VolumeLocation.NewVids), len(resp.VolumeLocation.DeletedVids),
		len(resp.VolumeLocation.NewEcVids), len(resp.VolumeLocation.DeletedEcVids))
}
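
// WithClient runs fn against the current master's gRPC client, wrapping the call
// in util.Retry so it is re-attempted on failure.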
func (mc *MasterClient) WithClient(streamingMode bool, fn func(client master_pb.SeaweedClient) error) error {
	return util.Retry("master grpc", func() error {
		return pb.WithMasterClient(streamingMode, mc.GetMaster(), mc.grpcDialOption, false, func(client master_pb.SeaweedClient) error {
			return fn(client)
		})
	})
}
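
// resetVidMap replaces the active vidMap with a fresh one and keeps the previous
// map reachable via the cache chain, trimmed to at most vidMapCacheSize entries.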
func (mc *MasterClient) resetVidMap() {
	tail := &vidMap{
		vid2Locations:   mc.vid2Locations,
		ecVid2Locations: mc.ecVid2Locations,
		DataCenter:      mc.DataCenter,
		cache:           mc.cache,
	}

	nvm := newVidMap(mc.DataCenter)
	nvm.cache = tail
	mc.vidMap = nvm

	// trim
	for i := 0; i < mc.vidMapCacheSize && tail.cache != nil; i++ {
		if i == mc.vidMapCacheSize-1 {
			tail.cache = nil
		} else {
			tail = tail.cache
		}
	}
}