You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

328 lines
11 KiB

6 years ago
2 years ago
1 year ago
2 years ago
2 years ago
2 years ago
2 years ago
2 years ago
2 years ago
2 years ago
4 years ago
3 years ago
2 years ago
1 year ago
1 year ago
2 years ago
6 years ago
  1. package wdclient
import (
	"context"
	"fmt"
	"math/rand"
	"strconv"
	"strings"
	"sync"
	"time"

	"google.golang.org/grpc"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
	"github.com/seaweedfs/seaweedfs/weed/stats"
	"github.com/seaweedfs/seaweedfs/weed/util"
)
// MasterClient keeps a long-lived connection to the master leader and caches
// volume-id -> location lookups via the embedded *vidMap.
type MasterClient struct {
	FilerGroup string
	clientType string
	clientHost pb.ServerAddress
	rack       string

	// currentMaster is the master this client is currently connected to;
	// empty while disconnected. Guarded by currentMasterLock.
	currentMaster     pb.ServerAddress
	currentMasterLock sync.RWMutex

	masters        pb.ServerDiscovery
	grpcDialOption grpc.DialOption

	// Embedded active volume-location map; resetVidMap swaps it out and keeps
	// up to vidMapCacheSize previous generations chained behind it.
	*vidMap
	vidMapCacheSize int

	// OnPeerUpdate, if set, is invoked for cluster membership changes of the
	// same filer group. Guarded by OnPeerUpdateLock.
	OnPeerUpdate     func(update *master_pb.ClusterNodeUpdate, startFrom time.Time)
	OnPeerUpdateLock sync.RWMutex
}
  29. func NewMasterClient(grpcDialOption grpc.DialOption, filerGroup string, clientType string, clientHost pb.ServerAddress, clientDataCenter string, rack string, masters pb.ServerDiscovery) *MasterClient {
  30. return &MasterClient{
  31. FilerGroup: filerGroup,
  32. clientType: clientType,
  33. clientHost: clientHost,
  34. rack: rack,
  35. masters: masters,
  36. grpcDialOption: grpcDialOption,
  37. vidMap: newVidMap(clientDataCenter),
  38. vidMapCacheSize: 5,
  39. }
  40. }
  41. func (mc *MasterClient) SetOnPeerUpdateFn(onPeerUpdate func(update *master_pb.ClusterNodeUpdate, startFrom time.Time)) {
  42. mc.OnPeerUpdateLock.Lock()
  43. mc.OnPeerUpdate = onPeerUpdate
  44. mc.OnPeerUpdateLock.Unlock()
  45. }
  46. func (mc *MasterClient) GetLookupFileIdFunction() LookupFileIdFunctionType {
  47. return mc.LookupFileIdWithFallback
  48. }
  49. func (mc *MasterClient) LookupFileIdWithFallback(fileId string) (fullUrls []string, err error) {
  50. fullUrls, err = mc.vidMap.LookupFileId(fileId)
  51. if err == nil && len(fullUrls) > 0 {
  52. return
  53. }
  54. err = pb.WithMasterClient(false, mc.GetMaster(), mc.grpcDialOption, false, func(client master_pb.SeaweedClient) error {
  55. resp, err := client.LookupVolume(context.Background(), &master_pb.LookupVolumeRequest{
  56. VolumeOrFileIds: []string{fileId},
  57. })
  58. if err != nil {
  59. return fmt.Errorf("LookupVolume %s failed: %v", fileId, err)
  60. }
  61. for vid, vidLocation := range resp.VolumeIdLocations {
  62. for _, vidLoc := range vidLocation.Locations {
  63. loc := Location{
  64. Url: vidLoc.Url,
  65. PublicUrl: vidLoc.PublicUrl,
  66. GrpcPort: int(vidLoc.GrpcPort),
  67. DataCenter: vidLoc.DataCenter,
  68. }
  69. mc.vidMap.addLocation(uint32(vid), loc)
  70. httpUrl := "http://" + loc.Url + "/" + fileId
  71. // Prefer same data center
  72. if mc.DataCenter != "" && mc.DataCenter == loc.DataCenter {
  73. fullUrls = append([]string{httpUrl}, fullUrls...)
  74. } else {
  75. fullUrls = append(fullUrls, httpUrl)
  76. }
  77. }
  78. }
  79. return nil
  80. })
  81. return
  82. }
  83. func (mc *MasterClient) getCurrentMaster() pb.ServerAddress {
  84. mc.currentMasterLock.RLock()
  85. defer mc.currentMasterLock.RUnlock()
  86. return mc.currentMaster
  87. }
  88. func (mc *MasterClient) setCurrentMaster(master pb.ServerAddress) {
  89. mc.currentMasterLock.Lock()
  90. mc.currentMaster = master
  91. mc.currentMasterLock.Unlock()
  92. }
  93. func (mc *MasterClient) GetMaster() pb.ServerAddress {
  94. mc.WaitUntilConnected()
  95. return mc.getCurrentMaster()
  96. }
  97. func (mc *MasterClient) GetMasters() []pb.ServerAddress {
  98. mc.WaitUntilConnected()
  99. return mc.masters.GetInstances()
  100. }
  101. func (mc *MasterClient) WaitUntilConnected() {
  102. for {
  103. if mc.getCurrentMaster() != "" {
  104. return
  105. }
  106. time.Sleep(time.Duration(rand.Int31n(200)) * time.Millisecond)
  107. print(".")
  108. }
  109. }
  110. func (mc *MasterClient) KeepConnectedToMaster() {
  111. glog.V(1).Infof("%s.%s masterClient bootstraps with masters %v", mc.FilerGroup, mc.clientType, mc.masters)
  112. for {
  113. mc.tryAllMasters()
  114. time.Sleep(time.Second)
  115. }
  116. }
  117. func (mc *MasterClient) FindLeaderFromOtherPeers(myMasterAddress pb.ServerAddress) (leader string) {
  118. for _, master := range mc.masters.GetInstances() {
  119. if master == myMasterAddress {
  120. continue
  121. }
  122. if grpcErr := pb.WithMasterClient(false, master, mc.grpcDialOption, false, func(client master_pb.SeaweedClient) error {
  123. ctx, cancel := context.WithTimeout(context.Background(), 120*time.Millisecond)
  124. defer cancel()
  125. resp, err := client.GetMasterConfiguration(ctx, &master_pb.GetMasterConfigurationRequest{})
  126. if err != nil {
  127. return err
  128. }
  129. leader = resp.Leader
  130. return nil
  131. }); grpcErr != nil {
  132. glog.V(0).Infof("connect to %s: %v", master, grpcErr)
  133. }
  134. if leader != "" {
  135. glog.V(0).Infof("existing leader is %s", leader)
  136. return
  137. }
  138. }
  139. glog.V(0).Infof("No existing leader found!")
  140. return
  141. }
  142. func (mc *MasterClient) tryAllMasters() {
  143. var nextHintedLeader pb.ServerAddress
  144. mc.masters.RefreshBySrvIfAvailable()
  145. for _, master := range mc.masters.GetInstances() {
  146. nextHintedLeader = mc.tryConnectToMaster(master)
  147. for nextHintedLeader != "" {
  148. nextHintedLeader = mc.tryConnectToMaster(nextHintedLeader)
  149. }
  150. mc.setCurrentMaster("")
  151. }
  152. }
// tryConnectToMaster opens a KeepConnected stream to one master and pumps
// volume-location and cluster-node updates from it until the stream fails.
// If the contacted master reports a different leader, that leader's address
// is returned so the caller can retry there; otherwise "" is returned once
// the connection ends.
func (mc *MasterClient) tryConnectToMaster(master pb.ServerAddress) (nextHintedLeader pb.ServerAddress) {
	glog.V(1).Infof("%s.%s masterClient Connecting to master %v", mc.FilerGroup, mc.clientType, master)
	stats.MasterClientConnectCounter.WithLabelValues("total").Inc()
	gprcErr := pb.WithMasterClient(true, master, mc.grpcDialOption, false, func(client master_pb.SeaweedClient) error {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		stream, err := client.KeepConnected(ctx)
		if err != nil {
			glog.V(1).Infof("%s.%s masterClient failed to keep connected to %s: %v", mc.FilerGroup, mc.clientType, master, err)
			stats.MasterClientConnectCounter.WithLabelValues(stats.FailedToKeepConnected).Inc()
			return err
		}

		// Identify this client (group, location, type, address, version) to
		// the master as the first message on the stream.
		if err = stream.Send(&master_pb.KeepConnectedRequest{
			FilerGroup:    mc.FilerGroup,
			DataCenter:    mc.DataCenter,
			Rack:          mc.rack,
			ClientType:    mc.clientType,
			ClientAddress: string(mc.clientHost),
			Version:       util.Version(),
		}); err != nil {
			glog.V(0).Infof("%s.%s masterClient failed to send to %s: %v", mc.FilerGroup, mc.clientType, master, err)
			stats.MasterClientConnectCounter.WithLabelValues(stats.FailedToSend).Inc()
			return err
		}
		glog.V(1).Infof("%s.%s masterClient Connected to %v", mc.FilerGroup, mc.clientType, master)

		// First response decides whether we are talking to the leader.
		resp, err := stream.Recv()
		if err != nil {
			glog.V(0).Infof("%s.%s masterClient failed to receive from %s: %v", mc.FilerGroup, mc.clientType, master, err)
			stats.MasterClientConnectCounter.WithLabelValues(stats.FailedToReceive).Inc()
			return err
		}

		// check if it is the leader to determine whether to reset the vidMap
		if resp.VolumeLocation != nil {
			if resp.VolumeLocation.Leader != "" && string(master) != resp.VolumeLocation.Leader {
				// Not the leader: surface the hint and stop here.
				glog.V(0).Infof("master %v redirected to leader %v", master, resp.VolumeLocation.Leader)
				nextHintedLeader = pb.ServerAddress(resp.VolumeLocation.Leader)
				stats.MasterClientConnectCounter.WithLabelValues(stats.RedirectedToLeader).Inc()
				return nil
			}
			// Talking to the leader: start from a fresh vidMap, then apply
			// the first snapshot before publishing the connection.
			mc.resetVidMap()
			mc.updateVidMap(resp)
		} else {
			mc.resetVidMap()
		}
		// Only now mark the client connected, so WaitUntilConnected callers
		// never see a half-initialized vidMap.
		mc.setCurrentMaster(master)

		// Steady-state receive loop: apply updates until the stream breaks
		// or a leader change is announced.
		for {
			resp, err := stream.Recv()
			if err != nil {
				glog.V(0).Infof("%s.%s masterClient failed to receive from %s: %v", mc.FilerGroup, mc.clientType, master, err)
				stats.MasterClientConnectCounter.WithLabelValues(stats.FailedToReceive).Inc()
				return err
			}

			if resp.VolumeLocation != nil {
				// maybe the leader is changed
				if resp.VolumeLocation.Leader != "" && string(mc.GetMaster()) != resp.VolumeLocation.Leader {
					glog.V(0).Infof("currentMaster %v redirected to leader %v", mc.GetMaster(), resp.VolumeLocation.Leader)
					nextHintedLeader = pb.ServerAddress(resp.VolumeLocation.Leader)
					stats.MasterClientConnectCounter.WithLabelValues(stats.RedirectedToLeader).Inc()
					return nil
				}
				mc.updateVidMap(resp)
			}

			if resp.ClusterNodeUpdate != nil {
				update := resp.ClusterNodeUpdate
				// Hold the read lock so OnPeerUpdate cannot be swapped out
				// mid-invocation by SetOnPeerUpdateFn.
				mc.OnPeerUpdateLock.RLock()
				if mc.OnPeerUpdate != nil {
					if update.FilerGroup == mc.FilerGroup {
						if update.IsAdd {
							glog.V(0).Infof("+ %s@%s noticed %s.%s %s\n", mc.clientType, mc.clientHost, update.FilerGroup, update.NodeType, update.Address)
						} else {
							glog.V(0).Infof("- %s@%s noticed %s.%s %s\n", mc.clientType, mc.clientHost, update.FilerGroup, update.NodeType, update.Address)
						}
						stats.MasterClientConnectCounter.WithLabelValues(stats.OnPeerUpdate).Inc()
						mc.OnPeerUpdate(update, time.Now())
					}
				}
				mc.OnPeerUpdateLock.RUnlock()
			}
		}
	})
	if gprcErr != nil {
		stats.MasterClientConnectCounter.WithLabelValues(stats.Failed).Inc()
		glog.V(1).Infof("%s.%s masterClient failed to connect with master %v: %v", mc.FilerGroup, mc.clientType, master, gprcErr)
	}
	return
}
  239. func (mc *MasterClient) updateVidMap(resp *master_pb.KeepConnectedResponse) {
  240. if resp.VolumeLocation.IsEmptyUrl() {
  241. glog.V(0).Infof("updateVidMap ignore short heartbeat: %+v", resp)
  242. return
  243. }
  244. // process new volume location
  245. loc := Location{
  246. Url: resp.VolumeLocation.Url,
  247. PublicUrl: resp.VolumeLocation.PublicUrl,
  248. DataCenter: resp.VolumeLocation.DataCenter,
  249. GrpcPort: int(resp.VolumeLocation.GrpcPort),
  250. }
  251. for _, newVid := range resp.VolumeLocation.NewVids {
  252. glog.V(2).Infof("%s.%s: %s masterClient adds volume %d", mc.FilerGroup, mc.clientType, loc.Url, newVid)
  253. mc.addLocation(newVid, loc)
  254. }
  255. for _, deletedVid := range resp.VolumeLocation.DeletedVids {
  256. glog.V(2).Infof("%s.%s: %s masterClient removes volume %d", mc.FilerGroup, mc.clientType, loc.Url, deletedVid)
  257. mc.deleteLocation(deletedVid, loc)
  258. }
  259. for _, newEcVid := range resp.VolumeLocation.NewEcVids {
  260. glog.V(2).Infof("%s.%s: %s masterClient adds ec volume %d", mc.FilerGroup, mc.clientType, loc.Url, newEcVid)
  261. mc.addEcLocation(newEcVid, loc)
  262. }
  263. for _, deletedEcVid := range resp.VolumeLocation.DeletedEcVids {
  264. glog.V(2).Infof("%s.%s: %s masterClient removes ec volume %d", mc.FilerGroup, mc.clientType, loc.Url, deletedEcVid)
  265. mc.deleteEcLocation(deletedEcVid, loc)
  266. }
  267. glog.V(1).Infof("updateVidMap(%s) %s.%s: %s volume add: %d, del: %d, add ec: %d del ec: %d",
  268. resp.VolumeLocation.DataCenter, mc.FilerGroup, mc.clientType, loc.Url,
  269. len(resp.VolumeLocation.NewVids), len(resp.VolumeLocation.DeletedVids),
  270. len(resp.VolumeLocation.NewEcVids), len(resp.VolumeLocation.DeletedEcVids))
  271. }
  272. func (mc *MasterClient) WithClient(streamingMode bool, fn func(client master_pb.SeaweedClient) error) error {
  273. return util.Retry("master grpc", func() error {
  274. return pb.WithMasterClient(streamingMode, mc.GetMaster(), mc.grpcDialOption, false, func(client master_pb.SeaweedClient) error {
  275. return fn(client)
  276. })
  277. })
  278. }
// resetVidMap replaces the active vidMap with a fresh, empty one while
// keeping the previous map (and its predecessors) reachable through the
// `cache` chain, so recent lookups can still resolve across a reconnect.
// The chain is then trimmed to at most vidMapCacheSize generations.
// NOTE(review): the swap of mc.vidMap is not guarded by a lock here;
// presumably concurrent readers tolerate a briefly stale map — confirm.
func (mc *MasterClient) resetVidMap() {
	// Snapshot the current map as the newest entry of the history chain.
	tail := &vidMap{
		vid2Locations:   mc.vid2Locations,
		ecVid2Locations: mc.ecVid2Locations,
		DataCenter:      mc.DataCenter,
		cache:           mc.cache,
	}

	nvm := newVidMap(mc.DataCenter)
	nvm.cache = tail
	mc.vidMap = nvm

	//trim
	// Walk at most vidMapCacheSize-1 links, then cut the chain so no more
	// than vidMapCacheSize old generations stay reachable.
	for i := 0; i < mc.vidMapCacheSize && tail.cache != nil; i++ {
		if i == mc.vidMapCacheSize-1 {
			tail.cache = nil
		} else {
			tail = tail.cache
		}
	}
}
  297. }