package weed_server

// https://yusufs.medium.com/creating-distributed-kv-database-by-implementing-raft-consensus-using-golang-d0884eef2e28
// https://github.com/Jille/raft-grpc-example/blob/cd5bcab0218f008e044fbeee4facdd01b06018ad/application.go#L18

import (
	"fmt"
	"math/rand/v2"
	"os"
	"path"
	"path/filepath"
	"sort"
	"strings"
	"time"

	transport "github.com/Jille/raft-grpc-transport"
	"github.com/armon/go-metrics"
	"github.com/armon/go-metrics/prometheus"
	"github.com/hashicorp/raft"
	boltdb "github.com/hashicorp/raft-boltdb/v2"
	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/pb"
	"github.com/seaweedfs/seaweedfs/weed/stats"
	"google.golang.org/grpc"
)
const (
	ldbFile            = "logs.dat"
	sdbFile            = "stable.dat"
	updatePeersTimeout = 15 * time.Minute
)
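
// getPeerIdx returns this server's position among the sorted peer addresses,
// or -1 if it is not in the map. The stable ordering gives each peer a
// deterministic index, used below to stagger cluster bootstrap timing.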
func getPeerIdx(self pb.ServerAddress, mapPeers map[string]pb.ServerAddress) int {
	peers := make([]pb.ServerAddress, 0, len(mapPeers))
	for _, peer := range mapPeers {
		peers = append(peers, peer)
	}
	sort.Slice(peers, func(i, j int) bool {
		return strings.Compare(string(peers[i]), string(peers[j])) < 0
	})
	for i, peer := range peers {
		if string(peer) == string(self) {
			return i
		}
	}
	return -1
}
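
// AddPeersConfiguration builds a raft.Configuration listing every configured
// peer as a voting member, for bootstrapping a new cluster.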
func (s *RaftServer) AddPeersConfiguration() (cfg raft.Configuration) {
	for _, peer := range s.peers {
		cfg.Servers = append(cfg.Servers, raft.Server{
			Suffrage: raft.Voter,
			ID:       raft.ServerID(peer),
			Address:  raft.ServerAddress(peer.ToGrpcAddress()),
		})
	}
	return cfg
}
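
// monitorLeaderLoop watches this node's leadership-change events. On gaining
// leadership it optionally reconciles the peer list once, then raises the
// topology barrier; on losing leadership it resets the barrier.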
func (s *RaftServer) monitorLeaderLoop(updatePeers bool) {
	for {
		prevLeader, _ := s.RaftHashicorp.LeaderWithID()
		select {
		case isLeader := <-s.RaftHashicorp.LeaderCh():
			leader, _ := s.RaftHashicorp.LeaderWithID()
			if isLeader {
				if updatePeers {
					s.updatePeers()
					updatePeers = false
				}
				s.topo.DoBarrier()
				stats.MasterLeaderChangeCounter.WithLabelValues(fmt.Sprintf("%+v", leader)).Inc()
			} else {
				s.topo.BarrierReset()
			}
			glog.V(0).Infof("is leader %+v change event: %+v => %+v", isLeader, prevLeader, leader)
		}
	}
}
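
// updatePeers reconciles raft membership with the configured peer list:
// peers missing from the current configuration are added as voters, and
// servers no longer in the peer list (including a stale entry for this
// leader itself) are removed.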
func (s *RaftServer) updatePeers() {
	peerLeader := string(s.serverAddr)
	existsPeerName := make(map[string]bool)
	for _, server := range s.RaftHashicorp.GetConfiguration().Configuration().Servers {
		if string(server.ID) == peerLeader {
			continue
		}
		existsPeerName[string(server.ID)] = true
	}
	for _, peer := range s.peers {
		peerName := string(peer)
		if peerName == peerLeader || existsPeerName[peerName] {
			continue
		}
		glog.V(0).Infof("adding new peer: %s", peerName)
		s.RaftHashicorp.AddVoter(
			raft.ServerID(peerName), raft.ServerAddress(peer.ToGrpcAddress()), 0, 0)
	}
	for peer := range existsPeerName {
		if _, found := s.peers[peer]; !found {
			glog.V(0).Infof("removing old peer: %s", peer)
			s.RaftHashicorp.RemoveServer(raft.ServerID(peer), 0, 0)
		}
	}
	if _, found := s.peers[peerLeader]; !found {
		glog.V(0).Infof("removing old leader peer: %s", peerLeader)
		s.RaftHashicorp.RemoveServer(raft.ServerID(peerLeader), 0, 0)
	}
}
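
// NewHashicorpRaftServer creates a raft node backed by BoltDB log and stable
// stores, file-based snapshots, and a gRPC transport. With RaftBootstrap set
// (or an empty stored configuration) it bootstraps a fresh cluster, staggering
// peers by index to avoid simultaneous bootstraps; otherwise it reconciles the
// peer list once this node becomes leader.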
func NewHashicorpRaftServer(option *RaftServerOption) (*RaftServer, error) {
	s := &RaftServer{
		peers:      option.Peers,
		serverAddr: option.ServerAddr,
		dataDir:    option.DataDir,
		topo:       option.Topo,
	}

	c := raft.DefaultConfig()
	c.LocalID = raft.ServerID(s.serverAddr) // TODO: the IP:port address may change
	// Jitter the heartbeat timeout by up to 25% so peers do not time out in lockstep.
	c.HeartbeatTimeout = time.Duration(float64(option.HeartbeatInterval) * (rand.Float64()*0.25 + 1))
	c.ElectionTimeout = option.ElectionTimeout
	if c.LeaderLeaseTimeout > c.HeartbeatTimeout {
		c.LeaderLeaseTimeout = c.HeartbeatTimeout
	}
	// Map glog verbosity onto hashicorp/raft log levels.
	if glog.V(4) {
		c.LogLevel = "Debug"
	} else if glog.V(2) {
		c.LogLevel = "Info"
	} else if glog.V(1) {
		c.LogLevel = "Warn"
	} else if glog.V(0) {
		c.LogLevel = "Error"
	}
	if err := raft.ValidateConfig(c); err != nil {
		return nil, fmt.Errorf(`raft.ValidateConfig: %v`, err)
	}
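
	// On explicit bootstrap, wipe any stale log, stable-store, and snapshot
	// state so the node starts from a clean slate.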
	if option.RaftBootstrap {
		os.RemoveAll(path.Join(s.dataDir, ldbFile))
		os.RemoveAll(path.Join(s.dataDir, sdbFile))
		os.RemoveAll(path.Join(s.dataDir, "snapshots"))
	}
	if err := os.MkdirAll(path.Join(s.dataDir, "snapshots"), os.ModePerm); err != nil {
		return nil, err
	}
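
	// Raft logs and stable state live in BoltDB files; snapshots are kept on
	// disk, retaining the latest three.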
	baseDir := s.dataDir

	ldb, err := boltdb.NewBoltStore(filepath.Join(baseDir, ldbFile))
	if err != nil {
		return nil, fmt.Errorf(`boltdb.NewBoltStore(%q): %v`, filepath.Join(baseDir, ldbFile), err)
	}

	sdb, err := boltdb.NewBoltStore(filepath.Join(baseDir, sdbFile))
	if err != nil {
		return nil, fmt.Errorf(`boltdb.NewBoltStore(%q): %v`, filepath.Join(baseDir, sdbFile), err)
	}

	fss, err := raft.NewFileSnapshotStore(baseDir, 3, os.Stderr)
	if err != nil {
		return nil, fmt.Errorf(`raft.NewFileSnapshotStore(%q, ...): %v`, baseDir, err)
	}
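
	// Wire the gRPC transport and the topology-backed state machine into the raft node.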
	s.TransportManager = transport.New(raft.ServerAddress(s.serverAddr), []grpc.DialOption{option.GrpcDialOption})

	stateMachine := StateMachine{topo: option.Topo}
	s.RaftHashicorp, err = raft.NewRaft(c, &stateMachine, ldb, sdb, fss, s.TransportManager.Transport())
	if err != nil {
		return nil, fmt.Errorf("raft.NewRaft: %v", err)
	}

	updatePeers := false
	if option.RaftBootstrap || len(s.RaftHashicorp.GetConfiguration().Configuration().Servers) == 0 {
		cfg := s.AddPeersConfiguration()
		// Stagger bootstrap by peer index, in case all servers start at the same time.
		peerIdx := getPeerIdx(s.serverAddr, s.peers)
		timeSleep := time.Duration(float64(c.LeaderLeaseTimeout) * (rand.Float64()*0.25 + 1) * float64(peerIdx))
		glog.V(0).Infof("Bootstrapping idx: %d sleep: %v new cluster: %+v", peerIdx, timeSleep, cfg)
		time.Sleep(timeSleep)
		f := s.RaftHashicorp.BootstrapCluster(cfg)
		if err := f.Error(); err != nil {
			return nil, fmt.Errorf("raft.Raft.BootstrapCluster: %v", err)
		}
	} else {
		updatePeers = true
	}
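
	// When joining an existing cluster rather than bootstrapping, reconcile
	// the peer list once this node becomes leader.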
	go s.monitorLeaderLoop(updatePeers)

	if glog.V(4) {
		// Periodically dump the peer configuration this node knows about, for debugging.
		ticker := time.NewTicker(c.HeartbeatTimeout * 10)
		go func() {
			for range ticker.C {
				cfuture := s.RaftHashicorp.GetConfiguration()
				if err := cfuture.Error(); err != nil {
					glog.Fatalf("error getting config: %s", err)
				}
				configuration := cfuture.Configuration()
				glog.V(4).Infof("Showing peers known by %s:\n%+v", s.RaftHashicorp.String(), configuration.Servers)
			}
		}()
	}
	// Configure a prometheus sink as the raft metrics sink
	if sink, err := prometheus.NewPrometheusSinkFrom(prometheus.PrometheusOpts{
		Registerer: stats.Gather,
	}); err != nil {
		return nil, fmt.Errorf("NewPrometheusSink: %v", err)
	} else {
		metricsConf := metrics.DefaultConfig(stats.Namespace)
		metricsConf.EnableRuntimeMetrics = false
		if _, err = metrics.NewGlobal(metricsConf, sink); err != nil {
			return nil, fmt.Errorf("metrics.NewGlobal: %v", err)
		}
	}

	return s, nil
}