Chris Lu
3 years ago
committed by
GitHub
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
33 changed files with 1816 additions and 251 deletions
Changed files (lines changed per file):
- docker/Makefile (3)
- docker/compose/local-hashicorp-raft-compose.yml (89)
- go.mod (11)
- go.sum (55)
- weed/cluster/cluster.go (18)
- weed/command/master.go (27)
- weed/pb/filer_pb/filer.pb.go (4)
- weed/pb/filer_pb/filer_grpc.pb.go (1)
- weed/pb/iam_pb/iam.pb.go (4)
- weed/pb/iam_pb/iam_grpc.pb.go (1)
- weed/pb/master.proto (32)
- weed/pb/master_pb/master.pb.go (805)
- weed/pb/master_pb/master_grpc.pb.go (109)
- weed/pb/messaging_pb/messaging.pb.go (4)
- weed/pb/messaging_pb/messaging_grpc.pb.go (1)
- weed/pb/mount_pb/mount.pb.go (4)
- weed/pb/mount_pb/mount_grpc.pb.go (1)
- weed/pb/remote_pb/remote.pb.go (4)
- weed/pb/volume_server_pb/volume_server.pb.go (4)
- weed/pb/volume_server_pb/volume_server_grpc.pb.go (1)
- weed/server/master_grpc_server_raft.go (66)
- weed/server/master_server.go (112)
- weed/server/master_server_handlers_ui.go (22)
- weed/server/master_ui/masterNewRaft.html (121)
- weed/server/master_ui/templates.go (4)
- weed/server/raft_hashicorp.go (183)
- weed/server/raft_server.go (48)
- weed/server/raft_server_handlers.go (8)
- weed/shell/command_cluster_raft_add.go (59)
- weed/shell/command_cluster_raft_ps.go (51)
- weed/shell/command_cluster_raft_remove.go (56)
- weed/topology/cluster_commands.go (20)
- weed/topology/topology.go (19)
docker/compose/local-hashicorp-raft-compose.yml
@@ -0,0 +1,89 @@
version: '2'

services:
  master0:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
    command: "-v=4 master -volumeSizeLimitMB 100 -raftHashicorp -ip=master0 -port=9333 -peers=master1:9334,master2:9335 -mdir=/data"
    volumes:
      - ./master/0:/data
    environment:
      WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
      WEED_MASTER_VOLUME_GROWTH_COPY_2: 2
      WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
  master1:
    image: chrislusf/seaweedfs:local
    ports:
      - 9334:9334
      - 19334:19334
    command: "-v=4 master -volumeSizeLimitMB 100 -raftHashicorp -ip=master1 -port=9334 -peers=master0:9333,master2:9335 -mdir=/data"
    volumes:
      - ./master/1:/data
    environment:
      WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
      WEED_MASTER_VOLUME_GROWTH_COPY_2: 2
      WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
  master2:
    image: chrislusf/seaweedfs:local
    ports:
      - 9335:9335
      - 19335:19335
    command: "-v=4 master -volumeSizeLimitMB 100 -raftHashicorp -ip=master2 -port=9335 -peers=master0:9333,master1:9334 -mdir=/data"
    volumes:
      - ./master/2:/data
    environment:
      WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
      WEED_MASTER_VOLUME_GROWTH_COPY_2: 2
      WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
  volume1:
    image: chrislusf/seaweedfs:local
    ports:
      - 8080:8080
      - 18080:18080
    command: 'volume -dataCenter=dc1 -rack=v1 -mserver="master0:9333,master1:9334,master2:9335" -port=8080 -ip=volume1 -publicUrl=localhost:8080 -preStopSeconds=1'
    depends_on:
      - master0
      - master1
  volume2:
    image: chrislusf/seaweedfs:local
    ports:
      - 8082:8082
      - 18082:18082
    command: 'volume -dataCenter=dc2 -rack=v2 -mserver="master0:9333,master1:9334,master2:9335" -port=8082 -ip=volume2 -publicUrl=localhost:8082 -preStopSeconds=1'
    depends_on:
      - master0
      - master1
  volume3:
    image: chrislusf/seaweedfs:local
    ports:
      - 8083:8083
      - 18083:18083
    command: 'volume -dataCenter=dc3 -rack=v3 -mserver="master0:9333,master1:9334,master2:9335" -port=8083 -ip=volume3 -publicUrl=localhost:8083 -preStopSeconds=1'
    depends_on:
      - master0
      - master1
  filer:
    image: chrislusf/seaweedfs:local
    ports:
      - 8888:8888
      - 18888:18888
      - 8111:8111
    command: 'filer -defaultReplicaPlacement=100 -iam -master="master0:9333,master1:9334,master2:9335"'
    depends_on:
      - master0
      - master1
      - volume1
      - volume2
  s3:
    image: chrislusf/seaweedfs:local
    ports:
      - 8333:8333
    command: '-v=9 s3 -ip.bind="s3" -filer="filer:8888"'
    depends_on:
      - master0
      - master1
      - volume1
      - volume2
      - filer
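This compose file assumes a chrislusf/seaweedfs:local image built from this repository (the docker/Makefile is also touched by this change). With that image in place, running "docker-compose -f local-hashicorp-raft-compose.yml up" from docker/compose should start the three -raftHashicorp masters, the three volume servers, the filer, and the S3 gateway, with each master's raft state persisted under ./master/<n>.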
weed/server/master_grpc_server_raft.go
@@ -0,0 +1,66 @@
package weed_server

import (
	"context"
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/cluster"
	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
	"github.com/hashicorp/raft"
)

// RaftListClusterServers returns the id, address, and suffrage of every server
// in the current hashicorp/raft configuration.
func (ms *MasterServer) RaftListClusterServers(ctx context.Context, req *master_pb.RaftListClusterServersRequest) (*master_pb.RaftListClusterServersResponse, error) {
	resp := &master_pb.RaftListClusterServersResponse{}

	servers := ms.Topo.HashicorpRaft.GetConfiguration().Configuration().Servers

	for _, server := range servers {
		resp.ClusterServers = append(resp.ClusterServers, &master_pb.RaftListClusterServersResponse_ClusterServers{
			Id:       string(server.ID),
			Address:  string(server.Address),
			Suffrage: server.Suffrage.String(),
		})
	}
	return resp, nil
}

// RaftAddServer adds a voter or non-voter to the raft cluster.
// Only the current leader can apply membership changes.
func (ms *MasterServer) RaftAddServer(ctx context.Context, req *master_pb.RaftAddServerRequest) (*master_pb.RaftAddServerResponse, error) {
	resp := &master_pb.RaftAddServerResponse{}
	if ms.Topo.HashicorpRaft.State() != raft.Leader {
		return nil, fmt.Errorf("raft add server %s failed: %s is not the current leader", req.Id, ms.Topo.HashicorpRaft.String())
	}

	var idxFuture raft.IndexFuture
	if req.Voter {
		idxFuture = ms.Topo.HashicorpRaft.AddVoter(raft.ServerID(req.Id), raft.ServerAddress(req.Address), 0, 0)
	} else {
		idxFuture = ms.Topo.HashicorpRaft.AddNonvoter(raft.ServerID(req.Id), raft.ServerAddress(req.Address), 0, 0)
	}

	if err := idxFuture.Error(); err != nil {
		return nil, err
	}
	return resp, nil
}

// RaftRemoveServer removes a server from the raft cluster. Unless req.Force is
// set, removal is rejected while that master still has a live client connection.
func (ms *MasterServer) RaftRemoveServer(ctx context.Context, req *master_pb.RaftRemoveServerRequest) (*master_pb.RaftRemoveServerResponse, error) {
	resp := &master_pb.RaftRemoveServerResponse{}

	if ms.Topo.HashicorpRaft.State() != raft.Leader {
		return nil, fmt.Errorf("raft remove server %s failed: %s is not the current leader", req.Id, ms.Topo.HashicorpRaft.String())
	}

	if !req.Force {
		ms.clientChansLock.RLock()
		_, ok := ms.clientChans[fmt.Sprintf("%s@%s", cluster.MasterType, req.Id)]
		ms.clientChansLock.RUnlock()
		if ok {
			return resp, fmt.Errorf("raft remove server %s failed: client connection to master exists", req.Id)
		}
	}

	idxFuture := ms.Topo.HashicorpRaft.RemoveServer(raft.ServerID(req.Id), 0, 0)
	if err := idxFuture.Error(); err != nil {
		return nil, err
	}
	return resp, nil
}
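These three RPCs are what the new cluster.raft.* shell commands (further below) invoke through commandEnv.MasterClient. As a rough standalone sketch, they can also be driven over gRPC directly; the localhost:19333 address (master0's gRPC port in the compose file above), the insecure dial option, and the NewSeaweedClient constructor name are assumptions for illustration, not part of this change.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// localhost:19333 is assumed to be master0's gRPC port, as exposed in the compose file above.
	conn, err := grpc.Dial("localhost:19333", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := master_pb.NewSeaweedClient(conn) // assumed generated constructor for the Seaweed master service

	// List the current raft members, mirroring cluster.raft.ps.
	list, err := client.RaftListClusterServers(context.Background(), &master_pb.RaftListClusterServersRequest{})
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range list.ClusterServers {
		fmt.Printf("%s %s (%s)\n", s.Id, s.Address, s.Suffrage)
	}

	// Add a non-voter, mirroring cluster.raft.add; the id and address are placeholders.
	if _, err := client.RaftAddServer(context.Background(), &master_pb.RaftAddServerRequest{
		Id:      "master3:9336",
		Address: "master3:19336",
		Voter:   false,
	}); err != nil {
		log.Fatal(err)
	}
}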
weed/server/master_ui/masterNewRaft.html
@@ -0,0 +1,121 @@
<!DOCTYPE html>
<html>
<head>
  <title>SeaweedFS {{ .Version }}</title>
  <link rel="stylesheet" href="/seaweedfsstatic/bootstrap/3.3.1/css/bootstrap.min.css">
</head>
<body>
<div class="container">
  <div class="page-header">
    <h1>
      <a href="https://github.com/chrislusf/seaweedfs"><img src="/seaweedfsstatic/seaweed50x50.png"></a>
      SeaweedFS <small>{{ .Version }}</small>
    </h1>
  </div>

  <div class="row">
    <div class="col-sm-6">
      <h2>Cluster status</h2>
      <table class="table table-condensed table-striped">
        <tbody>
        <tr>
          <th>Volume Size Limit</th>
          <td>{{ .VolumeSizeLimitMB }}MB</td>
        </tr>
        <tr>
          <th>Free</th>
          <td>{{ .Topology.Free }}</td>
        </tr>
        <tr>
          <th>Max</th>
          <td>{{ .Topology.Max }}</td>
        </tr>
        {{ with .RaftServer }}
        <tr>
          <th>Leader</th>
          <td><a href="http://{{ .Leader }}">{{ .Leader }}</a></td>
        </tr>
        <tr>
          <th>Other Masters</th>
          <td class="col-sm-5">
            <ul class="list-unstyled">
              {{ range $k, $p := .GetConfiguration.Configuration.Servers }}
              <li><a href="http://{{ $p.ID }}/ui/index.html">{{ $p.ID }}</a></li>
              {{ end }}
            </ul>
          </td>
        </tr>
        {{ end }}
        </tbody>
      </table>
    </div>

    <div class="col-sm-6">
      <h2>System Stats</h2>
      <table class="table table-condensed table-striped">
        <tr>
          <th>Concurrent Connections</th>
          <td>{{ .Counters.Connections.WeekCounter.Sum }}</td>
        </tr>
        {{ range $key, $val := .Stats }}
        <tr>
          <th>{{ $key }}</th>
          <td>{{ $val }}</td>
        </tr>
        {{ end }}
      </table>
      <h2>Raft Stats</h2>
      <table class="table table-condensed table-striped">
        <tr>
          <th>applied_index</th>
          <td>{{ .RaftServer.Stats.applied_index }}</td>
        </tr>
        <tr>
          <th>last_log_term</th>
          <td>{{ .RaftServer.Stats.last_log_term }}</td>
        </tr>
      </table>
    </div>
  </div>

  <div class="row">
    <h2>Topology</h2>
    <table class="table table-striped">
      <thead>
      <tr>
        <th>Data Center</th>
        <th>Rack</th>
        <th>RemoteAddr</th>
        <th>#Volumes</th>
        <th>Volume Ids</th>
        <th>#ErasureCodingShards</th>
        <th>Max</th>
      </tr>
      </thead>
      <tbody>
      {{ range $dc_index, $dc := .Topology.DataCenters }}
      {{ range $rack_index, $rack := $dc.Racks }}
      {{ range $dn_index, $dn := $rack.DataNodes }}
      <tr>
        <td><code>{{ $dc.Id }}</code></td>
        <td>{{ $rack.Id }}</td>
        <td><a href="http://{{ $dn.Url }}/ui/index.html">{{ $dn.Url }}</a>
          {{ if ne $dn.PublicUrl $dn.Url }}
          / <a href="http://{{ $dn.PublicUrl }}/ui/index.html">{{ $dn.PublicUrl }}</a>
          {{ end }}
        </td>
        <td>{{ $dn.Volumes }}</td>
        <td>{{ $dn.VolumeIds }}</td>
        <td>{{ $dn.EcShards }}</td>
        <td>{{ $dn.Max }}</td>
      </tr>
      {{ end }}
      {{ end }}
      {{ end }}
      </tbody>
    </table>
  </div>

</div>
</body>
</html>
weed/server/raft_hashicorp.go
@@ -0,0 +1,183 @@
package weed_server

// https://yusufs.medium.com/creating-distributed-kv-database-by-implementing-raft-consensus-using-golang-d0884eef2e28
// https://github.com/Jille/raft-grpc-example/blob/cd5bcab0218f008e044fbeee4facdd01b06018ad/application.go#L18

import (
	"fmt"
	"math/rand"
	"os"
	"path"
	"path/filepath"
	"sort"
	"strings"
	"time"

	transport "github.com/Jille/raft-grpc-transport"
	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb"
	"github.com/hashicorp/raft"
	boltdb "github.com/hashicorp/raft-boltdb"
	"google.golang.org/grpc"
)

const (
	ldbFile            = "logs.dat"
	sdbFile            = "stable.dat"
	updatePeersTimeout = 15 * time.Minute
)

// getPeerIdx returns this server's index in the sorted peer list, or -1 if absent.
func getPeerIdx(self pb.ServerAddress, mapPeers map[string]pb.ServerAddress) int {
	peers := make([]pb.ServerAddress, 0, len(mapPeers))
	for _, peer := range mapPeers {
		peers = append(peers, peer)
	}
	sort.Slice(peers, func(i, j int) bool {
		return strings.Compare(string(peers[i]), string(peers[j])) < 0
	})
	for i, peer := range peers {
		if string(peer) == string(self) {
			return i
		}
	}
	return -1
}

// AddPeersConfiguration builds the initial raft configuration with every configured peer as a voter.
func (s *RaftServer) AddPeersConfiguration() (cfg raft.Configuration) {
	for _, peer := range s.peers {
		cfg.Servers = append(cfg.Servers, raft.Server{
			Suffrage: raft.Voter,
			ID:       raft.ServerID(peer),
			Address:  raft.ServerAddress(peer.ToGrpcAddress()),
		})
	}
	return cfg
}

// UpdatePeers waits until this server becomes leader, then reconciles the raft
// membership with the configured peer list: missing peers are added as voters and
// servers that are no longer configured are removed. It gives up after updatePeersTimeout.
func (s *RaftServer) UpdatePeers() {
	for {
		select {
		case isLeader := <-s.RaftHashicorp.LeaderCh():
			if isLeader {
				peerLeader := string(s.serverAddr)
				existsPeerName := make(map[string]bool)
				for _, server := range s.RaftHashicorp.GetConfiguration().Configuration().Servers {
					if string(server.ID) == peerLeader {
						continue
					}
					existsPeerName[string(server.ID)] = true
				}
				for _, peer := range s.peers {
					peerName := string(peer)
					if peerName == peerLeader || existsPeerName[peerName] {
						continue
					}
					glog.V(0).Infof("adding new peer: %s", peerName)
					s.RaftHashicorp.AddVoter(
						raft.ServerID(peerName), raft.ServerAddress(peer.ToGrpcAddress()), 0, 0)
				}
				for peer := range existsPeerName {
					if _, found := s.peers[peer]; !found {
						glog.V(0).Infof("removing old peer: %s", peer)
						s.RaftHashicorp.RemoveServer(raft.ServerID(peer), 0, 0)
					}
				}
				if _, found := s.peers[peerLeader]; !found {
					glog.V(0).Infof("removing old leader peer: %s", peerLeader)
					s.RaftHashicorp.RemoveServer(raft.ServerID(peerLeader), 0, 0)
				}
			}
			return
		case <-time.After(updatePeersTimeout):
			return
		}
	}
}

func NewHashicorpRaftServer(option *RaftServerOption) (*RaftServer, error) {
	s := &RaftServer{
		peers:      option.Peers,
		serverAddr: option.ServerAddr,
		dataDir:    option.DataDir,
		topo:       option.Topo,
	}

	c := raft.DefaultConfig()
	c.LocalID = raft.ServerID(s.serverAddr) // TODO: maybe the IP:port address will change
	c.HeartbeatTimeout = time.Duration(float64(option.HeartbeatInterval) * (rand.Float64()*0.25 + 1))
	c.ElectionTimeout = option.ElectionTimeout
	if c.LeaderLeaseTimeout > c.HeartbeatTimeout {
		c.LeaderLeaseTimeout = c.HeartbeatTimeout
	}
	if glog.V(4) {
		c.LogLevel = "Debug"
	} else if glog.V(2) {
		c.LogLevel = "Info"
	} else if glog.V(1) {
		c.LogLevel = "Warn"
	} else if glog.V(0) {
		c.LogLevel = "Error"
	}

	if option.RaftBootstrap {
		os.RemoveAll(path.Join(s.dataDir, ldbFile))
		os.RemoveAll(path.Join(s.dataDir, sdbFile))
		os.RemoveAll(path.Join(s.dataDir, "snapshot"))
	}
	baseDir := s.dataDir

	ldb, err := boltdb.NewBoltStore(filepath.Join(baseDir, ldbFile))
	if err != nil {
		return nil, fmt.Errorf(`boltdb.NewBoltStore(%q): %v`, filepath.Join(baseDir, ldbFile), err)
	}

	sdb, err := boltdb.NewBoltStore(filepath.Join(baseDir, sdbFile))
	if err != nil {
		return nil, fmt.Errorf(`boltdb.NewBoltStore(%q): %v`, filepath.Join(baseDir, sdbFile), err)
	}

	fss, err := raft.NewFileSnapshotStore(baseDir, 3, os.Stderr)
	if err != nil {
		return nil, fmt.Errorf(`raft.NewFileSnapshotStore(%q, ...): %v`, baseDir, err)
	}

	s.TransportManager = transport.New(raft.ServerAddress(s.serverAddr), []grpc.DialOption{option.GrpcDialOption})

	stateMachine := StateMachine{topo: option.Topo}
	s.RaftHashicorp, err = raft.NewRaft(c, &stateMachine, ldb, sdb, fss, s.TransportManager.Transport())
	if err != nil {
		return nil, fmt.Errorf("raft.NewRaft: %v", err)
	}
	if option.RaftBootstrap || len(s.RaftHashicorp.GetConfiguration().Configuration().Servers) == 0 {
		cfg := s.AddPeersConfiguration()
		// Stagger bootstrapping by peer index, in case all servers do this at the same time.
		peerIdx := getPeerIdx(s.serverAddr, s.peers)
		timeSleep := time.Duration(float64(c.LeaderLeaseTimeout) * (rand.Float64()*0.25 + 1) * float64(peerIdx))
		glog.V(0).Infof("Bootstrapping idx: %d sleep: %v new cluster: %+v", peerIdx, timeSleep, cfg)
		time.Sleep(timeSleep)
		f := s.RaftHashicorp.BootstrapCluster(cfg)
		if err := f.Error(); err != nil {
			return nil, fmt.Errorf("raft.Raft.BootstrapCluster: %v", err)
		}
	} else {
		go s.UpdatePeers()
	}

	ticker := time.NewTicker(c.HeartbeatTimeout * 10)
	if glog.V(4) {
		go func() {
			for {
				select {
				case <-ticker.C:
					cfuture := s.RaftHashicorp.GetConfiguration()
					if err = cfuture.Error(); err != nil {
						glog.Fatalf("error getting config: %s", err)
					}
					configuration := cfuture.Configuration()
					glog.V(4).Infof("Showing peers known by %s:\n%+v", s.RaftHashicorp.String(), configuration.Servers)
				}
			}
		}()
	}

	return s, nil
}
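The bootstrap branch above avoids having every master call BootstrapCluster at the same instant: each server finds its index in the sorted peer list and sleeps roughly index * LeaderLeaseTimeout (with jitter) before bootstrapping, so the lowest-sorted peer starts immediately and the others trail behind. A minimal standalone sketch of that staggering, with illustrative peer names and a 500ms lease value:

package main

import (
	"fmt"
	"math/rand"
	"sort"
	"time"
)

// peerIdx mirrors getPeerIdx above: position of this server in the sorted peer list.
func peerIdx(self string, peers []string) int {
	sort.Strings(peers)
	for i, p := range peers {
		if p == self {
			return i
		}
	}
	return -1
}

func main() {
	peers := []string{"master1:9334", "master0:9333", "master2:9335"}
	leaderLease := 500 * time.Millisecond // illustrative value, not read from raft here

	for _, self := range []string{"master0:9333", "master1:9334", "master2:9335"} {
		idx := peerIdx(self, append([]string(nil), peers...))
		// Same shape as the timeSleep computation above: lease * (1..1.25 jitter) * index.
		sleep := time.Duration(float64(leaderLease) * (rand.Float64()*0.25 + 1) * float64(idx))
		fmt.Printf("%s bootstraps after ~%v (idx %d)\n", self, sleep, idx)
	}
}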
weed/shell/command_cluster_raft_add.go
@@ -0,0 +1,59 @@
package shell

import (
	"context"
	"flag"
	"fmt"
	"io"

	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
)

func init() {
	Commands = append(Commands, &commandRaftServerAdd{})
}

type commandRaftServerAdd struct {
}

func (c *commandRaftServerAdd) Name() string {
	return "cluster.raft.add"
}

func (c *commandRaftServerAdd) Help() string {
	return `add a server to the raft cluster

	Example:
		cluster.raft.add -id <server_name> -address <server_host:port> -voter
	`
}

func (c *commandRaftServerAdd) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {

	raftServerAddCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
	serverId := raftServerAddCommand.String("id", "", "server id")
	serverAddress := raftServerAddCommand.String("address", "", "server grpc address")
	serverVoter := raftServerAddCommand.Bool("voter", true, "assign it a vote")
	if err = raftServerAddCommand.Parse(args); err != nil {
		return nil
	}

	if *serverId == "" || *serverAddress == "" {
		return fmt.Errorf("empty server id or address")
	}

	err = commandEnv.MasterClient.WithClient(false, func(client master_pb.SeaweedClient) error {
		_, err := client.RaftAddServer(context.Background(), &master_pb.RaftAddServerRequest{
			Id:      *serverId,
			Address: *serverAddress,
			Voter:   *serverVoter,
		})
		if err != nil {
			return fmt.Errorf("raft add server: %v", err)
		}
		println("added server", *serverId)
		return nil
	})

	return err
}
weed/shell/command_cluster_raft_ps.go
@@ -0,0 +1,51 @@
package shell

import (
	"context"
	"flag"
	"fmt"
	"io"

	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
)

func init() {
	Commands = append(Commands, &commandRaftClusterPs{})
}

type commandRaftClusterPs struct {
}

func (c *commandRaftClusterPs) Name() string {
	return "cluster.raft.ps"
}

func (c *commandRaftClusterPs) Help() string {
	return `check current raft cluster status

	cluster.raft.ps
	`
}

func (c *commandRaftClusterPs) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {

	raftClusterPsCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
	if err = raftClusterPsCommand.Parse(args); err != nil {
		return nil
	}

	err = commandEnv.MasterClient.WithClient(false, func(client master_pb.SeaweedClient) error {
		resp, err := client.RaftListClusterServers(context.Background(), &master_pb.RaftListClusterServersRequest{})
		if err != nil {
			return fmt.Errorf("raft list cluster: %v", err)
		}
		fmt.Fprintf(writer, "the raft cluster has %d servers\n", len(resp.ClusterServers))
		for _, server := range resp.ClusterServers {
			fmt.Fprintf(writer, "  * %s %s (%s)\n", server.Id, server.Address, server.Suffrage)
		}

		return nil
	})

	return err
}
weed/shell/command_cluster_raft_remove.go
@@ -0,0 +1,56 @@
package shell

import (
	"context"
	"flag"
	"fmt"
	"io"

	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
)

func init() {
	Commands = append(Commands, &commandRaftServerRemove{})
}

type commandRaftServerRemove struct {
}

func (c *commandRaftServerRemove) Name() string {
	return "cluster.raft.remove"
}

func (c *commandRaftServerRemove) Help() string {
	return `remove a server from the raft cluster

	Example:
		cluster.raft.remove -id <server_name>
	`
}

func (c *commandRaftServerRemove) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {

	raftServerRemoveCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
	serverId := raftServerRemoveCommand.String("id", "", "server id")
	if err = raftServerRemoveCommand.Parse(args); err != nil {
		return nil
	}

	if *serverId == "" {
		return fmt.Errorf("empty server id")
	}

	err = commandEnv.MasterClient.WithClient(false, func(client master_pb.SeaweedClient) error {
		_, err := client.RaftRemoveServer(context.Background(), &master_pb.RaftRemoveServerRequest{
			Id:    *serverId,
			Force: true,
		})
		if err != nil {
			return fmt.Errorf("raft remove server: %v", err)
		}
		println("removed server", *serverId)
		return nil
	})

	return err
}
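Note that cluster.raft.remove always sends Force: true, so the guard in RaftRemoveServer that refuses to drop a master which still has a live client connection does not apply when the removal is issued from this shell command.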