
Merge branch 'master' into refactoring_dat_backend

pull/1115/head
Chris Lu 5 years ago
parent commit 5b950c735e
Changed files:

  1. weed/command/mount_darwin.go (1 change)
  2. weed/command/mount_freebsd.go (1 change)
  3. weed/command/mount_linux.go (1 change)
  4. weed/server/master_grpc_server_collection.go (4 changes)
  5. weed/server/master_grpc_server_volume.go (2 changes)
  6. weed/server/master_server.go (12 changes)
  7. weed/server/master_server_handlers.go (2 changes)
  8. weed/server/master_server_handlers_admin.go (10 changes)
  9. weed/server/raft_server.go (2 changes)
  10. weed/storage/volume_vacuum.go (6 changes)
  11. weed/topology/topology.go (4 changes)

weed/command/mount_darwin.go (1 change)

@@ -1,7 +1,6 @@
 package command

 import (
     "github.com/seaweedfs/fuse"
 )

weed/command/mount_freebsd.go (1 change)

@@ -1,7 +1,6 @@
 package command

 import (
     "github.com/seaweedfs/fuse"
 )

weed/command/mount_linux.go (1 change)

@@ -1,7 +1,6 @@
 package command

 import (
     "github.com/seaweedfs/fuse"
 )

weed/server/master_grpc_server_collection.go (4 changes)

@@ -57,7 +57,7 @@ func (ms *MasterServer) doDeleteNormalCollection(collectionName string) error {
     }
     for _, server := range collection.ListVolumeServers() {
-        err := operation.WithVolumeServerClient(server.Url(), ms.grpcDialOpiton, func(client volume_server_pb.VolumeServerClient) error {
+        err := operation.WithVolumeServerClient(server.Url(), ms.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
             _, deleteErr := client.DeleteCollection(context.Background(), &volume_server_pb.DeleteCollectionRequest{
                 Collection: collectionName,
             })
@@ -77,7 +77,7 @@ func (ms *MasterServer) doDeleteEcCollection(collectionName string) error {
     listOfEcServers := ms.Topo.ListEcServersByCollection(collectionName)
     for _, server := range listOfEcServers {
-        err := operation.WithVolumeServerClient(server, ms.grpcDialOpiton, func(client volume_server_pb.VolumeServerClient) error {
+        err := operation.WithVolumeServerClient(server, ms.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
             _, deleteErr := client.DeleteCollection(context.Background(), &volume_server_pb.DeleteCollectionRequest{
                 Collection: collectionName,
             })
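Both hunks above are the same one-character fix, applied at the two call sites that fan a DeleteCollection RPC out to volume servers: the misspelled field ms.grpcDialOpiton becomes ms.grpcDialOption. A hedged, minimal sketch of that call shape, assuming the operation and volume_server_pb packages live at their usual import paths in this repository; the helper name, server URL, and collection name below are hypothetical:

package example

import (
    "context"

    "github.com/chrislusf/seaweedfs/weed/operation"
    "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
    "google.golang.org/grpc"
)

// deleteCollectionOn is a hypothetical helper showing how the (now correctly
// spelled) grpcDialOption is threaded into a volume-server RPC: dial the
// server with the shared option, then issue DeleteCollection for the name.
func deleteCollectionOn(serverUrl, collectionName string, dialOption grpc.DialOption) error {
    return operation.WithVolumeServerClient(serverUrl, dialOption, func(client volume_server_pb.VolumeServerClient) error {
        _, deleteErr := client.DeleteCollection(context.Background(), &volume_server_pb.DeleteCollectionRequest{
            Collection: collectionName,
        })
        return deleteErr
    })
}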

weed/server/master_grpc_server_volume.go (2 changes)

@@ -78,7 +78,7 @@ func (ms *MasterServer) Assign(ctx context.Context, req *master_pb.AssignRequest
     }
     ms.vgLock.Lock()
     if !ms.Topo.HasWritableVolume(option) {
-        if _, err = ms.vg.AutomaticGrowByType(option, ms.grpcDialOpiton, ms.Topo); err != nil {
+        if _, err = ms.vg.AutomaticGrowByType(option, ms.grpcDialOption, ms.Topo); err != nil {
             ms.vgLock.Unlock()
             return nil, fmt.Errorf("Cannot grow volume group! %v", err)
         }

weed/server/master_server.go (12 changes)

@@ -3,9 +3,6 @@ package weed_server
 import (
     "context"
     "fmt"
-    "github.com/chrislusf/seaweedfs/weed/shell"
-    "github.com/chrislusf/seaweedfs/weed/wdclient"
-    "google.golang.org/grpc"
     "net/http"
     "net/http/httputil"
     "net/url"
@@ -21,10 +18,13 @@ import (
     "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
     "github.com/chrislusf/seaweedfs/weed/security"
     "github.com/chrislusf/seaweedfs/weed/sequence"
+    "github.com/chrislusf/seaweedfs/weed/shell"
     "github.com/chrislusf/seaweedfs/weed/topology"
     "github.com/chrislusf/seaweedfs/weed/util"
+    "github.com/chrislusf/seaweedfs/weed/wdclient"
     "github.com/gorilla/mux"
     "github.com/spf13/viper"
+    "google.golang.org/grpc"
 )

 type MasterOption struct {
@@ -57,7 +57,7 @@ type MasterServer struct {
     clientChansLock sync.RWMutex
     clientChans     map[string]chan *master_pb.VolumeLocation
-    grpcDialOpiton grpc.DialOption
+    grpcDialOption grpc.DialOption
     MasterClient *wdclient.MasterClient
 }
@@ -83,7 +83,7 @@ func NewMasterServer(r *mux.Router, option *MasterOption, peers []string) *MasterServer {
         option:          option,
         preallocateSize: preallocateSize,
         clientChans:     make(map[string]chan *master_pb.VolumeLocation),
-        grpcDialOpiton:  grpcDialOption,
+        grpcDialOption:  grpcDialOption,
         MasterClient:    wdclient.NewMasterClient(context.Background(), grpcDialOption, "master", peers),
     }
     ms.bounedLeaderChan = make(chan int, 16)
@@ -112,7 +112,7 @@ func NewMasterServer(r *mux.Router, option *MasterOption, peers []string) *MasterServer {
         r.HandleFunc("/{fileId}", ms.redirectHandler)
     }
-    ms.Topo.StartRefreshWritableVolumes(ms.grpcDialOpiton, ms.option.GarbageThreshold, ms.preallocateSize)
+    ms.Topo.StartRefreshWritableVolumes(ms.grpcDialOption, ms.option.GarbageThreshold, ms.preallocateSize)
     ms.startAdminScripts()
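The master_server.go hunks are where the rename originates: the struct field itself and the place NewMasterServer fills it in, plus a goimports-style regrouping that moves the shell, wdclient, and grpc imports into the sorted block. A minimal sketch of the corrected field, assuming nothing beyond what the hunks show; the stripped-down type and constructor names are hypothetical:

package example

import "google.golang.org/grpc"

// exampleMasterServer keeps only the renamed field: a single grpc.DialOption
// (formerly misspelled grpcDialOpiton) that every volume-server call reuses.
type exampleMasterServer struct {
    grpcDialOption grpc.DialOption
}

// newExampleMasterServer mirrors how NewMasterServer stores the dial option once
// so handlers never construct their own connection options.
func newExampleMasterServer(dialOption grpc.DialOption) *exampleMasterServer {
    return &exampleMasterServer{grpcDialOption: dialOption}
}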

weed/server/master_server_handlers.go (2 changes)

@@ -108,7 +108,7 @@ func (ms *MasterServer) dirAssignHandler(w http.ResponseWriter, r *http.Request) {
         ms.vgLock.Lock()
         defer ms.vgLock.Unlock()
         if !ms.Topo.HasWritableVolume(option) {
-            if _, err = ms.vg.AutomaticGrowByType(option, ms.grpcDialOpiton, ms.Topo); err != nil {
+            if _, err = ms.vg.AutomaticGrowByType(option, ms.grpcDialOption, ms.Topo); err != nil {
                 writeJsonError(w, r, http.StatusInternalServerError,
                     fmt.Errorf("Cannot grow volume group! %v", err))
                 return
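This handler and the earlier gRPC Assign hunk share one pattern: take vgLock so only one caller grows volumes at a time, re-check HasWritableVolume under the lock, and only then call AutomaticGrowByType with the dial option. A self-contained sketch of that check-then-grow step, with hypothetical stand-ins for the topology and volume-growth calls:

package example

import (
    "fmt"
    "sync"
)

// growIfNeeded mirrors the locked check-then-grow step from the hunk.
// hasWritable stands in for ms.Topo.HasWritableVolume and grow for
// ms.vg.AutomaticGrowByType; both are hypothetical here.
func growIfNeeded(vgLock *sync.Mutex, hasWritable func() bool, grow func() (int, error)) error {
    vgLock.Lock()
    defer vgLock.Unlock()
    if !hasWritable() {
        if _, err := grow(); err != nil {
            return fmt.Errorf("Cannot grow volume group! %v", err)
        }
    }
    return nil
}

Re-checking after the lock is acquired matters because another request may already have grown the volumes while this one was waiting on vgLock.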

weed/server/master_server_handlers_admin.go (10 changes)

@@ -24,7 +24,7 @@ func (ms *MasterServer) collectionDeleteHandler(w http.ResponseWriter, r *http.Request) {
         return
     }
     for _, server := range collection.ListVolumeServers() {
-        err := operation.WithVolumeServerClient(server.Url(), ms.grpcDialOpiton, func(client volume_server_pb.VolumeServerClient) error {
+        err := operation.WithVolumeServerClient(server.Url(), ms.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
             _, deleteErr := client.DeleteCollection(context.Background(), &volume_server_pb.DeleteCollectionRequest{
                 Collection: collection.Name,
             })
@@ -57,7 +57,7 @@ func (ms *MasterServer) volumeVacuumHandler(w http.ResponseWriter, r *http.Request) {
         }
     }
     glog.Infoln("garbageThreshold =", gcThreshold)
-    ms.Topo.Vacuum(ms.grpcDialOpiton, gcThreshold, ms.preallocateSize)
+    ms.Topo.Vacuum(ms.grpcDialOption, gcThreshold, ms.preallocateSize)
     ms.dirStatusHandler(w, r)
 }
@@ -73,7 +73,7 @@ func (ms *MasterServer) volumeGrowHandler(w http.ResponseWriter, r *http.Request) {
         if ms.Topo.FreeSpace() < int64(count*option.ReplicaPlacement.GetCopyCount()) {
             err = fmt.Errorf("only %d volumes left, not enough for %d", ms.Topo.FreeSpace(), count*option.ReplicaPlacement.GetCopyCount())
         } else {
-            count, err = ms.vg.GrowByCountAndType(ms.grpcDialOpiton, count, option, ms.Topo)
+            count, err = ms.vg.GrowByCountAndType(ms.grpcDialOption, count, option, ms.Topo)
         }
     } else {
         err = fmt.Errorf("can not parse parameter count %s", r.FormValue("count"))
@@ -119,13 +119,13 @@ func (ms *MasterServer) selfUrl(r *http.Request) string {
 }
 func (ms *MasterServer) submitFromMasterServerHandler(w http.ResponseWriter, r *http.Request) {
     if ms.Topo.IsLeader() {
-        submitForClientHandler(w, r, ms.selfUrl(r), ms.grpcDialOpiton)
+        submitForClientHandler(w, r, ms.selfUrl(r), ms.grpcDialOption)
     } else {
         masterUrl, err := ms.Topo.Leader()
         if err != nil {
             writeJsonError(w, r, http.StatusInternalServerError, err)
         } else {
-            submitForClientHandler(w, r, masterUrl, ms.grpcDialOpiton)
+            submitForClientHandler(w, r, masterUrl, ms.grpcDialOption)
         }
     }
 }

weed/server/raft_server.go (2 changes)

@@ -25,7 +25,7 @@ type RaftServer struct {
     *raft.GrpcServer
 }

-func NewRaftServer(grpcDialOption grpc.DialOption, peers []string, serverAddr string, dataDir string, topo *topology.Topology, pulseSeconds int) *RaftServer {
+func NewRaftServer(grpcDialOption grpc.DialOption, peers []string, serverAddr, dataDir string, topo *topology.Topology, pulseSeconds int) *RaftServer {
     s := &RaftServer{
         peers:      peers,
         serverAddr: serverAddr,
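The raft_server.go change is purely cosmetic: in Go, consecutive parameters of the same type can share one type name, so `serverAddr string, dataDir string` collapses to `serverAddr, dataDir string` with identical behavior. A tiny illustration with hypothetical functions, not code from the repository:

package example

// Both declarations are identical to the compiler; the second uses the
// shorthand that NewRaftServer adopts in this commit.
func joinVerbose(serverAddr string, dataDir string) string { return serverAddr + " " + dataDir }
func joinShort(serverAddr, dataDir string) string          { return serverAddr + " " + dataDir }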

weed/storage/volume_vacuum.go (6 changes)

@@ -24,7 +24,7 @@ func (v *Volume) garbageLevel() float64 {
 func (v *Volume) Compact(preallocate int64, compactionBytePerSecond int64) error {
-    if v.MemoryMapMaxSizeMb > 0 { //it makes no sense to compact in memory
+    if v.MemoryMapMaxSizeMb == 0 { //it makes no sense to compact in memory
         glog.V(3).Infof("Compacting volume %d ...", v.Id)
         //no need to lock for copy on write
         //v.accessLock.Lock()
@@ -47,7 +47,7 @@ func (v *Volume) Compact(preallocate int64, compactionBytePerSecond int64) error
 func (v *Volume) Compact2() error {
-    if v.MemoryMapMaxSizeMb > 0 { //it makes no sense to compact in memory
+    if v.MemoryMapMaxSizeMb == 0 { //it makes no sense to compact in memory
         glog.V(3).Infof("Compact2 volume %d ...", v.Id)
         v.isCompacting = true
@@ -64,7 +64,7 @@ func (v *Volume) Compact2() error {
 }

 func (v *Volume) CommitCompact() error {
-    if v.MemoryMapMaxSizeMb > 0 { //it makes no sense to compact in memory
+    if v.MemoryMapMaxSizeMb == 0 { //it makes no sense to compact in memory
         glog.V(0).Infof("Committing volume %d vacuuming...", v.Id)
         v.isCompacting = true
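All three volume_vacuum.go hunks flip the same guard. Going by the comment on the line, the old condition was inverted: compaction ran only when MemoryMapMaxSizeMb was positive, i.e. only for memory-mapped volumes, where it makes no sense. After the change, Compact, Compact2, and CommitCompact proceed only for on-disk volumes (MemoryMapMaxSizeMb == 0). A hedged sketch of the corrected guard with a stripped-down, hypothetical volume type:

package example

import "fmt"

// exampleVolume keeps only the field the guard inspects.
type exampleVolume struct {
    Id                 int
    MemoryMapMaxSizeMb uint32
}

// compact applies the corrected guard: only on-disk volumes are compacted;
// memory-mapped volumes are skipped.
func (v *exampleVolume) compact() {
    if v.MemoryMapMaxSizeMb == 0 {
        fmt.Printf("Compacting volume %d ...\n", v.Id)
        // ... rewrite the volume's data files here ...
    }
}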

weed/topology/topology.go (4 changes)

@@ -120,10 +120,10 @@ func (t *Topology) HasWritableVolume(option *VolumeGrowOption) bool {
 func (t *Topology) PickForWrite(count uint64, option *VolumeGrowOption) (string, uint64, *DataNode, error) {
     vid, count, datanodes, err := t.GetVolumeLayout(option.Collection, option.ReplicaPlacement, option.Ttl).PickForWrite(count, option)
     if err != nil {
-        return "", 0, nil, fmt.Errorf("failed to find writable volumes for collectio:%s replication:%s ttl:%s error: %v", option.Collection, option.ReplicaPlacement.String(), option.Ttl.String(), err)
+        return "", 0, nil, fmt.Errorf("failed to find writable volumes for collection:%s replication:%s ttl:%s error: %v", option.Collection, option.ReplicaPlacement.String(), option.Ttl.String(), err)
     }
     if datanodes.Length() == 0 {
-        return "", 0, nil, fmt.Errorf("no writable volumes available for for collectio:%s replication:%s ttl:%s", option.Collection, option.ReplicaPlacement.String(), option.Ttl.String())
+        return "", 0, nil, fmt.Errorf("no writable volumes available for collection:%s replication:%s ttl:%s", option.Collection, option.ReplicaPlacement.String(), option.Ttl.String())
     }
     fileId, count := t.Sequence.NextFileId(count)
     return needle.NewFileId(*vid, fileId, rand.Uint32()).String(), count, datanodes.Head(), nil
