Browse Source
Merge branch 'new_master' into hashicorp_raft
Merge branch 'new_master' into hashicorp_raft
# Conflicts: # go.mod # go.sum (pull/2868/head)
Konstantin Lebedev
3 years ago
44 changed files with 2635 additions and 1217 deletions
-
9.github/workflows/binaries_dev.yml
-
5.github/workflows/binaries_release0.yml
-
5.github/workflows/binaries_release1.yml
-
5.github/workflows/binaries_release2.yml
-
5.github/workflows/binaries_release3.yml
-
2docker/Dockerfile.rocksdb_large
-
52go.mod
-
388go.sum
-
4k8s/helm_charts2/Chart.yaml
-
10other/java/client/src/main/proto/filer.proto
-
7weed/cluster/cluster.go
-
2weed/command/master.go
-
2weed/command/mount.go
-
24weed/command/mount_std.go
-
17weed/mount/page_writer/page_chunk_swapfile.go
-
2weed/mount/weedfs.go
-
17weed/mount/weedfs_grpc_server.go
-
11weed/mount/weedfs_quota.go
-
1weed/pb/Makefile
-
10weed/pb/filer.proto
-
890weed/pb/filer_pb/filer.pb.go
-
36weed/pb/filer_pb/filer_grpc.pb.go
-
9weed/pb/grpc_client_server.go
-
10weed/pb/master.proto
-
393weed/pb/master_pb/master.pb.go
-
36weed/pb/master_pb/master_grpc.pb.go
-
25weed/pb/mount.proto
-
208weed/pb/mount_pb/mount.pb.go
-
101weed/pb/mount_pb/mount_grpc.pb.go
-
2weed/pb/remote_pb/remote.pb.go
-
2weed/pb/server_address.go
-
11weed/pb/volume_server.proto
-
849weed/pb/volume_server_pb/volume_server.pb.go
-
36weed/pb/volume_server_pb/volume_server_grpc.pb.go
-
2weed/s3api/s3api_object_handlers.go
-
126weed/server/filer_grpc_server.go
-
164weed/server/filer_grpc_server_admin.go
-
30weed/server/master_grpc_server_admin.go
-
30weed/server/volume_grpc_admin.go
-
233weed/shell/command_cluster_check.go
-
64weed/shell/command_mount_configure.go
-
2weed/util/constants.go
-
10weed/util/log_buffer/log_buffer.go
-
5weed/wdclient/masterclient.go
@ -1,5 +1,5 @@ |
|||
apiVersion: v1 |
|||
description: SeaweedFS |
|||
name: seaweedfs |
|||
appVersion: "2.96" |
|||
version: "2.96" |
|||
appVersion: "2.97" |
|||
version: "2.97" |
@ -0,0 +1,17 @@ |
|||
package mount |
|||
|
|||
import ( |
|||
"context" |
|||
"fmt" |
|||
"github.com/chrislusf/seaweedfs/weed/glog" |
|||
"github.com/chrislusf/seaweedfs/weed/pb/mount_pb" |
|||
) |
|||
|
|||
func (wfs *WFS) Configure(ctx context.Context, request *mount_pb.ConfigureRequest) (*mount_pb.ConfigureResponse, error) { |
|||
if wfs.option.Collection == "" { |
|||
return nil, fmt.Errorf("mount quota only works when mounted to a new folder with a collection") |
|||
} |
|||
glog.V(0).Infof("quota changed from %d to %d", wfs.option.Quota, request.CollectionCapacity) |
|||
wfs.option.Quota = request.GetCollectionCapacity() |
|||
return &mount_pb.ConfigureResponse{}, nil |
|||
} |
890
weed/pb/filer_pb/filer.pb.go
File diff suppressed because it is too large
View File
File diff suppressed because it is too large
View File
@ -0,0 +1,25 @@ |
|||
// mount.proto defines the control-plane RPC exposed by a running
// "weed mount" process over a local unix socket, used by the shell's
// "mount.configure" command.
syntax = "proto3";

// NOTE(review): the proto package is "messaging_pb" although this file is
// mount.proto and the generated Go package is mount_pb. Reusing the
// messaging_pb proto package risks fully-qualified-name collisions with
// messaging.proto in the global protobuf registry — consider renaming to
// "mount_pb" (requires regenerating all *.pb.go files; see weed/pb/Makefile).
// TODO confirm with maintainers before changing.
package messaging_pb;

option go_package = "github.com/chrislusf/seaweedfs/weed/pb/mount_pb";
option java_package = "seaweedfs.client";
option java_outer_classname = "MountProto";

//////////////////////////////////////////////////

// SeaweedMount is served by the mount process itself, letting a local
// client reconfigure a live mount.
service SeaweedMount {

    // Configure updates mount runtime settings (currently the quota).
    rpc Configure (ConfigureRequest) returns (ConfigureResponse) {
    }

}

//////////////////////////////////////////////////

message ConfigureRequest {
    // New quota for the mounted collection, in bytes (0 = no quota).
    int64 collection_capacity = 1;
}

message ConfigureResponse {
}
@ -0,0 +1,208 @@ |
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
// protoc v3.17.3
// source: mount.proto

// NOTE(review): machine-generated from mount.proto — do not hand-edit;
// regenerate (see weed/pb/Makefile) after any change to the .proto file.

package mount_pb

import (
	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
	reflect "reflect"
	sync "sync"
)

const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

// ConfigureRequest is the request for SeaweedMount.Configure; it carries
// the new collection quota in bytes.
type ConfigureRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	CollectionCapacity int64 `protobuf:"varint,1,opt,name=collection_capacity,json=collectionCapacity,proto3" json:"collection_capacity,omitempty"`
}

func (x *ConfigureRequest) Reset() {
	*x = ConfigureRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_mount_proto_msgTypes[0]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *ConfigureRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ConfigureRequest) ProtoMessage() {}

func (x *ConfigureRequest) ProtoReflect() protoreflect.Message {
	mi := &file_mount_proto_msgTypes[0]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ConfigureRequest.ProtoReflect.Descriptor instead.
func (*ConfigureRequest) Descriptor() ([]byte, []int) {
	return file_mount_proto_rawDescGZIP(), []int{0}
}

func (x *ConfigureRequest) GetCollectionCapacity() int64 {
	if x != nil {
		return x.CollectionCapacity
	}
	return 0
}

// ConfigureResponse is the (empty) reply for SeaweedMount.Configure.
type ConfigureResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
}

func (x *ConfigureResponse) Reset() {
	*x = ConfigureResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_mount_proto_msgTypes[1]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *ConfigureResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ConfigureResponse) ProtoMessage() {}

func (x *ConfigureResponse) ProtoReflect() protoreflect.Message {
	mi := &file_mount_proto_msgTypes[1]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ConfigureResponse.ProtoReflect.Descriptor instead.
func (*ConfigureResponse) Descriptor() ([]byte, []int) {
	return file_mount_proto_rawDescGZIP(), []int{1}
}

var File_mount_proto protoreflect.FileDescriptor

var file_mount_proto_rawDesc = []byte{
	0x0a, 0x0b, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x6d,
	0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x22, 0x43, 0x0a, 0x10, 0x43,
	0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
	0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x61,
	0x70, 0x61, 0x63, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x12, 0x63, 0x6f,
	0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79,
	0x22, 0x13, 0x0a, 0x11, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73,
	0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x5e, 0x0a, 0x0c, 0x53, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64,
	0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x4e, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75,
	0x72, 0x65, 0x12, 0x1e, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70,
	0x62, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
	0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70,
	0x62, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
	0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x4f, 0x0a, 0x10, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64,
	0x66, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x42, 0x0a, 0x4d, 0x6f, 0x75, 0x6e, 0x74,
	0x50, 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f,
	0x6d, 0x2f, 0x63, 0x68, 0x72, 0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, 0x65, 0x61, 0x77,
	0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x6d, 0x6f,
	0x75, 0x6e, 0x74, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}

var (
	file_mount_proto_rawDescOnce sync.Once
	file_mount_proto_rawDescData = file_mount_proto_rawDesc
)

func file_mount_proto_rawDescGZIP() []byte {
	file_mount_proto_rawDescOnce.Do(func() {
		file_mount_proto_rawDescData = protoimpl.X.CompressGZIP(file_mount_proto_rawDescData)
	})
	return file_mount_proto_rawDescData
}

var file_mount_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_mount_proto_goTypes = []interface{}{
	(*ConfigureRequest)(nil),  // 0: messaging_pb.ConfigureRequest
	(*ConfigureResponse)(nil), // 1: messaging_pb.ConfigureResponse
}
var file_mount_proto_depIdxs = []int32{
	0, // 0: messaging_pb.SeaweedMount.Configure:input_type -> messaging_pb.ConfigureRequest
	1, // 1: messaging_pb.SeaweedMount.Configure:output_type -> messaging_pb.ConfigureResponse
	1, // [1:2] is the sub-list for method output_type
	0, // [0:1] is the sub-list for method input_type
	0, // [0:0] is the sub-list for extension type_name
	0, // [0:0] is the sub-list for extension extendee
	0, // [0:0] is the sub-list for field type_name
}

func init() { file_mount_proto_init() }
func file_mount_proto_init() {
	if File_mount_proto != nil {
		return
	}
	if !protoimpl.UnsafeEnabled {
		file_mount_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*ConfigureRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_mount_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*ConfigureResponse); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_mount_proto_rawDesc,
			NumEnums:      0,
			NumMessages:   2,
			NumExtensions: 0,
			NumServices:   1,
		},
		GoTypes:           file_mount_proto_goTypes,
		DependencyIndexes: file_mount_proto_depIdxs,
		MessageInfos:      file_mount_proto_msgTypes,
	}.Build()
	File_mount_proto = out.File
	file_mount_proto_rawDesc = nil
	file_mount_proto_goTypes = nil
	file_mount_proto_depIdxs = nil
}
@ -0,0 +1,101 @@ |
|||
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.

// NOTE(review): machine-generated from mount.proto — do not hand-edit;
// regenerate (see weed/pb/Makefile) after any change to the .proto file.

package mount_pb

import (
	context "context"
	grpc "google.golang.org/grpc"
	codes "google.golang.org/grpc/codes"
	status "google.golang.org/grpc/status"
)

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7

// SeaweedMountClient is the client API for SeaweedMount service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type SeaweedMountClient interface {
	Configure(ctx context.Context, in *ConfigureRequest, opts ...grpc.CallOption) (*ConfigureResponse, error)
}

type seaweedMountClient struct {
	cc grpc.ClientConnInterface
}

func NewSeaweedMountClient(cc grpc.ClientConnInterface) SeaweedMountClient {
	return &seaweedMountClient{cc}
}

func (c *seaweedMountClient) Configure(ctx context.Context, in *ConfigureRequest, opts ...grpc.CallOption) (*ConfigureResponse, error) {
	out := new(ConfigureResponse)
	err := c.cc.Invoke(ctx, "/messaging_pb.SeaweedMount/Configure", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// SeaweedMountServer is the server API for SeaweedMount service.
// All implementations must embed UnimplementedSeaweedMountServer
// for forward compatibility
type SeaweedMountServer interface {
	Configure(context.Context, *ConfigureRequest) (*ConfigureResponse, error)
	mustEmbedUnimplementedSeaweedMountServer()
}

// UnimplementedSeaweedMountServer must be embedded to have forward compatible implementations.
type UnimplementedSeaweedMountServer struct {
}

func (UnimplementedSeaweedMountServer) Configure(context.Context, *ConfigureRequest) (*ConfigureResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method Configure not implemented")
}
func (UnimplementedSeaweedMountServer) mustEmbedUnimplementedSeaweedMountServer() {}

// UnsafeSeaweedMountServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to SeaweedMountServer will
// result in compilation errors.
type UnsafeSeaweedMountServer interface {
	mustEmbedUnimplementedSeaweedMountServer()
}

func RegisterSeaweedMountServer(s grpc.ServiceRegistrar, srv SeaweedMountServer) {
	s.RegisterService(&SeaweedMount_ServiceDesc, srv)
}

func _SeaweedMount_Configure_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(ConfigureRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(SeaweedMountServer).Configure(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/messaging_pb.SeaweedMount/Configure",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(SeaweedMountServer).Configure(ctx, req.(*ConfigureRequest))
	}
	return interceptor(ctx, in, info, handler)
}

// SeaweedMount_ServiceDesc is the grpc.ServiceDesc for SeaweedMount service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var SeaweedMount_ServiceDesc = grpc.ServiceDesc{
	ServiceName: "messaging_pb.SeaweedMount",
	HandlerType: (*SeaweedMountServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "Configure",
			Handler:    _SeaweedMount_Configure_Handler,
		},
	},
	Streams:  []grpc.StreamDesc{},
	Metadata: "mount.proto",
}
849
weed/pb/volume_server_pb/volume_server.pb.go
File diff suppressed because it is too large
View File
File diff suppressed because it is too large
View File
@ -0,0 +1,164 @@ |
|||
package weed_server |
|||
|
|||
import ( |
|||
"context" |
|||
"fmt" |
|||
"github.com/chrislusf/seaweedfs/weed/cluster" |
|||
"github.com/chrislusf/seaweedfs/weed/glog" |
|||
"github.com/chrislusf/seaweedfs/weed/pb" |
|||
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb" |
|||
"github.com/chrislusf/seaweedfs/weed/pb/master_pb" |
|||
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" |
|||
"github.com/chrislusf/seaweedfs/weed/util" |
|||
) |
|||
|
|||
func (fs *FilerServer) Statistics(ctx context.Context, req *filer_pb.StatisticsRequest) (resp *filer_pb.StatisticsResponse, err error) { |
|||
|
|||
var output *master_pb.StatisticsResponse |
|||
|
|||
err = fs.filer.MasterClient.WithClient(false, func(masterClient master_pb.SeaweedClient) error { |
|||
grpcResponse, grpcErr := masterClient.Statistics(context.Background(), &master_pb.StatisticsRequest{ |
|||
Replication: req.Replication, |
|||
Collection: req.Collection, |
|||
Ttl: req.Ttl, |
|||
DiskType: req.DiskType, |
|||
}) |
|||
if grpcErr != nil { |
|||
return grpcErr |
|||
} |
|||
|
|||
output = grpcResponse |
|||
return nil |
|||
}) |
|||
|
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
|
|||
return &filer_pb.StatisticsResponse{ |
|||
TotalSize: output.TotalSize, |
|||
UsedSize: output.UsedSize, |
|||
FileCount: output.FileCount, |
|||
}, nil |
|||
} |
|||
|
|||
func (fs *FilerServer) Ping(ctx context.Context, req *filer_pb.PingRequest) (resp *filer_pb.PingResponse, pingErr error) { |
|||
resp = &filer_pb.PingResponse{} |
|||
if req.TargetType == cluster.FilerType { |
|||
pingErr = pb.WithFilerClient(false, pb.ServerAddress(req.Target), fs.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { |
|||
_, err := client.Ping(ctx, &filer_pb.PingRequest{}) |
|||
return err |
|||
}) |
|||
} |
|||
if req.TargetType == cluster.VolumeServerType { |
|||
pingErr = pb.WithVolumeServerClient(false, pb.ServerAddress(req.Target), fs.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { |
|||
_, err := client.Ping(ctx, &volume_server_pb.PingRequest{}) |
|||
return err |
|||
}) |
|||
} |
|||
if req.TargetType == cluster.MasterType { |
|||
pingErr = pb.WithMasterClient(false, pb.ServerAddress(req.Target), fs.grpcDialOption, func(client master_pb.SeaweedClient) error { |
|||
_, err := client.Ping(ctx, &master_pb.PingRequest{}) |
|||
return err |
|||
}) |
|||
} |
|||
if pingErr != nil { |
|||
pingErr = fmt.Errorf("ping %s %s: %v", req.TargetType, req.Target, pingErr) |
|||
} |
|||
return |
|||
} |
|||
|
|||
func (fs *FilerServer) GetFilerConfiguration(ctx context.Context, req *filer_pb.GetFilerConfigurationRequest) (resp *filer_pb.GetFilerConfigurationResponse, err error) { |
|||
|
|||
clusterId, _ := fs.filer.Store.KvGet(context.Background(), []byte("clusterId")) |
|||
|
|||
t := &filer_pb.GetFilerConfigurationResponse{ |
|||
Masters: pb.ToAddressStringsFromMap(fs.option.Masters), |
|||
Collection: fs.option.Collection, |
|||
Replication: fs.option.DefaultReplication, |
|||
MaxMb: uint32(fs.option.MaxMB), |
|||
DirBuckets: fs.filer.DirBucketsPath, |
|||
Cipher: fs.filer.Cipher, |
|||
Signature: fs.filer.Signature, |
|||
MetricsAddress: fs.metricsAddress, |
|||
MetricsIntervalSec: int32(fs.metricsIntervalSec), |
|||
Version: util.Version(), |
|||
ClusterId: string(clusterId), |
|||
} |
|||
|
|||
glog.V(4).Infof("GetFilerConfiguration: %v", t) |
|||
|
|||
return t, nil |
|||
} |
|||
|
|||
// KeepConnected registers a message broker with this filer and keeps the
// bidirectional stream open as a liveness signal. The broker's entry is
// removed from fs.brokers when the stream ends for any reason.
func (fs *FilerServer) KeepConnected(stream filer_pb.SeaweedFiler_KeepConnectedServer) error {

	// The first message identifies the broker and its resources.
	req, err := stream.Recv()
	if err != nil {
		return err
	}

	// Broker is keyed by "host:grpcPort"; its advertised resources are kept
	// as a set for LocateBroker lookups.
	clientName := util.JoinHostPort(req.Name, int(req.GrpcPort))
	m := make(map[string]bool)
	for _, tp := range req.Resources {
		m[tp] = true
	}
	fs.brokersLock.Lock()
	fs.brokers[clientName] = m
	glog.V(0).Infof("+ broker %v", clientName)
	fs.brokersLock.Unlock()

	// Unregister on any exit path.
	// NOTE(review): this closure captures the outer `err`, which is nil once
	// we get here; the send/recv errors below are declared in inner scopes,
	// so this log line always prints <nil> — consider assigning stream
	// errors to the outer `err` if the actual cause should be logged.
	defer func() {
		fs.brokersLock.Lock()
		delete(fs.brokers, clientName)
		glog.V(0).Infof("- broker %v: %v", clientName, err)
		fs.brokersLock.Unlock()
	}()

	// Ping-pong loop: each send must be answered by a receive; any error on
	// either side tears down the registration via the deferred cleanup.
	for {
		if err := stream.Send(&filer_pb.KeepConnectedResponse{}); err != nil {
			glog.V(0).Infof("send broker %v: %+v", clientName, err)
			return err
		}
		// println("replied")

		if _, err := stream.Recv(); err != nil {
			glog.V(0).Infof("recv broker %v: %v", clientName, err)
			return err
		}
		// println("received")
	}

}
|||
|
|||
func (fs *FilerServer) LocateBroker(ctx context.Context, req *filer_pb.LocateBrokerRequest) (resp *filer_pb.LocateBrokerResponse, err error) { |
|||
|
|||
resp = &filer_pb.LocateBrokerResponse{} |
|||
|
|||
fs.brokersLock.Lock() |
|||
defer fs.brokersLock.Unlock() |
|||
|
|||
var localBrokers []*filer_pb.LocateBrokerResponse_Resource |
|||
|
|||
for b, m := range fs.brokers { |
|||
if _, found := m[req.Resource]; found { |
|||
resp.Found = true |
|||
resp.Resources = []*filer_pb.LocateBrokerResponse_Resource{ |
|||
{ |
|||
GrpcAddresses: b, |
|||
ResourceCount: int32(len(m)), |
|||
}, |
|||
} |
|||
return |
|||
} |
|||
localBrokers = append(localBrokers, &filer_pb.LocateBrokerResponse_Resource{ |
|||
GrpcAddresses: b, |
|||
ResourceCount: int32(len(m)), |
|||
}) |
|||
} |
|||
|
|||
resp.Resources = localBrokers |
|||
|
|||
return resp, nil |
|||
|
|||
} |
@ -0,0 +1,233 @@ |
|||
package shell |
|||
|
|||
import ( |
|||
"context" |
|||
"flag" |
|||
"fmt" |
|||
"github.com/chrislusf/seaweedfs/weed/cluster" |
|||
"github.com/chrislusf/seaweedfs/weed/pb" |
|||
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb" |
|||
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" |
|||
"io" |
|||
|
|||
"github.com/chrislusf/seaweedfs/weed/pb/master_pb" |
|||
) |
|||
|
|||
// init registers the cluster.check command with the shell's command list.
func init() {
	Commands = append(Commands, &commandClusterCheck{})
}
|||
|
|||
// commandClusterCheck implements the "cluster.check" shell command, which
// verifies pairwise network connectivity between cluster members.
type commandClusterCheck struct {
}
|||
|
|||
// Name returns the shell command name used to invoke this command.
func (c *commandClusterCheck) Name() string {
	return "cluster.check"
}
|||
|
|||
// Help returns the usage text shown by the shell's help system.
func (c *commandClusterCheck) Help() string {
	return `check current cluster network connectivity

	cluster.check

`
}
|||
|
|||
func (c *commandClusterCheck) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { |
|||
|
|||
clusterPsCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) |
|||
if err = clusterPsCommand.Parse(args); err != nil { |
|||
return nil |
|||
} |
|||
|
|||
// collect topology information
|
|||
topologyInfo, volumeSizeLimitMb, err := collectTopologyInfo(commandEnv, 0) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
fmt.Fprintf(writer, "Topology volumeSizeLimit:%d MB%s\n", volumeSizeLimitMb, diskInfosToString(topologyInfo.DiskInfos)) |
|||
|
|||
emptyDiskTypeDiskInfo, emptyDiskTypeFound := topologyInfo.DiskInfos[""] |
|||
hddDiskTypeDiskInfo, hddDiskTypeFound := topologyInfo.DiskInfos["hdd"] |
|||
if !emptyDiskTypeFound && !hddDiskTypeFound || emptyDiskTypeDiskInfo.VolumeCount == 0 && hddDiskTypeDiskInfo.VolumeCount == 0 { |
|||
return fmt.Errorf("Need to a hdd disk type!") |
|||
} |
|||
|
|||
// collect filers
|
|||
var filers []pb.ServerAddress |
|||
err = commandEnv.MasterClient.WithClient(false, func(client master_pb.SeaweedClient) error { |
|||
resp, err := client.ListClusterNodes(context.Background(), &master_pb.ListClusterNodesRequest{ |
|||
ClientType: cluster.FilerType, |
|||
}) |
|||
|
|||
for _, node := range resp.ClusterNodes { |
|||
filers = append(filers, pb.ServerAddress(node.Address)) |
|||
} |
|||
return err |
|||
}) |
|||
if err != nil { |
|||
return |
|||
} |
|||
fmt.Fprintf(writer, "the cluster has %d filers: %+v\n", len(filers), filers) |
|||
|
|||
// collect volume servers
|
|||
var volumeServers []pb.ServerAddress |
|||
t, _, err := collectTopologyInfo(commandEnv, 0) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
for _, dc := range t.DataCenterInfos { |
|||
for _, r := range dc.RackInfos { |
|||
for _, dn := range r.DataNodeInfos { |
|||
volumeServers = append(volumeServers, pb.NewServerAddressFromDataNode(dn)) |
|||
} |
|||
} |
|||
} |
|||
fmt.Fprintf(writer, "the cluster has %d volume servers: %+v\n", len(volumeServers), volumeServers) |
|||
|
|||
// collect all masters
|
|||
var masters []pb.ServerAddress |
|||
for _, master := range commandEnv.MasterClient.GetMasters() { |
|||
masters = append(masters, master) |
|||
} |
|||
|
|||
// check from master to volume servers
|
|||
for _, master := range masters { |
|||
for _, volumeServer := range volumeServers { |
|||
fmt.Fprintf(writer, "checking master %s to volume server %s ... ", string(master), string(volumeServer)) |
|||
err := pb.WithMasterClient(false, master, commandEnv.option.GrpcDialOption, func(client master_pb.SeaweedClient) error { |
|||
_, err := client.Ping(context.Background(), &master_pb.PingRequest{ |
|||
Target: string(volumeServer), |
|||
TargetType: cluster.VolumeServerType, |
|||
}) |
|||
return err |
|||
}) |
|||
if err == nil { |
|||
fmt.Fprintf(writer, "ok\n") |
|||
} else { |
|||
fmt.Fprintf(writer, "%v\n", err) |
|||
} |
|||
} |
|||
} |
|||
|
|||
// check between masters
|
|||
for _, sourceMaster := range masters { |
|||
for _, targetMaster := range masters { |
|||
if sourceMaster == targetMaster { |
|||
continue |
|||
} |
|||
fmt.Fprintf(writer, "checking master %s to %s ... ", string(sourceMaster), string(targetMaster)) |
|||
err := pb.WithMasterClient(false, sourceMaster, commandEnv.option.GrpcDialOption, func(client master_pb.SeaweedClient) error { |
|||
_, err := client.Ping(context.Background(), &master_pb.PingRequest{ |
|||
Target: string(targetMaster), |
|||
TargetType: cluster.MasterType, |
|||
}) |
|||
return err |
|||
}) |
|||
if err == nil { |
|||
fmt.Fprintf(writer, "ok\n") |
|||
} else { |
|||
fmt.Fprintf(writer, "%v\n", err) |
|||
} |
|||
} |
|||
} |
|||
|
|||
// check from volume servers to masters
|
|||
for _, volumeServer := range volumeServers { |
|||
for _, master := range masters { |
|||
fmt.Fprintf(writer, "checking volume server %s to master %s ... ", string(volumeServer), string(master)) |
|||
err := pb.WithVolumeServerClient(false, volumeServer, commandEnv.option.GrpcDialOption, func(client volume_server_pb.VolumeServerClient) error { |
|||
_, err := client.Ping(context.Background(), &volume_server_pb.PingRequest{ |
|||
Target: string(master), |
|||
TargetType: cluster.MasterType, |
|||
}) |
|||
return err |
|||
}) |
|||
if err == nil { |
|||
fmt.Fprintf(writer, "ok\n") |
|||
} else { |
|||
fmt.Fprintf(writer, "%v\n", err) |
|||
} |
|||
} |
|||
} |
|||
|
|||
// check from filers to masters
|
|||
for _, filer := range filers { |
|||
for _, master := range masters { |
|||
fmt.Fprintf(writer, "checking filer %s to master %s ... ", string(filer), string(master)) |
|||
err := pb.WithFilerClient(false, filer, commandEnv.option.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error { |
|||
_, err := client.Ping(context.Background(), &filer_pb.PingRequest{ |
|||
Target: string(master), |
|||
TargetType: cluster.MasterType, |
|||
}) |
|||
return err |
|||
}) |
|||
if err == nil { |
|||
fmt.Fprintf(writer, "ok\n") |
|||
} else { |
|||
fmt.Fprintf(writer, "%v\n", err) |
|||
} |
|||
} |
|||
} |
|||
|
|||
// check from filers to volume servers
|
|||
for _, filer := range filers { |
|||
for _, volumeServer := range volumeServers { |
|||
fmt.Fprintf(writer, "checking filer %s to volume server %s ... ", string(filer), string(volumeServer)) |
|||
err := pb.WithFilerClient(false, filer, commandEnv.option.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error { |
|||
_, err := client.Ping(context.Background(), &filer_pb.PingRequest{ |
|||
Target: string(volumeServer), |
|||
TargetType: cluster.VolumeServerType, |
|||
}) |
|||
return err |
|||
}) |
|||
if err == nil { |
|||
fmt.Fprintf(writer, "ok\n") |
|||
} else { |
|||
fmt.Fprintf(writer, "%v\n", err) |
|||
} |
|||
} |
|||
} |
|||
|
|||
// check between volume servers
|
|||
for _, sourceVolumeServer := range volumeServers { |
|||
for _, targetVolumeServer := range volumeServers { |
|||
if sourceVolumeServer == targetVolumeServer { |
|||
continue |
|||
} |
|||
fmt.Fprintf(writer, "checking volume server %s to %s ... ", string(sourceVolumeServer), string(targetVolumeServer)) |
|||
err := pb.WithVolumeServerClient(false, sourceVolumeServer, commandEnv.option.GrpcDialOption, func(client volume_server_pb.VolumeServerClient) error { |
|||
_, err := client.Ping(context.Background(), &volume_server_pb.PingRequest{ |
|||
Target: string(targetVolumeServer), |
|||
TargetType: cluster.VolumeServerType, |
|||
}) |
|||
return err |
|||
}) |
|||
if err == nil { |
|||
fmt.Fprintf(writer, "ok\n") |
|||
} else { |
|||
fmt.Fprintf(writer, "%v\n", err) |
|||
} |
|||
} |
|||
} |
|||
|
|||
// check between filers, and need to connect to itself
|
|||
for _, sourceFiler := range filers { |
|||
for _, targetFiler := range filers { |
|||
fmt.Fprintf(writer, "checking filer %s to %s ... ", string(sourceFiler), string(targetFiler)) |
|||
err := pb.WithFilerClient(false, sourceFiler, commandEnv.option.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error { |
|||
_, err := client.Ping(context.Background(), &filer_pb.PingRequest{ |
|||
Target: string(targetFiler), |
|||
TargetType: cluster.FilerType, |
|||
}) |
|||
return err |
|||
}) |
|||
if err == nil { |
|||
fmt.Fprintf(writer, "ok\n") |
|||
} else { |
|||
fmt.Fprintf(writer, "%v\n", err) |
|||
} |
|||
} |
|||
} |
|||
|
|||
return nil |
|||
} |
@ -0,0 +1,64 @@ |
|||
package shell |
|||
|
|||
import ( |
|||
"context" |
|||
"flag" |
|||
"fmt" |
|||
"github.com/chrislusf/seaweedfs/weed/pb/mount_pb" |
|||
"github.com/chrislusf/seaweedfs/weed/util" |
|||
"google.golang.org/grpc" |
|||
"google.golang.org/grpc/credentials/insecure" |
|||
_ "google.golang.org/grpc/resolver/passthrough" |
|||
"io" |
|||
) |
|||
|
|||
// init registers the mount.configure command with the shell's command list.
func init() {
	Commands = append(Commands, &commandMountConfigure{})
}
|||
|
|||
// commandMountConfigure implements the "mount.configure" shell command,
// which reconfigures a locally running "weed mount" over its unix socket.
type commandMountConfigure struct {
}
|||
|
|||
// Name returns the shell command name used to invoke this command.
func (c *commandMountConfigure) Name() string {
	return "mount.configure"
}
|||
|
|||
// Help returns the usage text shown by the shell's help system.
func (c *commandMountConfigure) Help() string {
	return `configure the mount on current server

	mount.configure -dir=<mount_directory>

	This command connects with local mount via unix socket, so it can only run locally.
	The "mount_directory" value needs to be exactly the same as how mount was started in "weed mount -dir=<mount_directory>"

`
}
|||
|
|||
// Do connects to the locally running "weed mount" process over its unix
// socket and pushes a new quota configuration (-quotaMB, converted to
// bytes). The -dir flag must match the directory the mount was started
// with, because the socket name is derived from it.
func (c *commandMountConfigure) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {

	mountConfigureCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
	mountDir := mountConfigureCommand.String("dir", "", "the mount directory same as how \"weed mount -dir=<mount_directory>\" was started")
	mountQuota := mountConfigureCommand.Int("quotaMB", 0, "the quota in MB")
	if err = mountConfigureCommand.Parse(args); err != nil {
		return nil
	}

	// The socket name embeds a non-negative hash of the mount directory, so
	// it must be computed exactly as the mount process computes it.
	mountDirHash := util.HashToInt32([]byte(*mountDir))
	if mountDirHash < 0 {
		mountDirHash = -mountDirHash
	}
	// NOTE(review): "seaweefs" looks like a typo for "seaweedfs", but this
	// path must byte-match the socket created by the mount process (not
	// visible here) — confirm both sides before renaming.
	localSocket := fmt.Sprintf("/tmp/seaweefs-mount-%d.sock", mountDirHash)

	clientConn, err := grpc.Dial("passthrough:///unix://"+localSocket, grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		return
	}
	defer clientConn.Close()

	client := mount_pb.NewSeaweedMountClient(clientConn)
	// quotaMB is given in MB; the Configure RPC expects bytes.
	_, err = client.Configure(context.Background(), &mount_pb.ConfigureRequest{
		CollectionCapacity: int64(*mountQuota) * 1024 * 1024,
	})

	return
}
Write
Preview
Loading…
Cancel
Save
Reference in new issue