Merge branch 'master' of https://github.com/chrislusf/seaweedfs into chrislusf-master
pull/2939/head
guosj
3 years ago
149 changed files with 6049 additions and 2198 deletions
    9  .github/workflows/binaries_dev.yml
    5  .github/workflows/binaries_release0.yml
    5  .github/workflows/binaries_release1.yml
    5  .github/workflows/binaries_release2.yml
    5  .github/workflows/binaries_release3.yml
    2  .github/workflows/container_dev.yml
    2  .github/workflows/container_latest.yml
    2  .github/workflows/container_release1.yml
    2  .github/workflows/container_release2.yml
    2  .github/workflows/container_release3.yml
    4  README.md
    2  docker/Dockerfile.go_build
    2  docker/Dockerfile.go_build_large
    4  docker/Dockerfile.rocksdb_large
    3  docker/Makefile
   89  docker/compose/local-hashicorp-raft-compose.yml
    4  docker/compose/local-s3tests-compose.yml
  153  go.mod
  730  go.sum
    4  k8s/helm_charts2/Chart.yaml
   32  k8s/helm_charts2/templates/filer-statefulset.yaml
    6  k8s/helm_charts2/values.yaml
   13  other/java/client/src/main/proto/filer.proto
    2  other/java/hdfs-over-ftp/pom.xml
    2  other/java/hdfs3/pom.xml
   25  weed/cluster/cluster.go
    2  weed/command/benchmark.go
   37  weed/command/filer.go
    5  weed/command/filer_sync.go
    2  weed/command/iam.go
   60  weed/command/master.go
    2  weed/command/master_follower.go
    2  weed/command/mount.go
   24  weed/command/mount_std.go
   41  weed/command/s3.go
    3  weed/command/server.go
   21  weed/filer/filechunks.go
   10  weed/filer/filechunks2_test.go
   17  weed/filer/filechunks_read.go
    2  weed/filer/filer.go
    3  weed/filer/meta_aggregator.go
    6  weed/filer/redis/universal_redis_store.go
   13  weed/filer/stream.go
   32  weed/iamapi/iamapi_management_handlers.go
    5  weed/iamapi/iamapi_response.go
    2  weed/iamapi/iamapi_server.go
   21  weed/iamapi/iamapi_test.go
    6  weed/mount/filehandle.go
    1  weed/mount/filehandle_map.go
   17  weed/mount/page_writer/page_chunk_swapfile.go
    3  weed/mount/page_writer/upload_pipeline.go
    5  weed/mount/weedfs.go
    9  weed/mount/weedfs_attr.go
    1  weed/mount/weedfs_filehandle.go
   17  weed/mount/weedfs_grpc_server.go
   11  weed/mount/weedfs_quota.go
    1  weed/pb/Makefile
   13  weed/pb/filer.proto
  920  weed/pb/filer_pb/filer.pb.go
   37  weed/pb/filer_pb/filer_grpc.pb.go
   11  weed/pb/grpc_client_server.go
    4  weed/pb/iam_pb/iam.pb.go
    1  weed/pb/iam_pb/iam_grpc.pb.go
   49  weed/pb/master.proto
 1620  weed/pb/master_pb/master.pb.go
  145  weed/pb/master_pb/master_grpc.pb.go
    4  weed/pb/messaging_pb/messaging.pb.go
    1  weed/pb/messaging_pb/messaging_grpc.pb.go
   25  weed/pb/mount.proto
  208  weed/pb/mount_pb/mount.pb.go
  100  weed/pb/mount_pb/mount_grpc.pb.go
    4  weed/pb/remote_pb/remote.pb.go
   15  weed/pb/server_address.go
   14  weed/pb/volume_server.proto
  881  weed/pb/volume_server_pb/volume_server.pb.go
   37  weed/pb/volume_server_pb/volume_server_grpc.pb.go
   28  weed/remote_storage/remote_storage.go
   27  weed/s3api/filer_multipart.go
    6  weed/s3api/filer_multipart_test.go
   20  weed/s3api/s3api_bucket_handlers.go
   17  weed/s3api/s3api_object_handlers.go
   44  weed/s3api/s3api_object_multipart_handlers.go
   17  weed/s3api/s3api_server.go
    6  weed/s3api/s3err/s3api_errors.go
  126  weed/server/filer_grpc_server.go
  177  weed/server/filer_grpc_server_admin.go
    2  weed/server/filer_server.go
    6  weed/server/filer_server_handlers_read_dir.go
    4  weed/server/filer_server_handlers_tagging.go
   12  weed/server/filer_server_handlers_write_autochunk.go
   11  weed/server/filer_server_handlers_write_merge.go
    8  weed/server/filer_server_handlers_write_upload.go
    6  weed/server/filer_ui/breadcrumb.go
  247  weed/server/filer_ui/filer.html
   15  weed/server/master_grpc_server.go
   42  weed/server/master_grpc_server_admin.go
   66  weed/server/master_grpc_server_raft.go
    2  weed/server/master_grpc_server_volume.go
  164  weed/server/master_server.go
    2  weed/server/master_server_handlers_admin.go

docker/compose/local-hashicorp-raft-compose.yml
@@ -0,0 +1,89 @@
version: '2'

services:
  master0:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
    command: "-v=4 master -volumeSizeLimitMB 100 -raftHashicorp -ip=master0 -port=9333 -peers=master1:9334,master2:9335 -mdir=/data"
    volumes:
      - ./master/0:/data
    environment:
      WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
      WEED_MASTER_VOLUME_GROWTH_COPY_2: 2
      WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
  master1:
    image: chrislusf/seaweedfs:local
    ports:
      - 9334:9334
      - 19334:19334
    command: "-v=4 master -volumeSizeLimitMB 100 -raftHashicorp -ip=master1 -port=9334 -peers=master0:9333,master2:9335 -mdir=/data"
    volumes:
      - ./master/1:/data
    environment:
      WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
      WEED_MASTER_VOLUME_GROWTH_COPY_2: 2
      WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
  master2:
    image: chrislusf/seaweedfs:local
    ports:
      - 9335:9335
      - 19335:19335
    command: "-v=4 master -volumeSizeLimitMB 100 -raftHashicorp -ip=master2 -port=9335 -peers=master0:9333,master1:9334 -mdir=/data"
    volumes:
      - ./master/2:/data
    environment:
      WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
      WEED_MASTER_VOLUME_GROWTH_COPY_2: 2
      WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
  volume1:
    image: chrislusf/seaweedfs:local
    ports:
      - 8080:8080
      - 18080:18080
    command: 'volume -dataCenter=dc1 -rack=v1 -mserver="master0:9333,master1:9334,master2:9335" -port=8080 -ip=volume1 -publicUrl=localhost:8080 -preStopSeconds=1'
    depends_on:
      - master0
      - master1
  volume2:
    image: chrislusf/seaweedfs:local
    ports:
      - 8082:8082
      - 18082:18082
    command: 'volume -dataCenter=dc2 -rack=v2 -mserver="master0:9333,master1:9334,master2:9335" -port=8082 -ip=volume2 -publicUrl=localhost:8082 -preStopSeconds=1'
    depends_on:
      - master0
      - master1
  volume3:
    image: chrislusf/seaweedfs:local
    ports:
      - 8083:8083
      - 18083:18083
    command: 'volume -dataCenter=dc3 -rack=v3 -mserver="master0:9333,master1:9334,master2:9335" -port=8083 -ip=volume3 -publicUrl=localhost:8083 -preStopSeconds=1'
    depends_on:
      - master0
      - master1
  filer:
    image: chrislusf/seaweedfs:local
    ports:
      - 8888:8888
      - 18888:18888
      - 8111:8111
    command: 'filer -defaultReplicaPlacement=100 -iam -master="master0:9333,master1:9334,master2:9335"'
    depends_on:
      - master0
      - master1
      - volume1
      - volume2
  s3:
    image: chrislusf/seaweedfs:local
    ports:
      - 8333:8333
    command: '-v=9 s3 -ip.bind="s3" -filer="filer:8888"'
    depends_on:
      - master0
      - master1
      - volume1
      - volume2
      - filer

go.sum (730 lines changed): file diff suppressed because it is too large.

k8s/helm_charts2/Chart.yaml
@@ -1,5 +1,5 @@
 apiVersion: v1
 description: SeaweedFS
 name: seaweedfs
-appVersion: "2.95"
-version: "2.95"
+appVersion: "2.99"
+version: "2.99"

weed/mount/weedfs_grpc_server.go
@@ -0,0 +1,17 @@
package mount

import (
	"context"
	"fmt"
	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb/mount_pb"
)

func (wfs *WFS) Configure(ctx context.Context, request *mount_pb.ConfigureRequest) (*mount_pb.ConfigureResponse, error) {
	if wfs.option.Collection == "" {
		return nil, fmt.Errorf("mount quota only works when mounted to a new folder with a collection")
	}
	glog.V(0).Infof("quota changed from %d to %d", wfs.option.Quota, request.CollectionCapacity)
	wfs.option.Quota = request.GetCollectionCapacity()
	return &mount_pb.ConfigureResponse{}, nil
}

weed/pb/filer_pb/filer.pb.go (920 lines changed): file diff suppressed because it is too large.

weed/pb/master_pb/master.pb.go (1620 lines changed): file diff suppressed because it is too large.

weed/pb/mount.proto
@@ -0,0 +1,25 @@
syntax = "proto3";

package messaging_pb;

option go_package = "github.com/chrislusf/seaweedfs/weed/pb/mount_pb";
option java_package = "seaweedfs.client";
option java_outer_classname = "MountProto";

//////////////////////////////////////////////////

service SeaweedMount {

    rpc Configure (ConfigureRequest) returns (ConfigureResponse) {
    }

}

//////////////////////////////////////////////////

message ConfigureRequest {
    int64 collection_capacity = 1;
}

message ConfigureResponse {
}

weed/pb/mount_pb/mount.pb.go
@@ -0,0 +1,208 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// 	protoc-gen-go v1.28.0
// 	protoc        v3.19.4
// source: mount.proto

package mount_pb

import (
	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
	reflect "reflect"
	sync "sync"
)

const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

type ConfigureRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	CollectionCapacity int64 `protobuf:"varint,1,opt,name=collection_capacity,json=collectionCapacity,proto3" json:"collection_capacity,omitempty"`
}

func (x *ConfigureRequest) Reset() {
	*x = ConfigureRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_mount_proto_msgTypes[0]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *ConfigureRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ConfigureRequest) ProtoMessage() {}

func (x *ConfigureRequest) ProtoReflect() protoreflect.Message {
	mi := &file_mount_proto_msgTypes[0]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ConfigureRequest.ProtoReflect.Descriptor instead.
func (*ConfigureRequest) Descriptor() ([]byte, []int) {
	return file_mount_proto_rawDescGZIP(), []int{0}
}

func (x *ConfigureRequest) GetCollectionCapacity() int64 {
	if x != nil {
		return x.CollectionCapacity
	}
	return 0
}

type ConfigureResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
}

func (x *ConfigureResponse) Reset() {
	*x = ConfigureResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_mount_proto_msgTypes[1]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *ConfigureResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ConfigureResponse) ProtoMessage() {}

func (x *ConfigureResponse) ProtoReflect() protoreflect.Message {
	mi := &file_mount_proto_msgTypes[1]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ConfigureResponse.ProtoReflect.Descriptor instead.
func (*ConfigureResponse) Descriptor() ([]byte, []int) {
	return file_mount_proto_rawDescGZIP(), []int{1}
}

var File_mount_proto protoreflect.FileDescriptor

var file_mount_proto_rawDesc = []byte{
	0x0a, 0x0b, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x6d,
	0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x22, 0x43, 0x0a, 0x10, 0x43,
	0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
	0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x61,
	0x70, 0x61, 0x63, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x12, 0x63, 0x6f,
	0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79,
	0x22, 0x13, 0x0a, 0x11, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73,
	0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x5e, 0x0a, 0x0c, 0x53, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64,
	0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x4e, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75,
	0x72, 0x65, 0x12, 0x1e, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70,
	0x62, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
	0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70,
	0x62, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
	0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x4f, 0x0a, 0x10, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64,
	0x66, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x42, 0x0a, 0x4d, 0x6f, 0x75, 0x6e, 0x74,
	0x50, 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f,
	0x6d, 0x2f, 0x63, 0x68, 0x72, 0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, 0x65, 0x61, 0x77,
	0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x6d, 0x6f,
	0x75, 0x6e, 0x74, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}

var (
	file_mount_proto_rawDescOnce sync.Once
	file_mount_proto_rawDescData = file_mount_proto_rawDesc
)

func file_mount_proto_rawDescGZIP() []byte {
	file_mount_proto_rawDescOnce.Do(func() {
		file_mount_proto_rawDescData = protoimpl.X.CompressGZIP(file_mount_proto_rawDescData)
	})
	return file_mount_proto_rawDescData
}

var file_mount_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_mount_proto_goTypes = []interface{}{
	(*ConfigureRequest)(nil),  // 0: messaging_pb.ConfigureRequest
	(*ConfigureResponse)(nil), // 1: messaging_pb.ConfigureResponse
}
var file_mount_proto_depIdxs = []int32{
	0, // 0: messaging_pb.SeaweedMount.Configure:input_type -> messaging_pb.ConfigureRequest
	1, // 1: messaging_pb.SeaweedMount.Configure:output_type -> messaging_pb.ConfigureResponse
	1, // [1:2] is the sub-list for method output_type
	0, // [0:1] is the sub-list for method input_type
	0, // [0:0] is the sub-list for extension type_name
	0, // [0:0] is the sub-list for extension extendee
	0, // [0:0] is the sub-list for field type_name
}

func init() { file_mount_proto_init() }
func file_mount_proto_init() {
	if File_mount_proto != nil {
		return
	}
	if !protoimpl.UnsafeEnabled {
		file_mount_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*ConfigureRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_mount_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*ConfigureResponse); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_mount_proto_rawDesc,
			NumEnums:      0,
			NumMessages:   2,
			NumExtensions: 0,
			NumServices:   1,
		},
		GoTypes:           file_mount_proto_goTypes,
		DependencyIndexes: file_mount_proto_depIdxs,
		MessageInfos:      file_mount_proto_msgTypes,
	}.Build()
	File_mount_proto = out.File
	file_mount_proto_rawDesc = nil
	file_mount_proto_goTypes = nil
	file_mount_proto_depIdxs = nil
}

weed/pb/mount_pb/mount_grpc.pb.go
@@ -0,0 +1,100 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.

package mount_pb

import (
	context "context"
	grpc "google.golang.org/grpc"
	codes "google.golang.org/grpc/codes"
	status "google.golang.org/grpc/status"
)

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion7

// SeaweedMountClient is the client API for SeaweedMount service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type SeaweedMountClient interface {
	Configure(ctx context.Context, in *ConfigureRequest, opts ...grpc.CallOption) (*ConfigureResponse, error)
}

type seaweedMountClient struct {
	cc grpc.ClientConnInterface
}

func NewSeaweedMountClient(cc grpc.ClientConnInterface) SeaweedMountClient {
	return &seaweedMountClient{cc}
}

func (c *seaweedMountClient) Configure(ctx context.Context, in *ConfigureRequest, opts ...grpc.CallOption) (*ConfigureResponse, error) {
	out := new(ConfigureResponse)
	err := c.cc.Invoke(ctx, "/messaging_pb.SeaweedMount/Configure", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// SeaweedMountServer is the server API for SeaweedMount service.
// All implementations must embed UnimplementedSeaweedMountServer
// for forward compatibility
type SeaweedMountServer interface {
	Configure(context.Context, *ConfigureRequest) (*ConfigureResponse, error)
	mustEmbedUnimplementedSeaweedMountServer()
}

// UnimplementedSeaweedMountServer must be embedded to have forward compatible implementations.
type UnimplementedSeaweedMountServer struct {
}

func (UnimplementedSeaweedMountServer) Configure(context.Context, *ConfigureRequest) (*ConfigureResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method Configure not implemented")
}
func (UnimplementedSeaweedMountServer) mustEmbedUnimplementedSeaweedMountServer() {}

// UnsafeSeaweedMountServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to SeaweedMountServer will
// result in compilation errors.
type UnsafeSeaweedMountServer interface {
	mustEmbedUnimplementedSeaweedMountServer()
}

func RegisterSeaweedMountServer(s grpc.ServiceRegistrar, srv SeaweedMountServer) {
	s.RegisterService(&SeaweedMount_ServiceDesc, srv)
}

func _SeaweedMount_Configure_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(ConfigureRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(SeaweedMountServer).Configure(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/messaging_pb.SeaweedMount/Configure",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(SeaweedMountServer).Configure(ctx, req.(*ConfigureRequest))
	}
	return interceptor(ctx, in, info, handler)
}

// SeaweedMount_ServiceDesc is the grpc.ServiceDesc for SeaweedMount service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var SeaweedMount_ServiceDesc = grpc.ServiceDesc{
	ServiceName: "messaging_pb.SeaweedMount",
	HandlerType: (*SeaweedMountServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "Configure",
			Handler:    _SeaweedMount_Configure_Handler,
		},
	},
	Streams:  []grpc.StreamDesc{},
	Metadata: "mount.proto",
}
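
As a usage illustration (not part of this diff), a client could call the new SeaweedMount.Configure RPC through the generated mount_pb client roughly as sketched below; the mount gRPC address and the quota value are assumptions for a local test, not something this commit defines.

package main

import (
	"context"
	"log"

	"github.com/chrislusf/seaweedfs/weed/pb/mount_pb"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Assumed address of the mount's local gRPC endpoint; the real port depends on how weed mount is started.
	conn, err := grpc.Dial("localhost:10001", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := mount_pb.NewSeaweedMountClient(conn)
	// Ask the mount to treat the collection capacity (quota) as 1 GiB.
	if _, err := client.Configure(context.Background(), &mount_pb.ConfigureRequest{
		CollectionCapacity: 1024 * 1024 * 1024,
	}); err != nil {
		log.Fatal(err)
	}
	log.Println("quota updated")
}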

weed/pb/volume_server_pb/volume_server.pb.go (881 lines changed): file diff suppressed because it is too large.

weed/server/filer_grpc_server_admin.go
@@ -0,0 +1,177 @@
package weed_server

import (
	"context"
	"fmt"
	"github.com/chrislusf/seaweedfs/weed/cluster"
	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
	"github.com/chrislusf/seaweedfs/weed/util"
	"time"
)

func (fs *FilerServer) Statistics(ctx context.Context, req *filer_pb.StatisticsRequest) (resp *filer_pb.StatisticsResponse, err error) {

	var output *master_pb.StatisticsResponse

	err = fs.filer.MasterClient.WithClient(false, func(masterClient master_pb.SeaweedClient) error {
		grpcResponse, grpcErr := masterClient.Statistics(context.Background(), &master_pb.StatisticsRequest{
			Replication: req.Replication,
			Collection:  req.Collection,
			Ttl:         req.Ttl,
			DiskType:    req.DiskType,
		})
		if grpcErr != nil {
			return grpcErr
		}

		output = grpcResponse
		return nil
	})

	if err != nil {
		return nil, err
	}

	return &filer_pb.StatisticsResponse{
		TotalSize: output.TotalSize,
		UsedSize:  output.UsedSize,
		FileCount: output.FileCount,
	}, nil
}

func (fs *FilerServer) Ping(ctx context.Context, req *filer_pb.PingRequest) (resp *filer_pb.PingResponse, pingErr error) {
	resp = &filer_pb.PingResponse{
		StartTimeNs: time.Now().UnixNano(),
	}
	if req.TargetType == cluster.FilerType {
		pingErr = pb.WithFilerClient(false, pb.ServerAddress(req.Target), fs.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
			pingResp, err := client.Ping(ctx, &filer_pb.PingRequest{})
			if pingResp != nil {
				resp.RemoteTimeNs = pingResp.StartTimeNs
			}
			return err
		})
	}
	if req.TargetType == cluster.VolumeServerType {
		pingErr = pb.WithVolumeServerClient(false, pb.ServerAddress(req.Target), fs.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
			pingResp, err := client.Ping(ctx, &volume_server_pb.PingRequest{})
			if pingResp != nil {
				resp.RemoteTimeNs = pingResp.StartTimeNs
			}
			return err
		})
	}
	if req.TargetType == cluster.MasterType {
		pingErr = pb.WithMasterClient(false, pb.ServerAddress(req.Target), fs.grpcDialOption, func(client master_pb.SeaweedClient) error {
			pingResp, err := client.Ping(ctx, &master_pb.PingRequest{})
			if pingResp != nil {
				resp.RemoteTimeNs = pingResp.StartTimeNs
			}
			return err
		})
	}
	if pingErr != nil {
		pingErr = fmt.Errorf("ping %s %s: %v", req.TargetType, req.Target, pingErr)
	}
	resp.StopTimeNs = time.Now().UnixNano()
	return
}

func (fs *FilerServer) GetFilerConfiguration(ctx context.Context, req *filer_pb.GetFilerConfigurationRequest) (resp *filer_pb.GetFilerConfigurationResponse, err error) {

	clusterId, _ := fs.filer.Store.KvGet(context.Background(), []byte("clusterId"))

	t := &filer_pb.GetFilerConfigurationResponse{
		Masters:            pb.ToAddressStringsFromMap(fs.option.Masters),
		Collection:         fs.option.Collection,
		Replication:        fs.option.DefaultReplication,
		MaxMb:              uint32(fs.option.MaxMB),
		DirBuckets:         fs.filer.DirBucketsPath,
		Cipher:             fs.filer.Cipher,
		Signature:          fs.filer.Signature,
		MetricsAddress:     fs.metricsAddress,
		MetricsIntervalSec: int32(fs.metricsIntervalSec),
		Version:            util.Version(),
		ClusterId:          string(clusterId),
	}

	glog.V(4).Infof("GetFilerConfiguration: %v", t)

	return t, nil
}

func (fs *FilerServer) KeepConnected(stream filer_pb.SeaweedFiler_KeepConnectedServer) error {

	req, err := stream.Recv()
	if err != nil {
		return err
	}

	clientName := util.JoinHostPort(req.Name, int(req.GrpcPort))
	m := make(map[string]bool)
	for _, tp := range req.Resources {
		m[tp] = true
	}
	fs.brokersLock.Lock()
	fs.brokers[clientName] = m
	glog.V(0).Infof("+ broker %v", clientName)
	fs.brokersLock.Unlock()

	defer func() {
		fs.brokersLock.Lock()
		delete(fs.brokers, clientName)
		glog.V(0).Infof("- broker %v: %v", clientName, err)
		fs.brokersLock.Unlock()
	}()

	for {
		if err := stream.Send(&filer_pb.KeepConnectedResponse{}); err != nil {
			glog.V(0).Infof("send broker %v: %+v", clientName, err)
			return err
		}
		// println("replied")

		if _, err := stream.Recv(); err != nil {
			glog.V(0).Infof("recv broker %v: %v", clientName, err)
			return err
		}
		// println("received")
	}

}

func (fs *FilerServer) LocateBroker(ctx context.Context, req *filer_pb.LocateBrokerRequest) (resp *filer_pb.LocateBrokerResponse, err error) {

	resp = &filer_pb.LocateBrokerResponse{}

	fs.brokersLock.Lock()
	defer fs.brokersLock.Unlock()

	var localBrokers []*filer_pb.LocateBrokerResponse_Resource

	for b, m := range fs.brokers {
		if _, found := m[req.Resource]; found {
			resp.Found = true
			resp.Resources = []*filer_pb.LocateBrokerResponse_Resource{
				{
					GrpcAddresses: b,
					ResourceCount: int32(len(m)),
				},
			}
			return
		}
		localBrokers = append(localBrokers, &filer_pb.LocateBrokerResponse_Resource{
			GrpcAddresses: b,
			ResourceCount: int32(len(m)),
		})
	}

	resp.Resources = localBrokers

	return resp, nil

}
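
For context, a minimal sketch of exercising the new filer Ping RPC from the outside, assuming the filer's gRPC endpoint is reachable at localhost:18888 without TLS and that a master is running at localhost:9333 (both are assumptions, matching the compose file above rather than anything this commit mandates):

package main

import (
	"context"
	"log"

	"github.com/chrislusf/seaweedfs/weed/cluster"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Assumed filer gRPC address (HTTP port 8888 + 10000).
	conn, err := grpc.Dial("localhost:18888", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := filer_pb.NewSeaweedFilerClient(conn)
	// Ask the filer to ping one of its masters; FilerType and VolumeServerType targets are handled the same way.
	resp, err := client.Ping(context.Background(), &filer_pb.PingRequest{
		Target:     "localhost:9333",
		TargetType: cluster.MasterType,
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("filer->master ping: start=%d remote=%d stop=%d", resp.StartTimeNs, resp.RemoteTimeNs, resp.StopTimeNs)
}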

weed/server/filer_server_handlers_write_merge.go
@@ -0,0 +1,11 @@
package weed_server

import (
	"github.com/chrislusf/seaweedfs/weed/operation"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

func (fs *FilerServer) maybeMergeChunks(so *operation.StorageOption, inputChunks []*filer_pb.FileChunk) (mergedChunks []*filer_pb.FileChunk, err error) {
	// TODO: merge consecutive smaller chunks into a large chunk to reduce the number of chunks
	return inputChunks, nil
}

weed/server/master_grpc_server_raft.go
@@ -0,0 +1,66 @@
package weed_server

import (
	"context"
	"fmt"
	"github.com/chrislusf/seaweedfs/weed/cluster"
	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
	"github.com/hashicorp/raft"
)

func (ms *MasterServer) RaftListClusterServers(ctx context.Context, req *master_pb.RaftListClusterServersRequest) (*master_pb.RaftListClusterServersResponse, error) {
	resp := &master_pb.RaftListClusterServersResponse{}

	servers := ms.Topo.HashicorpRaft.GetConfiguration().Configuration().Servers

	for _, server := range servers {
		resp.ClusterServers = append(resp.ClusterServers, &master_pb.RaftListClusterServersResponse_ClusterServers{
			Id:       string(server.ID),
			Address:  string(server.Address),
			Suffrage: server.Suffrage.String(),
		})
	}
	return resp, nil
}

func (ms *MasterServer) RaftAddServer(ctx context.Context, req *master_pb.RaftAddServerRequest) (*master_pb.RaftAddServerResponse, error) {
	resp := &master_pb.RaftAddServerResponse{}
	if ms.Topo.HashicorpRaft.State() != raft.Leader {
		return nil, fmt.Errorf("raft add server %s failed: %s is not the current leader", req.Id, ms.Topo.HashicorpRaft.String())
	}

	var idxFuture raft.IndexFuture
	if req.Voter {
		idxFuture = ms.Topo.HashicorpRaft.AddVoter(raft.ServerID(req.Id), raft.ServerAddress(req.Address), 0, 0)
	} else {
		idxFuture = ms.Topo.HashicorpRaft.AddNonvoter(raft.ServerID(req.Id), raft.ServerAddress(req.Address), 0, 0)
	}

	if err := idxFuture.Error(); err != nil {
		return nil, err
	}
	return resp, nil
}

func (ms *MasterServer) RaftRemoveServer(ctx context.Context, req *master_pb.RaftRemoveServerRequest) (*master_pb.RaftRemoveServerResponse, error) {
	resp := &master_pb.RaftRemoveServerResponse{}

	if ms.Topo.HashicorpRaft.State() != raft.Leader {
		return nil, fmt.Errorf("raft remove server %s failed: %s is not the current leader", req.Id, ms.Topo.HashicorpRaft.String())
	}

	if !req.Force {
		ms.clientChansLock.RLock()
		_, ok := ms.clientChans[fmt.Sprintf("%s@%s", cluster.MasterType, req.Id)]
		ms.clientChansLock.RUnlock()
		if ok {
			return resp, fmt.Errorf("raft remove server %s failed: client connection to master exists", req.Id)
		}
	}

	idxFuture := ms.Topo.HashicorpRaft.RemoveServer(raft.ServerID(req.Id), 0, 0)
	if err := idxFuture.Error(); err != nil {
		return nil, err
	}
	return resp, nil
}
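
As a hedged sketch of how these raft admin handlers might be driven from a client, assuming the RaftListClusterServers RPC was added to the master's Seaweed gRPC service in this commit's master.proto change and that the master's gRPC port is 19333 (HTTP port + 10000, as in the compose file above):

package main

import (
	"context"
	"log"

	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Assumed master gRPC address for the local hashicorp-raft compose cluster.
	conn, err := grpc.Dial("localhost:19333", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := master_pb.NewSeaweedClient(conn)
	resp, err := client.RaftListClusterServers(context.Background(), &master_pb.RaftListClusterServersRequest{})
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range resp.ClusterServers {
		log.Printf("raft server id=%s address=%s suffrage=%s", s.Id, s.Address, s.Suffrage)
	}
}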
Some files were not shown because too many files changed in this diff