From d8cfa1552b4958a6d803184eb423a29c1f18aeaf Mon Sep 17 00:00:00 2001 From: Guo Lei Date: Wed, 28 Dec 2022 17:36:44 +0800 Subject: [PATCH 01/19] support enable/disable vacuum (#4087) * stop vacuum * suspend/resume vacuum * remove unused code * rename * rename param --- weed/pb/filer_pb/filer.pb.go | 6 +- weed/pb/filer_pb/filer_grpc.pb.go | 4 - weed/pb/iam_pb/iam_grpc.pb.go | 4 - weed/pb/master.proto | 14 + weed/pb/master_pb/master.pb.go | 664 +++++++++++++------ weed/pb/master_pb/master_grpc.pb.go | 76 ++- weed/pb/mount_pb/mount_grpc.pb.go | 4 - weed/pb/mq_pb/mq.pb.go | 4 +- weed/pb/mq_pb/mq_grpc.pb.go | 4 - weed/pb/remote_pb/remote.pb.go | 4 +- weed/pb/s3_pb/s3_grpc.pb.go | 4 - weed/pb/volume_server_pb/volume_server.pb.go | 4 +- weed/server/master_grpc_server_volume.go | 17 +- weed/shell/command_volume_vacuum_disable.go | 41 ++ weed/shell/command_volume_vacuum_enable.go | 41 ++ weed/topology/topology.go | 11 + weed/topology/topology_event_handling.go | 9 +- 17 files changed, 656 insertions(+), 255 deletions(-) create mode 100644 weed/shell/command_volume_vacuum_disable.go create mode 100644 weed/shell/command_volume_vacuum_enable.go diff --git a/weed/pb/filer_pb/filer.pb.go b/weed/pb/filer_pb/filer.pb.go index fa21531af..f7ec188b4 100644 --- a/weed/pb/filer_pb/filer.pb.go +++ b/weed/pb/filer_pb/filer.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 -// protoc v3.17.3 +// protoc-gen-go v1.28.1 +// protoc v3.21.4 // source: filer.proto package filer_pb @@ -4480,7 +4480,7 @@ var file_filer_proto_goTypes = []interface{}{ var file_filer_proto_depIdxs = []int32{ 5, // 0: filer_pb.LookupDirectoryEntryResponse.entry:type_name -> filer_pb.Entry 5, // 1: filer_pb.ListEntriesResponse.entry:type_name -> filer_pb.Entry - 8, // 2: filer_pb.Entry.GetChunks():type_name -> filer_pb.FileChunk + 8, // 2: filer_pb.Entry.chunks:type_name -> filer_pb.FileChunk 11, // 3: filer_pb.Entry.attributes:type_name -> filer_pb.FuseAttributes 55, // 4: filer_pb.Entry.extended:type_name -> filer_pb.Entry.ExtendedEntry 4, // 5: filer_pb.Entry.remote_entry:type_name -> filer_pb.RemoteEntry diff --git a/weed/pb/filer_pb/filer_grpc.pb.go b/weed/pb/filer_pb/filer_grpc.pb.go index dad1d7e68..270e13e6f 100644 --- a/weed/pb/filer_pb/filer_grpc.pb.go +++ b/weed/pb/filer_pb/filer_grpc.pb.go @@ -1,8 +1,4 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.21.4 -// source: filer.proto package filer_pb diff --git a/weed/pb/iam_pb/iam_grpc.pb.go b/weed/pb/iam_pb/iam_grpc.pb.go index f02c442ae..b9438a295 100644 --- a/weed/pb/iam_pb/iam_grpc.pb.go +++ b/weed/pb/iam_pb/iam_grpc.pb.go @@ -1,8 +1,4 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
-// versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.21.4 -// source: iam.proto package iam_pb diff --git a/weed/pb/master.proto b/weed/pb/master.proto index dcd53d396..50ed98bb5 100644 --- a/weed/pb/master.proto +++ b/weed/pb/master.proto @@ -27,6 +27,10 @@ service Seaweed { } rpc VacuumVolume (VacuumVolumeRequest) returns (VacuumVolumeResponse) { } + rpc DisableVacuum (DisableVacuumRequest) returns (DisableVacuumResponse) { + } + rpc EnableVacuum (EnableVacuumRequest) returns (EnableVacuumResponse) { + } rpc VolumeMarkReadonly (VolumeMarkReadonlyRequest) returns (VolumeMarkReadonlyResponse) { } rpc GetMasterConfiguration (GetMasterConfigurationRequest) returns (GetMasterConfigurationResponse) { @@ -305,6 +309,16 @@ message VacuumVolumeRequest { message VacuumVolumeResponse { } +message DisableVacuumRequest { +} +message DisableVacuumResponse { +} + +message EnableVacuumRequest { +} +message EnableVacuumResponse { +} + message VolumeMarkReadonlyRequest { string ip = 1; uint32 port = 2; diff --git a/weed/pb/master_pb/master.pb.go b/weed/pb/master_pb/master.pb.go index fc00c7795..27d1e6b1a 100644 --- a/weed/pb/master_pb/master.pb.go +++ b/weed/pb/master_pb/master.pb.go @@ -1611,7 +1611,9 @@ func (x *StatisticsResponse) GetFileCount() uint64 { return 0 } +// // collection related +// type Collection struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1846,7 +1848,9 @@ func (*CollectionDeleteResponse) Descriptor() ([]byte, []int) { return file_master_proto_rawDescGZIP(), []int{23} } +// // volume related +// type DiskInfo struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2498,6 +2502,158 @@ func (*VacuumVolumeResponse) Descriptor() ([]byte, []int) { return file_master_proto_rawDescGZIP(), []int{34} } +type DisableVacuumRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *DisableVacuumRequest) Reset() { + *x = DisableVacuumRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DisableVacuumRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DisableVacuumRequest) ProtoMessage() {} + +func (x *DisableVacuumRequest) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[35] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DisableVacuumRequest.ProtoReflect.Descriptor instead. 
+func (*DisableVacuumRequest) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{35} +} + +type DisableVacuumResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *DisableVacuumResponse) Reset() { + *x = DisableVacuumResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DisableVacuumResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DisableVacuumResponse) ProtoMessage() {} + +func (x *DisableVacuumResponse) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[36] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DisableVacuumResponse.ProtoReflect.Descriptor instead. +func (*DisableVacuumResponse) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{36} +} + +type EnableVacuumRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *EnableVacuumRequest) Reset() { + *x = EnableVacuumRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnableVacuumRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnableVacuumRequest) ProtoMessage() {} + +func (x *EnableVacuumRequest) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[37] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnableVacuumRequest.ProtoReflect.Descriptor instead. +func (*EnableVacuumRequest) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{37} +} + +type EnableVacuumResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *EnableVacuumResponse) Reset() { + *x = EnableVacuumResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnableVacuumResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnableVacuumResponse) ProtoMessage() {} + +func (x *EnableVacuumResponse) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[38] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnableVacuumResponse.ProtoReflect.Descriptor instead. 
+func (*EnableVacuumResponse) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{38} +} + type VolumeMarkReadonlyRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2517,7 +2673,7 @@ type VolumeMarkReadonlyRequest struct { func (x *VolumeMarkReadonlyRequest) Reset() { *x = VolumeMarkReadonlyRequest{} if protoimpl.UnsafeEnabled { - mi := &file_master_proto_msgTypes[35] + mi := &file_master_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2530,7 +2686,7 @@ func (x *VolumeMarkReadonlyRequest) String() string { func (*VolumeMarkReadonlyRequest) ProtoMessage() {} func (x *VolumeMarkReadonlyRequest) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[35] + mi := &file_master_proto_msgTypes[39] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2543,7 +2699,7 @@ func (x *VolumeMarkReadonlyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeMarkReadonlyRequest.ProtoReflect.Descriptor instead. func (*VolumeMarkReadonlyRequest) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{35} + return file_master_proto_rawDescGZIP(), []int{39} } func (x *VolumeMarkReadonlyRequest) GetIp() string { @@ -2618,7 +2774,7 @@ type VolumeMarkReadonlyResponse struct { func (x *VolumeMarkReadonlyResponse) Reset() { *x = VolumeMarkReadonlyResponse{} if protoimpl.UnsafeEnabled { - mi := &file_master_proto_msgTypes[36] + mi := &file_master_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2631,7 +2787,7 @@ func (x *VolumeMarkReadonlyResponse) String() string { func (*VolumeMarkReadonlyResponse) ProtoMessage() {} func (x *VolumeMarkReadonlyResponse) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[36] + mi := &file_master_proto_msgTypes[40] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2644,7 +2800,7 @@ func (x *VolumeMarkReadonlyResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeMarkReadonlyResponse.ProtoReflect.Descriptor instead. func (*VolumeMarkReadonlyResponse) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{36} + return file_master_proto_rawDescGZIP(), []int{40} } type GetMasterConfigurationRequest struct { @@ -2656,7 +2812,7 @@ type GetMasterConfigurationRequest struct { func (x *GetMasterConfigurationRequest) Reset() { *x = GetMasterConfigurationRequest{} if protoimpl.UnsafeEnabled { - mi := &file_master_proto_msgTypes[37] + mi := &file_master_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2669,7 +2825,7 @@ func (x *GetMasterConfigurationRequest) String() string { func (*GetMasterConfigurationRequest) ProtoMessage() {} func (x *GetMasterConfigurationRequest) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[37] + mi := &file_master_proto_msgTypes[41] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2682,7 +2838,7 @@ func (x *GetMasterConfigurationRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetMasterConfigurationRequest.ProtoReflect.Descriptor instead. 
func (*GetMasterConfigurationRequest) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{37} + return file_master_proto_rawDescGZIP(), []int{41} } type GetMasterConfigurationResponse struct { @@ -2702,7 +2858,7 @@ type GetMasterConfigurationResponse struct { func (x *GetMasterConfigurationResponse) Reset() { *x = GetMasterConfigurationResponse{} if protoimpl.UnsafeEnabled { - mi := &file_master_proto_msgTypes[38] + mi := &file_master_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2715,7 +2871,7 @@ func (x *GetMasterConfigurationResponse) String() string { func (*GetMasterConfigurationResponse) ProtoMessage() {} func (x *GetMasterConfigurationResponse) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[38] + mi := &file_master_proto_msgTypes[42] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2728,7 +2884,7 @@ func (x *GetMasterConfigurationResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetMasterConfigurationResponse.ProtoReflect.Descriptor instead. func (*GetMasterConfigurationResponse) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{38} + return file_master_proto_rawDescGZIP(), []int{42} } func (x *GetMasterConfigurationResponse) GetMetricsAddress() string { @@ -2794,7 +2950,7 @@ type ListClusterNodesRequest struct { func (x *ListClusterNodesRequest) Reset() { *x = ListClusterNodesRequest{} if protoimpl.UnsafeEnabled { - mi := &file_master_proto_msgTypes[39] + mi := &file_master_proto_msgTypes[43] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2807,7 +2963,7 @@ func (x *ListClusterNodesRequest) String() string { func (*ListClusterNodesRequest) ProtoMessage() {} func (x *ListClusterNodesRequest) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[39] + mi := &file_master_proto_msgTypes[43] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2820,7 +2976,7 @@ func (x *ListClusterNodesRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ListClusterNodesRequest.ProtoReflect.Descriptor instead. func (*ListClusterNodesRequest) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{39} + return file_master_proto_rawDescGZIP(), []int{43} } func (x *ListClusterNodesRequest) GetClientType() string { @@ -2862,7 +3018,7 @@ type ListClusterNodesResponse struct { func (x *ListClusterNodesResponse) Reset() { *x = ListClusterNodesResponse{} if protoimpl.UnsafeEnabled { - mi := &file_master_proto_msgTypes[40] + mi := &file_master_proto_msgTypes[44] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2875,7 +3031,7 @@ func (x *ListClusterNodesResponse) String() string { func (*ListClusterNodesResponse) ProtoMessage() {} func (x *ListClusterNodesResponse) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[40] + mi := &file_master_proto_msgTypes[44] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2888,7 +3044,7 @@ func (x *ListClusterNodesResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListClusterNodesResponse.ProtoReflect.Descriptor instead. 
func (*ListClusterNodesResponse) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{40} + return file_master_proto_rawDescGZIP(), []int{44} } func (x *ListClusterNodesResponse) GetClusterNodes() []*ListClusterNodesResponse_ClusterNode { @@ -2913,7 +3069,7 @@ type LeaseAdminTokenRequest struct { func (x *LeaseAdminTokenRequest) Reset() { *x = LeaseAdminTokenRequest{} if protoimpl.UnsafeEnabled { - mi := &file_master_proto_msgTypes[41] + mi := &file_master_proto_msgTypes[45] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2926,7 +3082,7 @@ func (x *LeaseAdminTokenRequest) String() string { func (*LeaseAdminTokenRequest) ProtoMessage() {} func (x *LeaseAdminTokenRequest) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[41] + mi := &file_master_proto_msgTypes[45] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2939,7 +3095,7 @@ func (x *LeaseAdminTokenRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use LeaseAdminTokenRequest.ProtoReflect.Descriptor instead. func (*LeaseAdminTokenRequest) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{41} + return file_master_proto_rawDescGZIP(), []int{45} } func (x *LeaseAdminTokenRequest) GetPreviousToken() int64 { @@ -2989,7 +3145,7 @@ type LeaseAdminTokenResponse struct { func (x *LeaseAdminTokenResponse) Reset() { *x = LeaseAdminTokenResponse{} if protoimpl.UnsafeEnabled { - mi := &file_master_proto_msgTypes[42] + mi := &file_master_proto_msgTypes[46] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3002,7 +3158,7 @@ func (x *LeaseAdminTokenResponse) String() string { func (*LeaseAdminTokenResponse) ProtoMessage() {} func (x *LeaseAdminTokenResponse) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[42] + mi := &file_master_proto_msgTypes[46] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3015,7 +3171,7 @@ func (x *LeaseAdminTokenResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use LeaseAdminTokenResponse.ProtoReflect.Descriptor instead. func (*LeaseAdminTokenResponse) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{42} + return file_master_proto_rawDescGZIP(), []int{46} } func (x *LeaseAdminTokenResponse) GetToken() int64 { @@ -3045,7 +3201,7 @@ type ReleaseAdminTokenRequest struct { func (x *ReleaseAdminTokenRequest) Reset() { *x = ReleaseAdminTokenRequest{} if protoimpl.UnsafeEnabled { - mi := &file_master_proto_msgTypes[43] + mi := &file_master_proto_msgTypes[47] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3058,7 +3214,7 @@ func (x *ReleaseAdminTokenRequest) String() string { func (*ReleaseAdminTokenRequest) ProtoMessage() {} func (x *ReleaseAdminTokenRequest) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[43] + mi := &file_master_proto_msgTypes[47] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3071,7 +3227,7 @@ func (x *ReleaseAdminTokenRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ReleaseAdminTokenRequest.ProtoReflect.Descriptor instead. 
func (*ReleaseAdminTokenRequest) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{43} + return file_master_proto_rawDescGZIP(), []int{47} } func (x *ReleaseAdminTokenRequest) GetPreviousToken() int64 { @@ -3104,7 +3260,7 @@ type ReleaseAdminTokenResponse struct { func (x *ReleaseAdminTokenResponse) Reset() { *x = ReleaseAdminTokenResponse{} if protoimpl.UnsafeEnabled { - mi := &file_master_proto_msgTypes[44] + mi := &file_master_proto_msgTypes[48] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3117,7 +3273,7 @@ func (x *ReleaseAdminTokenResponse) String() string { func (*ReleaseAdminTokenResponse) ProtoMessage() {} func (x *ReleaseAdminTokenResponse) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[44] + mi := &file_master_proto_msgTypes[48] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3130,7 +3286,7 @@ func (x *ReleaseAdminTokenResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ReleaseAdminTokenResponse.ProtoReflect.Descriptor instead. func (*ReleaseAdminTokenResponse) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{44} + return file_master_proto_rawDescGZIP(), []int{48} } type PingRequest struct { @@ -3145,7 +3301,7 @@ type PingRequest struct { func (x *PingRequest) Reset() { *x = PingRequest{} if protoimpl.UnsafeEnabled { - mi := &file_master_proto_msgTypes[45] + mi := &file_master_proto_msgTypes[49] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3158,7 +3314,7 @@ func (x *PingRequest) String() string { func (*PingRequest) ProtoMessage() {} func (x *PingRequest) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[45] + mi := &file_master_proto_msgTypes[49] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3171,7 +3327,7 @@ func (x *PingRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PingRequest.ProtoReflect.Descriptor instead. func (*PingRequest) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{45} + return file_master_proto_rawDescGZIP(), []int{49} } func (x *PingRequest) GetTarget() string { @@ -3201,7 +3357,7 @@ type PingResponse struct { func (x *PingResponse) Reset() { *x = PingResponse{} if protoimpl.UnsafeEnabled { - mi := &file_master_proto_msgTypes[46] + mi := &file_master_proto_msgTypes[50] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3214,7 +3370,7 @@ func (x *PingResponse) String() string { func (*PingResponse) ProtoMessage() {} func (x *PingResponse) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[46] + mi := &file_master_proto_msgTypes[50] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3227,7 +3383,7 @@ func (x *PingResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use PingResponse.ProtoReflect.Descriptor instead. 
func (*PingResponse) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{46} + return file_master_proto_rawDescGZIP(), []int{50} } func (x *PingResponse) GetStartTimeNs() int64 { @@ -3264,7 +3420,7 @@ type RaftAddServerRequest struct { func (x *RaftAddServerRequest) Reset() { *x = RaftAddServerRequest{} if protoimpl.UnsafeEnabled { - mi := &file_master_proto_msgTypes[47] + mi := &file_master_proto_msgTypes[51] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3277,7 +3433,7 @@ func (x *RaftAddServerRequest) String() string { func (*RaftAddServerRequest) ProtoMessage() {} func (x *RaftAddServerRequest) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[47] + mi := &file_master_proto_msgTypes[51] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3290,7 +3446,7 @@ func (x *RaftAddServerRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RaftAddServerRequest.ProtoReflect.Descriptor instead. func (*RaftAddServerRequest) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{47} + return file_master_proto_rawDescGZIP(), []int{51} } func (x *RaftAddServerRequest) GetId() string { @@ -3323,7 +3479,7 @@ type RaftAddServerResponse struct { func (x *RaftAddServerResponse) Reset() { *x = RaftAddServerResponse{} if protoimpl.UnsafeEnabled { - mi := &file_master_proto_msgTypes[48] + mi := &file_master_proto_msgTypes[52] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3336,7 +3492,7 @@ func (x *RaftAddServerResponse) String() string { func (*RaftAddServerResponse) ProtoMessage() {} func (x *RaftAddServerResponse) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[48] + mi := &file_master_proto_msgTypes[52] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3349,7 +3505,7 @@ func (x *RaftAddServerResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RaftAddServerResponse.ProtoReflect.Descriptor instead. func (*RaftAddServerResponse) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{48} + return file_master_proto_rawDescGZIP(), []int{52} } type RaftRemoveServerRequest struct { @@ -3364,7 +3520,7 @@ type RaftRemoveServerRequest struct { func (x *RaftRemoveServerRequest) Reset() { *x = RaftRemoveServerRequest{} if protoimpl.UnsafeEnabled { - mi := &file_master_proto_msgTypes[49] + mi := &file_master_proto_msgTypes[53] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3377,7 +3533,7 @@ func (x *RaftRemoveServerRequest) String() string { func (*RaftRemoveServerRequest) ProtoMessage() {} func (x *RaftRemoveServerRequest) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[49] + mi := &file_master_proto_msgTypes[53] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3390,7 +3546,7 @@ func (x *RaftRemoveServerRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RaftRemoveServerRequest.ProtoReflect.Descriptor instead. 
func (*RaftRemoveServerRequest) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{49} + return file_master_proto_rawDescGZIP(), []int{53} } func (x *RaftRemoveServerRequest) GetId() string { @@ -3416,7 +3572,7 @@ type RaftRemoveServerResponse struct { func (x *RaftRemoveServerResponse) Reset() { *x = RaftRemoveServerResponse{} if protoimpl.UnsafeEnabled { - mi := &file_master_proto_msgTypes[50] + mi := &file_master_proto_msgTypes[54] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3429,7 +3585,7 @@ func (x *RaftRemoveServerResponse) String() string { func (*RaftRemoveServerResponse) ProtoMessage() {} func (x *RaftRemoveServerResponse) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[50] + mi := &file_master_proto_msgTypes[54] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3442,7 +3598,7 @@ func (x *RaftRemoveServerResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RaftRemoveServerResponse.ProtoReflect.Descriptor instead. func (*RaftRemoveServerResponse) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{50} + return file_master_proto_rawDescGZIP(), []int{54} } type RaftListClusterServersRequest struct { @@ -3454,7 +3610,7 @@ type RaftListClusterServersRequest struct { func (x *RaftListClusterServersRequest) Reset() { *x = RaftListClusterServersRequest{} if protoimpl.UnsafeEnabled { - mi := &file_master_proto_msgTypes[51] + mi := &file_master_proto_msgTypes[55] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3467,7 +3623,7 @@ func (x *RaftListClusterServersRequest) String() string { func (*RaftListClusterServersRequest) ProtoMessage() {} func (x *RaftListClusterServersRequest) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[51] + mi := &file_master_proto_msgTypes[55] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3480,7 +3636,7 @@ func (x *RaftListClusterServersRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RaftListClusterServersRequest.ProtoReflect.Descriptor instead. func (*RaftListClusterServersRequest) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{51} + return file_master_proto_rawDescGZIP(), []int{55} } type RaftListClusterServersResponse struct { @@ -3494,7 +3650,7 @@ type RaftListClusterServersResponse struct { func (x *RaftListClusterServersResponse) Reset() { *x = RaftListClusterServersResponse{} if protoimpl.UnsafeEnabled { - mi := &file_master_proto_msgTypes[52] + mi := &file_master_proto_msgTypes[56] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3507,7 +3663,7 @@ func (x *RaftListClusterServersResponse) String() string { func (*RaftListClusterServersResponse) ProtoMessage() {} func (x *RaftListClusterServersResponse) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[52] + mi := &file_master_proto_msgTypes[56] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3520,7 +3676,7 @@ func (x *RaftListClusterServersResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RaftListClusterServersResponse.ProtoReflect.Descriptor instead. 
func (*RaftListClusterServersResponse) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{52} + return file_master_proto_rawDescGZIP(), []int{56} } func (x *RaftListClusterServersResponse) GetClusterServers() []*RaftListClusterServersResponse_ClusterServers { @@ -3543,7 +3699,7 @@ type SuperBlockExtra_ErasureCoding struct { func (x *SuperBlockExtra_ErasureCoding) Reset() { *x = SuperBlockExtra_ErasureCoding{} if protoimpl.UnsafeEnabled { - mi := &file_master_proto_msgTypes[55] + mi := &file_master_proto_msgTypes[59] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3556,7 +3712,7 @@ func (x *SuperBlockExtra_ErasureCoding) String() string { func (*SuperBlockExtra_ErasureCoding) ProtoMessage() {} func (x *SuperBlockExtra_ErasureCoding) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[55] + mi := &file_master_proto_msgTypes[59] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3607,7 +3763,7 @@ type LookupVolumeResponse_VolumeIdLocation struct { func (x *LookupVolumeResponse_VolumeIdLocation) Reset() { *x = LookupVolumeResponse_VolumeIdLocation{} if protoimpl.UnsafeEnabled { - mi := &file_master_proto_msgTypes[56] + mi := &file_master_proto_msgTypes[60] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3620,7 +3776,7 @@ func (x *LookupVolumeResponse_VolumeIdLocation) String() string { func (*LookupVolumeResponse_VolumeIdLocation) ProtoMessage() {} func (x *LookupVolumeResponse_VolumeIdLocation) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[56] + mi := &file_master_proto_msgTypes[60] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3676,7 +3832,7 @@ type LookupEcVolumeResponse_EcShardIdLocation struct { func (x *LookupEcVolumeResponse_EcShardIdLocation) Reset() { *x = LookupEcVolumeResponse_EcShardIdLocation{} if protoimpl.UnsafeEnabled { - mi := &file_master_proto_msgTypes[61] + mi := &file_master_proto_msgTypes[65] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3689,7 +3845,7 @@ func (x *LookupEcVolumeResponse_EcShardIdLocation) String() string { func (*LookupEcVolumeResponse_EcShardIdLocation) ProtoMessage() {} func (x *LookupEcVolumeResponse_EcShardIdLocation) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[61] + mi := &file_master_proto_msgTypes[65] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3735,7 +3891,7 @@ type ListClusterNodesResponse_ClusterNode struct { func (x *ListClusterNodesResponse_ClusterNode) Reset() { *x = ListClusterNodesResponse_ClusterNode{} if protoimpl.UnsafeEnabled { - mi := &file_master_proto_msgTypes[62] + mi := &file_master_proto_msgTypes[66] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3748,7 +3904,7 @@ func (x *ListClusterNodesResponse_ClusterNode) String() string { func (*ListClusterNodesResponse_ClusterNode) ProtoMessage() {} func (x *ListClusterNodesResponse_ClusterNode) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[62] + mi := &file_master_proto_msgTypes[66] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3761,7 +3917,7 @@ func (x 
*ListClusterNodesResponse_ClusterNode) ProtoReflect() protoreflect.Messa // Deprecated: Use ListClusterNodesResponse_ClusterNode.ProtoReflect.Descriptor instead. func (*ListClusterNodesResponse_ClusterNode) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{40, 0} + return file_master_proto_rawDescGZIP(), []int{44, 0} } func (x *ListClusterNodesResponse_ClusterNode) GetAddress() string { @@ -3820,7 +3976,7 @@ type RaftListClusterServersResponse_ClusterServers struct { func (x *RaftListClusterServersResponse_ClusterServers) Reset() { *x = RaftListClusterServersResponse_ClusterServers{} if protoimpl.UnsafeEnabled { - mi := &file_master_proto_msgTypes[63] + mi := &file_master_proto_msgTypes[67] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3833,7 +3989,7 @@ func (x *RaftListClusterServersResponse_ClusterServers) String() string { func (*RaftListClusterServersResponse_ClusterServers) ProtoMessage() {} func (x *RaftListClusterServersResponse_ClusterServers) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[63] + mi := &file_master_proto_msgTypes[67] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3846,7 +4002,7 @@ func (x *RaftListClusterServersResponse_ClusterServers) ProtoReflect() protorefl // Deprecated: Use RaftListClusterServersResponse_ClusterServers.ProtoReflect.Descriptor instead. func (*RaftListClusterServersResponse_ClusterServers) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{52, 0} + return file_master_proto_rawDescGZIP(), []int{56, 0} } func (x *RaftListClusterServersResponse_ClusterServers) GetId() string { @@ -4319,6 +4475,12 @@ var file_master_proto_rawDesc = []byte{ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x16, 0x0a, 0x14, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x16, 0x0a, 0x14, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x17, 0x0a, 0x15, 0x44, 0x69, 0x73, 0x61, 0x62, + 0x6c, 0x65, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x15, 0x0a, 0x13, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x16, 0x0a, 0x14, 0x45, 0x6e, 0x61, 0x62, 0x6c, + 0x65, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x93, 0x02, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x52, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x70, 0x12, 0x12, 0x0a, @@ -4463,7 +4625,7 @@ var file_master_proto_rawDesc = []byte{ 0x66, 0x72, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x75, 0x66, 0x66, 0x72, 0x61, 0x67, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x69, 0x73, 0x4c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x69, 0x73, 0x4c, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x32, 0x94, 0x0d, 0x0a, 0x07, 0x53, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x12, 0x49, 0x0a, + 0x72, 0x32, 0xbd, 0x0e, 0x0a, 0x07, 0x53, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x12, 0x49, 0x0a, 0x0d, 0x53, 0x65, 0x6e, 0x64, 
0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x12, 0x14, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, @@ -4515,64 +4677,74 @@ var file_master_proto_rawDesc = []byte{ 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0x63, 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, - 0x72, 0x6b, 0x52, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x12, 0x24, 0x2e, 0x6d, 0x61, 0x73, - 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, - 0x6b, 0x52, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x25, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, - 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x52, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6f, 0x0a, 0x16, 0x47, 0x65, 0x74, - 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, - 0x47, 0x65, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, - 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x73, - 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x4c, 0x69, - 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x22, - 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, - 0x69, 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x4c, 0x65, 0x61, - 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x21, 0x2e, 0x6d, - 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, - 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x22, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x65, 0x61, 0x73, - 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x11, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, - 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x23, 0x2e, 0x6d, 0x61, 0x73, - 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, - 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x24, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x6c, 0x65, - 
0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x39, 0x0a, 0x04, 0x50, 0x69, 0x6e, 0x67, 0x12, - 0x16, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x50, 0x69, 0x6e, 0x67, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, - 0x5f, 0x70, 0x62, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x6f, 0x0a, 0x16, 0x52, 0x61, 0x66, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x12, 0x28, 0x2e, 0x6d, - 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x61, 0x66, 0x74, 0x4c, 0x69, 0x73, - 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x52, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x54, 0x0a, 0x0d, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x56, + 0x61, 0x63, 0x75, 0x75, 0x6d, 0x12, 0x1f, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x45, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x12, 0x1e, 0x2e, 0x6d, 0x61, 0x73, + 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x63, + 0x75, 0x75, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x6d, 0x61, 0x73, + 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x63, + 0x75, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x63, 0x0a, + 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x52, 0x65, 0x61, 0x64, 0x6f, + 0x6e, 0x6c, 0x79, 0x12, 0x24, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x52, 0x65, 0x61, 0x64, 0x6f, 0x6e, + 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x6d, 0x61, 0x73, 0x74, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, + 0x52, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x6f, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x2e, 0x6d, + 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x73, 0x74, + 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, - 0x70, 0x62, 0x2e, 0x52, 0x61, 0x66, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x54, 0x0a, 0x0d, 0x52, 0x61, 0x66, 0x74, 0x41, 0x64, 0x64, 0x53, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x12, 0x1f, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, - 0x2e, 0x52, 0x61, 0x66, 0x74, 0x41, 0x64, 0x64, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 
0x74, 0x1a, 0x20, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, - 0x62, 0x2e, 0x52, 0x61, 0x66, 0x74, 0x41, 0x64, 0x64, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x52, 0x61, 0x66, - 0x74, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x22, 0x2e, - 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x61, 0x66, 0x74, 0x52, 0x65, - 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x23, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x61, - 0x66, 0x74, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, - 0x2f, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, - 0x70, 0x62, 0x2f, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x22, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, + 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x6d, 0x61, + 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, + 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x21, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, + 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x60, + 0x0a, 0x11, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, + 0x6b, 0x65, 0x6e, 0x12, 0x23, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, + 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x39, 0x0a, 0x04, 0x50, 0x69, 0x6e, 0x67, 0x12, 0x16, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x17, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x50, 0x69, 0x6e, + 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6f, 0x0a, 0x16, 0x52, + 0x61, 0x66, 0x74, 0x4c, 
0x69, 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x73, 0x12, 0x28, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x52, 0x61, 0x66, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x29, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x61, 0x66, 0x74, + 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x54, 0x0a, 0x0d, + 0x52, 0x61, 0x66, 0x74, 0x41, 0x64, 0x64, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x1f, 0x2e, + 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x61, 0x66, 0x74, 0x41, 0x64, + 0x64, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, + 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x61, 0x66, 0x74, 0x41, + 0x64, 0x64, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x52, 0x61, 0x66, 0x74, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x22, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x52, 0x61, 0x66, 0x74, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x6d, 0x61, 0x73, + 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x61, 0x66, 0x74, 0x52, 0x65, 0x6d, 0x6f, 0x76, + 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, + 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x6d, 0x61, 0x73, 0x74, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -4587,7 +4759,7 @@ func file_master_proto_rawDescGZIP() []byte { return file_master_proto_rawDescData } -var file_master_proto_msgTypes = make([]protoimpl.MessageInfo, 64) +var file_master_proto_msgTypes = make([]protoimpl.MessageInfo, 68) var file_master_proto_goTypes = []interface{}{ (*Heartbeat)(nil), // 0: master_pb.Heartbeat (*HeartbeatResponse)(nil), // 1: master_pb.HeartbeatResponse @@ -4624,35 +4796,39 @@ var file_master_proto_goTypes = []interface{}{ (*LookupEcVolumeResponse)(nil), // 32: master_pb.LookupEcVolumeResponse (*VacuumVolumeRequest)(nil), // 33: master_pb.VacuumVolumeRequest (*VacuumVolumeResponse)(nil), // 34: master_pb.VacuumVolumeResponse - (*VolumeMarkReadonlyRequest)(nil), // 35: master_pb.VolumeMarkReadonlyRequest - (*VolumeMarkReadonlyResponse)(nil), // 36: master_pb.VolumeMarkReadonlyResponse - (*GetMasterConfigurationRequest)(nil), // 37: master_pb.GetMasterConfigurationRequest - (*GetMasterConfigurationResponse)(nil), // 38: master_pb.GetMasterConfigurationResponse - (*ListClusterNodesRequest)(nil), // 39: master_pb.ListClusterNodesRequest - (*ListClusterNodesResponse)(nil), // 40: master_pb.ListClusterNodesResponse - (*LeaseAdminTokenRequest)(nil), // 41: master_pb.LeaseAdminTokenRequest - (*LeaseAdminTokenResponse)(nil), // 42: master_pb.LeaseAdminTokenResponse - (*ReleaseAdminTokenRequest)(nil), // 43: master_pb.ReleaseAdminTokenRequest - (*ReleaseAdminTokenResponse)(nil), // 
44: master_pb.ReleaseAdminTokenResponse - (*PingRequest)(nil), // 45: master_pb.PingRequest - (*PingResponse)(nil), // 46: master_pb.PingResponse - (*RaftAddServerRequest)(nil), // 47: master_pb.RaftAddServerRequest - (*RaftAddServerResponse)(nil), // 48: master_pb.RaftAddServerResponse - (*RaftRemoveServerRequest)(nil), // 49: master_pb.RaftRemoveServerRequest - (*RaftRemoveServerResponse)(nil), // 50: master_pb.RaftRemoveServerResponse - (*RaftListClusterServersRequest)(nil), // 51: master_pb.RaftListClusterServersRequest - (*RaftListClusterServersResponse)(nil), // 52: master_pb.RaftListClusterServersResponse - nil, // 53: master_pb.Heartbeat.MaxVolumeCountsEntry - nil, // 54: master_pb.StorageBackend.PropertiesEntry - (*SuperBlockExtra_ErasureCoding)(nil), // 55: master_pb.SuperBlockExtra.ErasureCoding - (*LookupVolumeResponse_VolumeIdLocation)(nil), // 56: master_pb.LookupVolumeResponse.VolumeIdLocation - nil, // 57: master_pb.DataNodeInfo.DiskInfosEntry - nil, // 58: master_pb.RackInfo.DiskInfosEntry - nil, // 59: master_pb.DataCenterInfo.DiskInfosEntry - nil, // 60: master_pb.TopologyInfo.DiskInfosEntry - (*LookupEcVolumeResponse_EcShardIdLocation)(nil), // 61: master_pb.LookupEcVolumeResponse.EcShardIdLocation - (*ListClusterNodesResponse_ClusterNode)(nil), // 62: master_pb.ListClusterNodesResponse.ClusterNode - (*RaftListClusterServersResponse_ClusterServers)(nil), // 63: master_pb.RaftListClusterServersResponse.ClusterServers + (*DisableVacuumRequest)(nil), // 35: master_pb.DisableVacuumRequest + (*DisableVacuumResponse)(nil), // 36: master_pb.DisableVacuumResponse + (*EnableVacuumRequest)(nil), // 37: master_pb.EnableVacuumRequest + (*EnableVacuumResponse)(nil), // 38: master_pb.EnableVacuumResponse + (*VolumeMarkReadonlyRequest)(nil), // 39: master_pb.VolumeMarkReadonlyRequest + (*VolumeMarkReadonlyResponse)(nil), // 40: master_pb.VolumeMarkReadonlyResponse + (*GetMasterConfigurationRequest)(nil), // 41: master_pb.GetMasterConfigurationRequest + (*GetMasterConfigurationResponse)(nil), // 42: master_pb.GetMasterConfigurationResponse + (*ListClusterNodesRequest)(nil), // 43: master_pb.ListClusterNodesRequest + (*ListClusterNodesResponse)(nil), // 44: master_pb.ListClusterNodesResponse + (*LeaseAdminTokenRequest)(nil), // 45: master_pb.LeaseAdminTokenRequest + (*LeaseAdminTokenResponse)(nil), // 46: master_pb.LeaseAdminTokenResponse + (*ReleaseAdminTokenRequest)(nil), // 47: master_pb.ReleaseAdminTokenRequest + (*ReleaseAdminTokenResponse)(nil), // 48: master_pb.ReleaseAdminTokenResponse + (*PingRequest)(nil), // 49: master_pb.PingRequest + (*PingResponse)(nil), // 50: master_pb.PingResponse + (*RaftAddServerRequest)(nil), // 51: master_pb.RaftAddServerRequest + (*RaftAddServerResponse)(nil), // 52: master_pb.RaftAddServerResponse + (*RaftRemoveServerRequest)(nil), // 53: master_pb.RaftRemoveServerRequest + (*RaftRemoveServerResponse)(nil), // 54: master_pb.RaftRemoveServerResponse + (*RaftListClusterServersRequest)(nil), // 55: master_pb.RaftListClusterServersRequest + (*RaftListClusterServersResponse)(nil), // 56: master_pb.RaftListClusterServersResponse + nil, // 57: master_pb.Heartbeat.MaxVolumeCountsEntry + nil, // 58: master_pb.StorageBackend.PropertiesEntry + (*SuperBlockExtra_ErasureCoding)(nil), // 59: master_pb.SuperBlockExtra.ErasureCoding + (*LookupVolumeResponse_VolumeIdLocation)(nil), // 60: master_pb.LookupVolumeResponse.VolumeIdLocation + nil, // 61: master_pb.DataNodeInfo.DiskInfosEntry + nil, // 62: master_pb.RackInfo.DiskInfosEntry + nil, // 63: 
master_pb.DataCenterInfo.DiskInfosEntry + nil, // 64: master_pb.TopologyInfo.DiskInfosEntry + (*LookupEcVolumeResponse_EcShardIdLocation)(nil), // 65: master_pb.LookupEcVolumeResponse.EcShardIdLocation + (*ListClusterNodesResponse_ClusterNode)(nil), // 66: master_pb.ListClusterNodesResponse.ClusterNode + (*RaftListClusterServersResponse_ClusterServers)(nil), // 67: master_pb.RaftListClusterServersResponse.ClusterServers } var file_master_proto_depIdxs = []int32{ 2, // 0: master_pb.Heartbeat.volumes:type_name -> master_pb.VolumeInformationMessage @@ -4661,30 +4837,30 @@ var file_master_proto_depIdxs = []int32{ 4, // 3: master_pb.Heartbeat.ec_shards:type_name -> master_pb.VolumeEcShardInformationMessage 4, // 4: master_pb.Heartbeat.new_ec_shards:type_name -> master_pb.VolumeEcShardInformationMessage 4, // 5: master_pb.Heartbeat.deleted_ec_shards:type_name -> master_pb.VolumeEcShardInformationMessage - 53, // 6: master_pb.Heartbeat.max_volume_counts:type_name -> master_pb.Heartbeat.MaxVolumeCountsEntry + 57, // 6: master_pb.Heartbeat.max_volume_counts:type_name -> master_pb.Heartbeat.MaxVolumeCountsEntry 5, // 7: master_pb.HeartbeatResponse.storage_backends:type_name -> master_pb.StorageBackend - 54, // 8: master_pb.StorageBackend.properties:type_name -> master_pb.StorageBackend.PropertiesEntry - 55, // 9: master_pb.SuperBlockExtra.erasure_coding:type_name -> master_pb.SuperBlockExtra.ErasureCoding + 58, // 8: master_pb.StorageBackend.properties:type_name -> master_pb.StorageBackend.PropertiesEntry + 59, // 9: master_pb.SuperBlockExtra.erasure_coding:type_name -> master_pb.SuperBlockExtra.ErasureCoding 9, // 10: master_pb.KeepConnectedResponse.volume_location:type_name -> master_pb.VolumeLocation 10, // 11: master_pb.KeepConnectedResponse.cluster_node_update:type_name -> master_pb.ClusterNodeUpdate - 56, // 12: master_pb.LookupVolumeResponse.volume_id_locations:type_name -> master_pb.LookupVolumeResponse.VolumeIdLocation + 60, // 12: master_pb.LookupVolumeResponse.volume_id_locations:type_name -> master_pb.LookupVolumeResponse.VolumeIdLocation 14, // 13: master_pb.AssignResponse.replicas:type_name -> master_pb.Location 14, // 14: master_pb.AssignResponse.location:type_name -> master_pb.Location 19, // 15: master_pb.CollectionListResponse.collections:type_name -> master_pb.Collection 2, // 16: master_pb.DiskInfo.volume_infos:type_name -> master_pb.VolumeInformationMessage 4, // 17: master_pb.DiskInfo.ec_shard_infos:type_name -> master_pb.VolumeEcShardInformationMessage - 57, // 18: master_pb.DataNodeInfo.diskInfos:type_name -> master_pb.DataNodeInfo.DiskInfosEntry + 61, // 18: master_pb.DataNodeInfo.diskInfos:type_name -> master_pb.DataNodeInfo.DiskInfosEntry 25, // 19: master_pb.RackInfo.data_node_infos:type_name -> master_pb.DataNodeInfo - 58, // 20: master_pb.RackInfo.diskInfos:type_name -> master_pb.RackInfo.DiskInfosEntry + 62, // 20: master_pb.RackInfo.diskInfos:type_name -> master_pb.RackInfo.DiskInfosEntry 26, // 21: master_pb.DataCenterInfo.rack_infos:type_name -> master_pb.RackInfo - 59, // 22: master_pb.DataCenterInfo.diskInfos:type_name -> master_pb.DataCenterInfo.DiskInfosEntry + 63, // 22: master_pb.DataCenterInfo.diskInfos:type_name -> master_pb.DataCenterInfo.DiskInfosEntry 27, // 23: master_pb.TopologyInfo.data_center_infos:type_name -> master_pb.DataCenterInfo - 60, // 24: master_pb.TopologyInfo.diskInfos:type_name -> master_pb.TopologyInfo.DiskInfosEntry + 64, // 24: master_pb.TopologyInfo.diskInfos:type_name -> master_pb.TopologyInfo.DiskInfosEntry 28, // 25: 
master_pb.VolumeListResponse.topology_info:type_name -> master_pb.TopologyInfo - 61, // 26: master_pb.LookupEcVolumeResponse.shard_id_locations:type_name -> master_pb.LookupEcVolumeResponse.EcShardIdLocation + 65, // 26: master_pb.LookupEcVolumeResponse.shard_id_locations:type_name -> master_pb.LookupEcVolumeResponse.EcShardIdLocation 5, // 27: master_pb.GetMasterConfigurationResponse.storage_backends:type_name -> master_pb.StorageBackend - 62, // 28: master_pb.ListClusterNodesResponse.cluster_nodes:type_name -> master_pb.ListClusterNodesResponse.ClusterNode - 63, // 29: master_pb.RaftListClusterServersResponse.cluster_servers:type_name -> master_pb.RaftListClusterServersResponse.ClusterServers + 66, // 28: master_pb.ListClusterNodesResponse.cluster_nodes:type_name -> master_pb.ListClusterNodesResponse.ClusterNode + 67, // 29: master_pb.RaftListClusterServersResponse.cluster_servers:type_name -> master_pb.RaftListClusterServersResponse.ClusterServers 14, // 30: master_pb.LookupVolumeResponse.VolumeIdLocation.locations:type_name -> master_pb.Location 24, // 31: master_pb.DataNodeInfo.DiskInfosEntry.value:type_name -> master_pb.DiskInfo 24, // 32: master_pb.RackInfo.DiskInfosEntry.value:type_name -> master_pb.DiskInfo @@ -4701,36 +4877,40 @@ var file_master_proto_depIdxs = []int32{ 29, // 43: master_pb.Seaweed.VolumeList:input_type -> master_pb.VolumeListRequest 31, // 44: master_pb.Seaweed.LookupEcVolume:input_type -> master_pb.LookupEcVolumeRequest 33, // 45: master_pb.Seaweed.VacuumVolume:input_type -> master_pb.VacuumVolumeRequest - 35, // 46: master_pb.Seaweed.VolumeMarkReadonly:input_type -> master_pb.VolumeMarkReadonlyRequest - 37, // 47: master_pb.Seaweed.GetMasterConfiguration:input_type -> master_pb.GetMasterConfigurationRequest - 39, // 48: master_pb.Seaweed.ListClusterNodes:input_type -> master_pb.ListClusterNodesRequest - 41, // 49: master_pb.Seaweed.LeaseAdminToken:input_type -> master_pb.LeaseAdminTokenRequest - 43, // 50: master_pb.Seaweed.ReleaseAdminToken:input_type -> master_pb.ReleaseAdminTokenRequest - 45, // 51: master_pb.Seaweed.Ping:input_type -> master_pb.PingRequest - 51, // 52: master_pb.Seaweed.RaftListClusterServers:input_type -> master_pb.RaftListClusterServersRequest - 47, // 53: master_pb.Seaweed.RaftAddServer:input_type -> master_pb.RaftAddServerRequest - 49, // 54: master_pb.Seaweed.RaftRemoveServer:input_type -> master_pb.RaftRemoveServerRequest - 1, // 55: master_pb.Seaweed.SendHeartbeat:output_type -> master_pb.HeartbeatResponse - 11, // 56: master_pb.Seaweed.KeepConnected:output_type -> master_pb.KeepConnectedResponse - 13, // 57: master_pb.Seaweed.LookupVolume:output_type -> master_pb.LookupVolumeResponse - 16, // 58: master_pb.Seaweed.Assign:output_type -> master_pb.AssignResponse - 18, // 59: master_pb.Seaweed.Statistics:output_type -> master_pb.StatisticsResponse - 21, // 60: master_pb.Seaweed.CollectionList:output_type -> master_pb.CollectionListResponse - 23, // 61: master_pb.Seaweed.CollectionDelete:output_type -> master_pb.CollectionDeleteResponse - 30, // 62: master_pb.Seaweed.VolumeList:output_type -> master_pb.VolumeListResponse - 32, // 63: master_pb.Seaweed.LookupEcVolume:output_type -> master_pb.LookupEcVolumeResponse - 34, // 64: master_pb.Seaweed.VacuumVolume:output_type -> master_pb.VacuumVolumeResponse - 36, // 65: master_pb.Seaweed.VolumeMarkReadonly:output_type -> master_pb.VolumeMarkReadonlyResponse - 38, // 66: master_pb.Seaweed.GetMasterConfiguration:output_type -> master_pb.GetMasterConfigurationResponse - 40, // 67: 
master_pb.Seaweed.ListClusterNodes:output_type -> master_pb.ListClusterNodesResponse - 42, // 68: master_pb.Seaweed.LeaseAdminToken:output_type -> master_pb.LeaseAdminTokenResponse - 44, // 69: master_pb.Seaweed.ReleaseAdminToken:output_type -> master_pb.ReleaseAdminTokenResponse - 46, // 70: master_pb.Seaweed.Ping:output_type -> master_pb.PingResponse - 52, // 71: master_pb.Seaweed.RaftListClusterServers:output_type -> master_pb.RaftListClusterServersResponse - 48, // 72: master_pb.Seaweed.RaftAddServer:output_type -> master_pb.RaftAddServerResponse - 50, // 73: master_pb.Seaweed.RaftRemoveServer:output_type -> master_pb.RaftRemoveServerResponse - 55, // [55:74] is the sub-list for method output_type - 36, // [36:55] is the sub-list for method input_type + 35, // 46: master_pb.Seaweed.DisableVacuum:input_type -> master_pb.DisableVacuumRequest + 37, // 47: master_pb.Seaweed.EnableVacuum:input_type -> master_pb.EnableVacuumRequest + 39, // 48: master_pb.Seaweed.VolumeMarkReadonly:input_type -> master_pb.VolumeMarkReadonlyRequest + 41, // 49: master_pb.Seaweed.GetMasterConfiguration:input_type -> master_pb.GetMasterConfigurationRequest + 43, // 50: master_pb.Seaweed.ListClusterNodes:input_type -> master_pb.ListClusterNodesRequest + 45, // 51: master_pb.Seaweed.LeaseAdminToken:input_type -> master_pb.LeaseAdminTokenRequest + 47, // 52: master_pb.Seaweed.ReleaseAdminToken:input_type -> master_pb.ReleaseAdminTokenRequest + 49, // 53: master_pb.Seaweed.Ping:input_type -> master_pb.PingRequest + 55, // 54: master_pb.Seaweed.RaftListClusterServers:input_type -> master_pb.RaftListClusterServersRequest + 51, // 55: master_pb.Seaweed.RaftAddServer:input_type -> master_pb.RaftAddServerRequest + 53, // 56: master_pb.Seaweed.RaftRemoveServer:input_type -> master_pb.RaftRemoveServerRequest + 1, // 57: master_pb.Seaweed.SendHeartbeat:output_type -> master_pb.HeartbeatResponse + 11, // 58: master_pb.Seaweed.KeepConnected:output_type -> master_pb.KeepConnectedResponse + 13, // 59: master_pb.Seaweed.LookupVolume:output_type -> master_pb.LookupVolumeResponse + 16, // 60: master_pb.Seaweed.Assign:output_type -> master_pb.AssignResponse + 18, // 61: master_pb.Seaweed.Statistics:output_type -> master_pb.StatisticsResponse + 21, // 62: master_pb.Seaweed.CollectionList:output_type -> master_pb.CollectionListResponse + 23, // 63: master_pb.Seaweed.CollectionDelete:output_type -> master_pb.CollectionDeleteResponse + 30, // 64: master_pb.Seaweed.VolumeList:output_type -> master_pb.VolumeListResponse + 32, // 65: master_pb.Seaweed.LookupEcVolume:output_type -> master_pb.LookupEcVolumeResponse + 34, // 66: master_pb.Seaweed.VacuumVolume:output_type -> master_pb.VacuumVolumeResponse + 36, // 67: master_pb.Seaweed.DisableVacuum:output_type -> master_pb.DisableVacuumResponse + 38, // 68: master_pb.Seaweed.EnableVacuum:output_type -> master_pb.EnableVacuumResponse + 40, // 69: master_pb.Seaweed.VolumeMarkReadonly:output_type -> master_pb.VolumeMarkReadonlyResponse + 42, // 70: master_pb.Seaweed.GetMasterConfiguration:output_type -> master_pb.GetMasterConfigurationResponse + 44, // 71: master_pb.Seaweed.ListClusterNodes:output_type -> master_pb.ListClusterNodesResponse + 46, // 72: master_pb.Seaweed.LeaseAdminToken:output_type -> master_pb.LeaseAdminTokenResponse + 48, // 73: master_pb.Seaweed.ReleaseAdminToken:output_type -> master_pb.ReleaseAdminTokenResponse + 50, // 74: master_pb.Seaweed.Ping:output_type -> master_pb.PingResponse + 56, // 75: master_pb.Seaweed.RaftListClusterServers:output_type -> 
master_pb.RaftListClusterServersResponse + 52, // 76: master_pb.Seaweed.RaftAddServer:output_type -> master_pb.RaftAddServerResponse + 54, // 77: master_pb.Seaweed.RaftRemoveServer:output_type -> master_pb.RaftRemoveServerResponse + 57, // [57:78] is the sub-list for method output_type + 36, // [36:57] is the sub-list for method input_type 36, // [36:36] is the sub-list for extension type_name 36, // [36:36] is the sub-list for extension extendee 0, // [0:36] is the sub-list for field type_name @@ -5163,7 +5343,7 @@ func file_master_proto_init() { } } file_master_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VolumeMarkReadonlyRequest); i { + switch v := v.(*DisableVacuumRequest); i { case 0: return &v.state case 1: @@ -5175,7 +5355,7 @@ func file_master_proto_init() { } } file_master_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VolumeMarkReadonlyResponse); i { + switch v := v.(*DisableVacuumResponse); i { case 0: return &v.state case 1: @@ -5187,7 +5367,7 @@ func file_master_proto_init() { } } file_master_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetMasterConfigurationRequest); i { + switch v := v.(*EnableVacuumRequest); i { case 0: return &v.state case 1: @@ -5199,7 +5379,7 @@ func file_master_proto_init() { } } file_master_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetMasterConfigurationResponse); i { + switch v := v.(*EnableVacuumResponse); i { case 0: return &v.state case 1: @@ -5211,7 +5391,7 @@ func file_master_proto_init() { } } file_master_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListClusterNodesRequest); i { + switch v := v.(*VolumeMarkReadonlyRequest); i { case 0: return &v.state case 1: @@ -5223,7 +5403,7 @@ func file_master_proto_init() { } } file_master_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListClusterNodesResponse); i { + switch v := v.(*VolumeMarkReadonlyResponse); i { case 0: return &v.state case 1: @@ -5235,7 +5415,7 @@ func file_master_proto_init() { } } file_master_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LeaseAdminTokenRequest); i { + switch v := v.(*GetMasterConfigurationRequest); i { case 0: return &v.state case 1: @@ -5247,7 +5427,7 @@ func file_master_proto_init() { } } file_master_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LeaseAdminTokenResponse); i { + switch v := v.(*GetMasterConfigurationResponse); i { case 0: return &v.state case 1: @@ -5259,7 +5439,7 @@ func file_master_proto_init() { } } file_master_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReleaseAdminTokenRequest); i { + switch v := v.(*ListClusterNodesRequest); i { case 0: return &v.state case 1: @@ -5271,7 +5451,7 @@ func file_master_proto_init() { } } file_master_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReleaseAdminTokenResponse); i { + switch v := v.(*ListClusterNodesResponse); i { case 0: return &v.state case 1: @@ -5283,7 +5463,7 @@ func file_master_proto_init() { } } file_master_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PingRequest); i { + switch v := v.(*LeaseAdminTokenRequest); i { case 0: return &v.state case 1: @@ -5295,7 +5475,7 @@ func file_master_proto_init() { } } 
file_master_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PingResponse); i { + switch v := v.(*LeaseAdminTokenResponse); i { case 0: return &v.state case 1: @@ -5307,7 +5487,7 @@ func file_master_proto_init() { } } file_master_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RaftAddServerRequest); i { + switch v := v.(*ReleaseAdminTokenRequest); i { case 0: return &v.state case 1: @@ -5319,7 +5499,7 @@ func file_master_proto_init() { } } file_master_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RaftAddServerResponse); i { + switch v := v.(*ReleaseAdminTokenResponse); i { case 0: return &v.state case 1: @@ -5331,7 +5511,7 @@ func file_master_proto_init() { } } file_master_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RaftRemoveServerRequest); i { + switch v := v.(*PingRequest); i { case 0: return &v.state case 1: @@ -5343,7 +5523,7 @@ func file_master_proto_init() { } } file_master_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RaftRemoveServerResponse); i { + switch v := v.(*PingResponse); i { case 0: return &v.state case 1: @@ -5355,7 +5535,7 @@ func file_master_proto_init() { } } file_master_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RaftListClusterServersRequest); i { + switch v := v.(*RaftAddServerRequest); i { case 0: return &v.state case 1: @@ -5367,7 +5547,31 @@ func file_master_proto_init() { } } file_master_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RaftListClusterServersResponse); i { + switch v := v.(*RaftAddServerResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RaftRemoveServerRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RaftRemoveServerResponse); i { case 0: return &v.state case 1: @@ -5379,7 +5583,7 @@ func file_master_proto_init() { } } file_master_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SuperBlockExtra_ErasureCoding); i { + switch v := v.(*RaftListClusterServersRequest); i { case 0: return &v.state case 1: @@ -5391,6 +5595,30 @@ func file_master_proto_init() { } } file_master_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RaftListClusterServersResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SuperBlockExtra_ErasureCoding); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*LookupVolumeResponse_VolumeIdLocation); i { case 0: return &v.state @@ -5402,7 +5630,7 @@ func file_master_proto_init() { return nil } } - file_master_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { + 
file_master_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*LookupEcVolumeResponse_EcShardIdLocation); i { case 0: return &v.state @@ -5414,7 +5642,7 @@ func file_master_proto_init() { return nil } } - file_master_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { + file_master_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ListClusterNodesResponse_ClusterNode); i { case 0: return &v.state @@ -5426,7 +5654,7 @@ func file_master_proto_init() { return nil } } - file_master_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { + file_master_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RaftListClusterServersResponse_ClusterServers); i { case 0: return &v.state @@ -5445,7 +5673,7 @@ func file_master_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_master_proto_rawDesc, NumEnums: 0, - NumMessages: 64, + NumMessages: 68, NumExtensions: 0, NumServices: 1, }, diff --git a/weed/pb/master_pb/master_grpc.pb.go b/weed/pb/master_pb/master_grpc.pb.go index 988f7f873..299cb105a 100644 --- a/weed/pb/master_pb/master_grpc.pb.go +++ b/weed/pb/master_pb/master_grpc.pb.go @@ -1,8 +1,4 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.21.4 -// source: master.proto package master_pb @@ -32,6 +28,8 @@ type SeaweedClient interface { VolumeList(ctx context.Context, in *VolumeListRequest, opts ...grpc.CallOption) (*VolumeListResponse, error) LookupEcVolume(ctx context.Context, in *LookupEcVolumeRequest, opts ...grpc.CallOption) (*LookupEcVolumeResponse, error) VacuumVolume(ctx context.Context, in *VacuumVolumeRequest, opts ...grpc.CallOption) (*VacuumVolumeResponse, error) + DisableVacuum(ctx context.Context, in *DisableVacuumRequest, opts ...grpc.CallOption) (*DisableVacuumResponse, error) + EnableVacuum(ctx context.Context, in *EnableVacuumRequest, opts ...grpc.CallOption) (*EnableVacuumResponse, error) VolumeMarkReadonly(ctx context.Context, in *VolumeMarkReadonlyRequest, opts ...grpc.CallOption) (*VolumeMarkReadonlyResponse, error) GetMasterConfiguration(ctx context.Context, in *GetMasterConfigurationRequest, opts ...grpc.CallOption) (*GetMasterConfigurationResponse, error) ListClusterNodes(ctx context.Context, in *ListClusterNodesRequest, opts ...grpc.CallOption) (*ListClusterNodesResponse, error) @@ -185,6 +183,24 @@ func (c *seaweedClient) VacuumVolume(ctx context.Context, in *VacuumVolumeReques return out, nil } +func (c *seaweedClient) DisableVacuum(ctx context.Context, in *DisableVacuumRequest, opts ...grpc.CallOption) (*DisableVacuumResponse, error) { + out := new(DisableVacuumResponse) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/DisableVacuum", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *seaweedClient) EnableVacuum(ctx context.Context, in *EnableVacuumRequest, opts ...grpc.CallOption) (*EnableVacuumResponse, error) { + out := new(EnableVacuumResponse) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/EnableVacuum", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *seaweedClient) VolumeMarkReadonly(ctx context.Context, in *VolumeMarkReadonlyRequest, opts ...grpc.CallOption) (*VolumeMarkReadonlyResponse, error) { out := new(VolumeMarkReadonlyResponse) err := c.cc.Invoke(ctx, "/master_pb.Seaweed/VolumeMarkReadonly", in, out, opts...) 
@@ -280,6 +296,8 @@ type SeaweedServer interface { VolumeList(context.Context, *VolumeListRequest) (*VolumeListResponse, error) LookupEcVolume(context.Context, *LookupEcVolumeRequest) (*LookupEcVolumeResponse, error) VacuumVolume(context.Context, *VacuumVolumeRequest) (*VacuumVolumeResponse, error) + DisableVacuum(context.Context, *DisableVacuumRequest) (*DisableVacuumResponse, error) + EnableVacuum(context.Context, *EnableVacuumRequest) (*EnableVacuumResponse, error) VolumeMarkReadonly(context.Context, *VolumeMarkReadonlyRequest) (*VolumeMarkReadonlyResponse, error) GetMasterConfiguration(context.Context, *GetMasterConfigurationRequest) (*GetMasterConfigurationResponse, error) ListClusterNodes(context.Context, *ListClusterNodesRequest) (*ListClusterNodesResponse, error) @@ -326,6 +344,12 @@ func (UnimplementedSeaweedServer) LookupEcVolume(context.Context, *LookupEcVolum func (UnimplementedSeaweedServer) VacuumVolume(context.Context, *VacuumVolumeRequest) (*VacuumVolumeResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VacuumVolume not implemented") } +func (UnimplementedSeaweedServer) DisableVacuum(context.Context, *DisableVacuumRequest) (*DisableVacuumResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DisableVacuum not implemented") +} +func (UnimplementedSeaweedServer) EnableVacuum(context.Context, *EnableVacuumRequest) (*EnableVacuumResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method EnableVacuum not implemented") +} func (UnimplementedSeaweedServer) VolumeMarkReadonly(context.Context, *VolumeMarkReadonlyRequest) (*VolumeMarkReadonlyResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VolumeMarkReadonly not implemented") } @@ -562,6 +586,42 @@ func _Seaweed_VacuumVolume_Handler(srv interface{}, ctx context.Context, dec fun return interceptor(ctx, in, info, handler) } +func _Seaweed_DisableVacuum_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DisableVacuumRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedServer).DisableVacuum(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/master_pb.Seaweed/DisableVacuum", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedServer).DisableVacuum(ctx, req.(*DisableVacuumRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Seaweed_EnableVacuum_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(EnableVacuumRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedServer).EnableVacuum(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/master_pb.Seaweed/EnableVacuum", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedServer).EnableVacuum(ctx, req.(*EnableVacuumRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Seaweed_VolumeMarkReadonly_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(VolumeMarkReadonlyRequest) if err := dec(in); err != nil { @@ -763,6 +823,14 @@ var Seaweed_ServiceDesc = grpc.ServiceDesc{ MethodName: "VacuumVolume", Handler: 
_Seaweed_VacuumVolume_Handler, }, + { + MethodName: "DisableVacuum", + Handler: _Seaweed_DisableVacuum_Handler, + }, + { + MethodName: "EnableVacuum", + Handler: _Seaweed_EnableVacuum_Handler, + }, { MethodName: "VolumeMarkReadonly", Handler: _Seaweed_VolumeMarkReadonly_Handler, diff --git a/weed/pb/mount_pb/mount_grpc.pb.go b/weed/pb/mount_pb/mount_grpc.pb.go index 108b3da88..41737aa21 100644 --- a/weed/pb/mount_pb/mount_grpc.pb.go +++ b/weed/pb/mount_pb/mount_grpc.pb.go @@ -1,8 +1,4 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.21.4 -// source: mount.proto package mount_pb diff --git a/weed/pb/mq_pb/mq.pb.go b/weed/pb/mq_pb/mq.pb.go index 8d3e2bab7..1640283de 100644 --- a/weed/pb/mq_pb/mq.pb.go +++ b/weed/pb/mq_pb/mq.pb.go @@ -20,7 +20,7 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// //////////////////////////////////////////////// +////////////////////////////////////////////////// type SegmentInfo struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -617,7 +617,7 @@ func (x *CheckBrokerLoadResponse) GetBytesCount() int64 { return 0 } -// //////////////////////////////////////////////// +////////////////////////////////////////////////// type PublishRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/weed/pb/mq_pb/mq_grpc.pb.go b/weed/pb/mq_pb/mq_grpc.pb.go index 83be87a50..55b2b4fc2 100644 --- a/weed/pb/mq_pb/mq_grpc.pb.go +++ b/weed/pb/mq_pb/mq_grpc.pb.go @@ -1,8 +1,4 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.21.4 -// source: mq.proto package mq_pb diff --git a/weed/pb/remote_pb/remote.pb.go b/weed/pb/remote_pb/remote.pb.go index 4acac6994..a1c8bb6c6 100644 --- a/weed/pb/remote_pb/remote.pb.go +++ b/weed/pb/remote_pb/remote.pb.go @@ -20,9 +20,9 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// /////////////////////// +///////////////////////// // Remote Storage related -// /////////////////////// +///////////////////////// type RemoteConf struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/weed/pb/s3_pb/s3_grpc.pb.go b/weed/pb/s3_pb/s3_grpc.pb.go index bbe340b65..1bc956be6 100644 --- a/weed/pb/s3_pb/s3_grpc.pb.go +++ b/weed/pb/s3_pb/s3_grpc.pb.go @@ -1,8 +1,4 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.21.4 -// source: s3.proto package s3_pb diff --git a/weed/pb/volume_server_pb/volume_server.pb.go b/weed/pb/volume_server_pb/volume_server.pb.go index cb9a62ed8..4be12feea 100644 --- a/weed/pb/volume_server_pb/volume_server.pb.go +++ b/weed/pb/volume_server_pb/volume_server.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.26.0 -// protoc v3.17.3 +// protoc-gen-go v1.28.1 +// protoc v3.21.4 // source: volume_server.proto package volume_server_pb diff --git a/weed/server/master_grpc_server_volume.go b/weed/server/master_grpc_server_volume.go index 1848097ec..77154972b 100644 --- a/weed/server/master_grpc_server_volume.go +++ b/weed/server/master_grpc_server_volume.go @@ -3,12 +3,13 @@ package weed_server import ( "context" "fmt" - "github.com/seaweedfs/raft" "reflect" "strings" "sync" "time" + "github.com/seaweedfs/raft" + "github.com/seaweedfs/seaweedfs/weed/glog" "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" "github.com/seaweedfs/seaweedfs/weed/security" @@ -283,6 +284,20 @@ func (ms *MasterServer) VacuumVolume(ctx context.Context, req *master_pb.VacuumV return resp, nil } +func (ms *MasterServer) DisableVacuum(ctx context.Context, req *master_pb.DisableVacuumRequest) (*master_pb.DisableVacuumResponse, error) { + + ms.Topo.DisableVacuum() + resp := &master_pb.DisableVacuumResponse{} + return resp, nil +} + +func (ms *MasterServer) EnableVacuum(ctx context.Context, req *master_pb.EnableVacuumRequest) (*master_pb.EnableVacuumResponse, error) { + + ms.Topo.EnableVacuum() + resp := &master_pb.EnableVacuumResponse{} + return resp, nil +} + func (ms *MasterServer) VolumeMarkReadonly(ctx context.Context, req *master_pb.VolumeMarkReadonlyRequest) (*master_pb.VolumeMarkReadonlyResponse, error) { if !ms.Topo.IsLeader() { diff --git a/weed/shell/command_volume_vacuum_disable.go b/weed/shell/command_volume_vacuum_disable.go new file mode 100644 index 000000000..e285117c9 --- /dev/null +++ b/weed/shell/command_volume_vacuum_disable.go @@ -0,0 +1,41 @@ +package shell + +import ( + "context" + "io" + + "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" +) + +func init() { + Commands = append(Commands, &commandDisableVacuum{}) +} + +type commandDisableVacuum struct { +} + +func (c *commandDisableVacuum) Name() string { + return "volume.vacuum.disable" +} + +func (c *commandDisableVacuum) Help() string { + return `disable vacuuming + + volume.vacuum.disable + +` +} + +func (c *commandDisableVacuum) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + if err = commandEnv.confirmIsLocked(args); err != nil { + return + } + + err = commandEnv.MasterClient.WithClient(false, func(client master_pb.SeaweedClient) error { + _, err = client.DisableVacuum(context.Background(), &master_pb.DisableVacuumRequest{}) + return err + }) + + return +} diff --git a/weed/shell/command_volume_vacuum_enable.go b/weed/shell/command_volume_vacuum_enable.go new file mode 100644 index 000000000..ae25c9433 --- /dev/null +++ b/weed/shell/command_volume_vacuum_enable.go @@ -0,0 +1,41 @@ +package shell + +import ( + "context" + "io" + + "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" +) + +func init() { + Commands = append(Commands, &commandEnableVacuum{}) +} + +type commandEnableVacuum struct { +} + +func (c *commandEnableVacuum) Name() string { + return "volume.vacuum.enable" +} + +func (c *commandEnableVacuum) Help() string { + return `enable vacuuming + + volume.vacuum.enable + +` +} + +func (c *commandEnableVacuum) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + if err = commandEnv.confirmIsLocked(args); err != nil { + return + } + + err = commandEnv.MasterClient.WithClient(false, func(client master_pb.SeaweedClient) error { + _, err = client.EnableVacuum(context.Background(), &master_pb.EnableVacuumRequest{}) + return err + }) + + return +} diff --git 
a/weed/topology/topology.go b/weed/topology/topology.go index 6c6fc73a4..671cdf32c 100644 --- a/weed/topology/topology.go +++ b/weed/topology/topology.go @@ -35,6 +35,7 @@ type Topology struct { volumeSizeLimit uint64 replicationAsMin bool + isDisableVacuum bool Sequence sequence.Sequencer @@ -338,3 +339,13 @@ func (t *Topology) DataNodeRegistration(dcName, rackName string, dn *DataNode) { rack.LinkChildNode(dn) glog.Infof("[%s] reLink To topo ", dn.Id()) } + +func (t *Topology) DisableVacuum() { + glog.V(0).Infof("DisableVacuum") + t.isDisableVacuum = true +} + +func (t *Topology) EnableVacuum() { + glog.V(0).Infof("EnableVacuum") + t.isDisableVacuum = false +} diff --git a/weed/topology/topology_event_handling.go b/weed/topology/topology_event_handling.go index d0d175a39..0a6c1a6c6 100644 --- a/weed/topology/topology_event_handling.go +++ b/weed/topology/topology_event_handling.go @@ -1,12 +1,13 @@ package topology import ( + "math/rand" + "time" + "github.com/seaweedfs/seaweedfs/weed/stats" "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding" "github.com/seaweedfs/seaweedfs/weed/storage/types" "google.golang.org/grpc" - "math/rand" - "time" "github.com/seaweedfs/seaweedfs/weed/glog" "github.com/seaweedfs/seaweedfs/weed/storage" @@ -25,7 +26,9 @@ func (t *Topology) StartRefreshWritableVolumes(grpcDialOption grpc.DialOption, g go func(garbageThreshold float64) { for { if t.IsLeader() { - t.Vacuum(grpcDialOption, garbageThreshold, 0, "", preallocate) + if !t.isDisableVacuum { + t.Vacuum(grpcDialOption, garbageThreshold, 0, "", preallocate) + } } else { stats.MasterReplicaPlacementMismatch.Reset() } From ed5f3f073b6ddc9391b4a2c2febb0d9d8a1690ef Mon Sep 17 00:00:00 2001 From: Guo Lei Date: Thu, 29 Dec 2022 23:05:05 +0800 Subject: [PATCH 02/19] add more help message, in case of misunderstanding (#4092) --- weed/shell/command_volume_vacuum_disable.go | 2 +- weed/shell/command_volume_vacuum_enable.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/weed/shell/command_volume_vacuum_disable.go b/weed/shell/command_volume_vacuum_disable.go index e285117c9..ddae744e5 100644 --- a/weed/shell/command_volume_vacuum_disable.go +++ b/weed/shell/command_volume_vacuum_disable.go @@ -19,7 +19,7 @@ func (c *commandDisableVacuum) Name() string { } func (c *commandDisableVacuum) Help() string { - return `disable vacuuming + return `disable vacuuming request from Master, however volume.vacuum still works. 
 	volume.vacuum.disable
 
diff --git a/weed/shell/command_volume_vacuum_enable.go b/weed/shell/command_volume_vacuum_enable.go
index ae25c9433..03284c92f 100644
--- a/weed/shell/command_volume_vacuum_enable.go
+++ b/weed/shell/command_volume_vacuum_enable.go
@@ -19,7 +19,7 @@ func (c *commandEnableVacuum) Name() string {
 }
 
 func (c *commandEnableVacuum) Help() string {
-	return `enable vacuuming
+	return `enable vacuuming request from Master
 
 	volume.vacuum.enable
 

From 265a56630b41c70fe0ae8c0dd9707de3c43f446d Mon Sep 17 00:00:00 2001
From: CommanderRoot
Date: Sun, 1 Jan 2023 14:06:41 +0100
Subject: [PATCH 03/19] filer.store.mysql: Escape table columns in SQL query
 (#4095)

---
 weed/filer/mysql/mysql_sql_gen.go | 14 +++++++-------
 weed/filer/mysql/mysql_store.go   |  2 +-
 weed/filer/mysql2/mysql2_store.go |  2 +-
 3 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/weed/filer/mysql/mysql_sql_gen.go b/weed/filer/mysql/mysql_sql_gen.go
index 3b0fc940e..d2fffc4c1 100644
--- a/weed/filer/mysql/mysql_sql_gen.go
+++ b/weed/filer/mysql/mysql_sql_gen.go
@@ -21,32 +21,32 @@ func (gen *SqlGenMysql) GetSqlInsert(tableName string) string {
 	if gen.UpsertQueryTemplate != "" {
 		return fmt.Sprintf(gen.UpsertQueryTemplate, tableName)
 	} else {
-		return fmt.Sprintf("INSERT INTO `%s` (dirhash,name,directory,meta) VALUES(?,?,?,?)", tableName)
+		return fmt.Sprintf("INSERT INTO `%s` (`dirhash`,`name`,`directory`,`meta`) VALUES(?,?,?,?)", tableName)
 	}
 }
 
 func (gen *SqlGenMysql) GetSqlUpdate(tableName string) string {
-	return fmt.Sprintf("UPDATE `%s` SET meta=? WHERE dirhash=? AND name=? AND directory=?", tableName)
+	return fmt.Sprintf("UPDATE `%s` SET `meta` = ? WHERE `dirhash` = ? AND `name` = ? AND `directory` = ?", tableName)
 }
 
 func (gen *SqlGenMysql) GetSqlFind(tableName string) string {
-	return fmt.Sprintf("SELECT meta FROM `%s` WHERE dirhash=? AND name=? AND directory=?", tableName)
+	return fmt.Sprintf("SELECT `meta` FROM `%s` WHERE `dirhash` = ? AND `name` = ? AND `directory` = ?", tableName)
 }
 
 func (gen *SqlGenMysql) GetSqlDelete(tableName string) string {
-	return fmt.Sprintf("DELETE FROM `%s` WHERE dirhash=? AND name=? AND directory=?", tableName)
+	return fmt.Sprintf("DELETE FROM `%s` WHERE `dirhash` = ? AND `name` = ? AND `directory` = ?", tableName)
 }
 
 func (gen *SqlGenMysql) GetSqlDeleteFolderChildren(tableName string) string {
-	return fmt.Sprintf("DELETE FROM `%s` WHERE dirhash=? AND directory=?", tableName)
+	return fmt.Sprintf("DELETE FROM `%s` WHERE `dirhash` = ? AND `directory` = ?", tableName)
 }
 
 func (gen *SqlGenMysql) GetSqlListExclusive(tableName string) string {
-	return fmt.Sprintf("SELECT NAME, meta FROM `%s` WHERE dirhash=? AND name>? AND directory=? AND name like ? ORDER BY NAME ASC LIMIT ?", tableName)
+	return fmt.Sprintf("SELECT `name`, `meta` FROM `%s` WHERE `dirhash` = ? AND `name` > ? AND `directory` = ? AND `name` LIKE ? ORDER BY `name` ASC LIMIT ?", tableName)
 }
 
 func (gen *SqlGenMysql) GetSqlListInclusive(tableName string) string {
-	return fmt.Sprintf("SELECT NAME, meta FROM `%s` WHERE dirhash=? AND name>=? AND directory=? AND name like ? ORDER BY NAME ASC LIMIT ?", tableName)
+	return fmt.Sprintf("SELECT `name`, `meta` FROM `%s` WHERE `dirhash` = ? AND `name` >= ? AND `directory` = ? AND `name` LIKE ?
ORDER BY `name` ASC LIMIT ?", tableName) } func (gen *SqlGenMysql) GetSqlCreateTable(tableName string) string { diff --git a/weed/filer/mysql/mysql_store.go b/weed/filer/mysql/mysql_store.go index 97e066ce3..ea40373d9 100644 --- a/weed/filer/mysql/mysql_store.go +++ b/weed/filer/mysql/mysql_store.go @@ -53,7 +53,7 @@ func (store *MysqlStore) initialize(upsertQuery string, enableUpsert bool, user, } store.SqlGenerator = &SqlGenMysql{ CreateTableSqlTemplate: "", - DropTableSqlTemplate: "drop table `%s`", + DropTableSqlTemplate: "DROP TABLE `%s`", UpsertQueryTemplate: upsertQuery, } diff --git a/weed/filer/mysql2/mysql2_store.go b/weed/filer/mysql2/mysql2_store.go index 6d4229580..7008de171 100644 --- a/weed/filer/mysql2/mysql2_store.go +++ b/weed/filer/mysql2/mysql2_store.go @@ -58,7 +58,7 @@ func (store *MysqlStore2) initialize(createTable, upsertQuery string, enableUpse } store.SqlGenerator = &mysql.SqlGenMysql{ CreateTableSqlTemplate: createTable, - DropTableSqlTemplate: "drop table `%s`", + DropTableSqlTemplate: "DROP TABLE `%s`", UpsertQueryTemplate: upsertQuery, } From c2280e94cf21dc5736b9f0c6c07ef611c8ed67c4 Mon Sep 17 00:00:00 2001 From: CommanderRoot Date: Sun, 1 Jan 2023 14:06:57 +0100 Subject: [PATCH 04/19] filer.store.mysql: Replace deprecated upsert syntax (#4096) --- weed/command/scaffold/filer.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/weed/command/scaffold/filer.toml b/weed/command/scaffold/filer.toml index d8833a917..423d0914e 100644 --- a/weed/command/scaffold/filer.toml +++ b/weed/command/scaffold/filer.toml @@ -61,7 +61,7 @@ connection_max_lifetime_seconds = 0 interpolateParams = false # if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax: enableUpsert = true -upsertQuery = """INSERT INTO `%s` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)""" +upsertQuery = """INSERT INTO `%s` (`dirhash`,`name`,`directory`,`meta`) VALUES (?,?,?,?) AS `new` ON DUPLICATE KEY UPDATE `meta` = `new`.`meta`""" [mysql2] # or memsql, tidb enabled = false @@ -85,7 +85,7 @@ connection_max_lifetime_seconds = 0 interpolateParams = false # if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax: enableUpsert = true -upsertQuery = """INSERT INTO `%s` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)""" +upsertQuery = """INSERT INTO `%s` (`dirhash`,`name`,`directory`,`meta`) VALUES (?,?,?,?) 
AS `new` ON DUPLICATE KEY UPDATE `meta` = `new`.`meta`""" [postgres] # or cockroachdb, YugabyteDB # CREATE TABLE IF NOT EXISTS filemeta ( From c7c9d22f37de989b20fd06aaf9c6ea1411028834 Mon Sep 17 00:00:00 2001 From: CommanderRoot Date: Sun, 1 Jan 2023 14:07:53 +0100 Subject: [PATCH 05/19] filer.store.mysql: Use utf8mb4 instead of 3 byte UTF8 (#4094) --- docker/seaweedfs.sql | 14 +++++++------- k8s/helm_charts2/README.md | 14 +++++++------- weed/command/scaffold/filer.toml | 26 +++++++++++++------------- weed/filer/mysql/mysql_store.go | 2 +- weed/filer/mysql2/mysql2_store.go | 2 +- 5 files changed, 29 insertions(+), 29 deletions(-) diff --git a/docker/seaweedfs.sql b/docker/seaweedfs.sql index a27eb7081..c9974e0e6 100644 --- a/docker/seaweedfs.sql +++ b/docker/seaweedfs.sql @@ -3,10 +3,10 @@ CREATE USER IF NOT EXISTS 'seaweedfs'@'%' IDENTIFIED BY 'secret'; GRANT ALL PRIVILEGES ON seaweedfs.* TO 'seaweedfs'@'%'; FLUSH PRIVILEGES; USE seaweedfs; -CREATE TABLE IF NOT EXISTS filemeta ( - dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field', - name VARCHAR(1000) COMMENT 'directory or file name', - directory TEXT COMMENT 'full path to parent directory', - meta LONGBLOB, - PRIMARY KEY (dirhash, name) -) DEFAULT CHARSET=utf8; \ No newline at end of file +CREATE TABLE IF NOT EXISTS `filemeta` ( + `dirhash` BIGINT NOT NULL COMMENT 'first 64 bits of MD5 hash value of directory field', + `name` VARCHAR(766) NOT NULL COMMENT 'directory or file name', + `directory` TEXT NOT NULL COMMENT 'full path to parent directory', + `meta` LONGBLOB, + PRIMARY KEY (`dirhash`, `name`) +) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; \ No newline at end of file diff --git a/k8s/helm_charts2/README.md b/k8s/helm_charts2/README.md index 715dcffa7..a510b02ca 100644 --- a/k8s/helm_charts2/README.md +++ b/k8s/helm_charts2/README.md @@ -14,13 +14,13 @@ with ENV. A running MySQL-compatible database is expected by default, as specified in the `values.yaml` at `filer.extraEnvironmentVars`. This database should be pre-configured and initialized by running: ```sql -CREATE TABLE IF NOT EXISTS filemeta ( - dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field', - name VARCHAR(1000) BINARY COMMENT 'directory or file name', - directory TEXT BINARY COMMENT 'full path to parent directory', - meta LONGBLOB, - PRIMARY KEY (dirhash, name) -) DEFAULT CHARSET=utf8; +CREATE TABLE IF NOT EXISTS `filemeta` ( + `dirhash` BIGINT NOT NULL COMMENT 'first 64 bits of MD5 hash value of directory field', + `name` VARCHAR(766) NOT NULL COMMENT 'directory or file name', + `directory` TEXT NOT NULL COMMENT 'full path to parent directory', + `meta` LONGBLOB, + PRIMARY KEY (`dirhash`, `name`) +) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; ``` Alternative database can also be configured (e.g. leveldb) following the instructions at `filer.extraEnvironmentVars`. 
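Note: the CREATE TABLE statements above only cover fresh installs. For an already-populated `filemeta` table, a rough migration sketch could look like the following; the statement order and limits are assumptions (InnoDB with a 3072-byte index key limit, no stored `name` longer than 766 characters, and a backup taken first), not part of this patch:

    -- shrink `name` first so the utf8mb4 primary key fits the index key limit (766 * 4 = 3064 bytes)
    ALTER TABLE `filemeta` MODIFY `name` VARCHAR(766) NOT NULL COMMENT 'directory or file name';
    -- then convert the table to the new character set and binary collation
    ALTER TABLE `filemeta` CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_bin;

Converting before shrinking the column would likely fail, since VARCHAR(1000) under utf8mb4 needs up to 4000 bytes of index key.
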
diff --git a/weed/command/scaffold/filer.toml b/weed/command/scaffold/filer.toml index 423d0914e..88f9be2a6 100644 --- a/weed/command/scaffold/filer.toml +++ b/weed/command/scaffold/filer.toml @@ -41,13 +41,13 @@ enabled = false dbFile = "./filer.db" # sqlite db file [mysql] # or memsql, tidb -# CREATE TABLE IF NOT EXISTS filemeta ( -# dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field', -# name VARCHAR(1000) BINARY COMMENT 'directory or file name', -# directory TEXT BINARY COMMENT 'full path to parent directory', -# meta LONGBLOB, -# PRIMARY KEY (dirhash, name) -# ) DEFAULT CHARSET=utf8; +# CREATE TABLE IF NOT EXISTS `filemeta` ( +# `dirhash` BIGINT NOT NULL COMMENT 'first 64 bits of MD5 hash value of directory field', +# `name` VARCHAR(766) NOT NULL COMMENT 'directory or file name', +# `directory` TEXT NOT NULL COMMENT 'full path to parent directory', +# `meta` LONGBLOB, +# PRIMARY KEY (`dirhash`, `name`) +# ) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; enabled = false hostname = "localhost" @@ -67,12 +67,12 @@ upsertQuery = """INSERT INTO `%s` (`dirhash`,`name`,`directory`,`meta`) VALUES ( enabled = false createTable = """ CREATE TABLE IF NOT EXISTS `%s` ( - dirhash BIGINT, - name VARCHAR(1000) BINARY, - directory TEXT BINARY, - meta LONGBLOB, - PRIMARY KEY (dirhash, name) - ) DEFAULT CHARSET=utf8; + `dirhash` BIGINT NOT NULL, + `name` VARCHAR(766) NOT NULL, + `directory` TEXT NOT NULL, + `meta` LONGBLOB, + PRIMARY KEY (`dirhash`, `name`) + ) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; """ hostname = "localhost" port = 3306 diff --git a/weed/filer/mysql/mysql_store.go b/weed/filer/mysql/mysql_store.go index ea40373d9..14566d49b 100644 --- a/weed/filer/mysql/mysql_store.go +++ b/weed/filer/mysql/mysql_store.go @@ -13,7 +13,7 @@ import ( ) const ( - CONNECTION_URL_PATTERN = "%s:%s@tcp(%s:%d)/%s?charset=utf8" + CONNECTION_URL_PATTERN = "%s:%s@tcp(%s:%d)/%s?collation=utf8mb4_bin" ) func init() { diff --git a/weed/filer/mysql2/mysql2_store.go b/weed/filer/mysql2/mysql2_store.go index 7008de171..acf621a00 100644 --- a/weed/filer/mysql2/mysql2_store.go +++ b/weed/filer/mysql2/mysql2_store.go @@ -15,7 +15,7 @@ import ( ) const ( - CONNECTION_URL_PATTERN = "%s:%s@tcp(%s:%d)/%s?charset=utf8" + CONNECTION_URL_PATTERN = "%s:%s@tcp(%s:%d)/%s?collation=utf8mb4_bin" ) var _ filer.BucketAware = (*MysqlStore2)(nil) From 04bba1a6b29aca8a85c52ce20ec222af07d8708a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 Jan 2023 12:24:15 -0800 Subject: [PATCH 06/19] build(deps): bump modernc.org/sqlite from 1.20.0 to 1.20.1 (#4098) --- updated-dependencies: - dependency-name: modernc.org/sqlite dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 16f9dec67..8f0a82ff9 100644 --- a/go.mod +++ b/go.mod @@ -130,11 +130,11 @@ require ( modernc.org/b v1.0.0 // indirect modernc.org/cc/v3 v3.40.0 // indirect modernc.org/ccgo/v3 v3.16.13 // indirect - modernc.org/libc v1.21.5 // indirect + modernc.org/libc v1.22.2 // indirect modernc.org/mathutil v1.5.0 // indirect modernc.org/memory v1.4.0 // indirect modernc.org/opt v0.1.3 // indirect - modernc.org/sqlite v1.20.0 + modernc.org/sqlite v1.20.1 modernc.org/strutil v1.1.3 modernc.org/token v1.0.1 // indirect ) diff --git a/go.sum b/go.sum index 84b26398a..d75a38924 100644 --- a/go.sum +++ b/go.sum @@ -2688,8 +2688,8 @@ modernc.org/ccgo/v3 v3.16.13 h1:Mkgdzl46i5F/CNR/Kj80Ri59hC8TKAhZrYSaqvkwzUw= modernc.org/ccgo/v3 v3.16.13/go.mod h1:2Quk+5YgpImhPjv2Qsob1DnZ/4som1lJTodubIcoUkY= modernc.org/ccorpus v1.11.6 h1:J16RXiiqiCgua6+ZvQot4yUuUy8zxgqbqEEUuGPlISk= modernc.org/httpfs v1.0.6 h1:AAgIpFZRXuYnkjftxTAZwMIiwEqAfk8aVB2/oA6nAeM= -modernc.org/libc v1.21.5 h1:xBkU9fnHV+hvZuPSRszN0AXDG4M7nwPLwTWwkYcvLCI= -modernc.org/libc v1.21.5/go.mod h1:przBsL5RDOZajTVslkugzLBj1evTue36jEomFQOoYuI= +modernc.org/libc v1.22.2 h1:4U7v51GyhlWqQmwCHj28Rdq2Yzwk55ovjFrdPjs8Hb0= +modernc.org/libc v1.22.2/go.mod h1:uvQavJ1pZ0hIoC/jfqNoMLURIMhKzINIWypNM17puug= modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= modernc.org/mathutil v1.5.0 h1:rV0Ko/6SfM+8G+yKiyI830l3Wuz1zRutdslNoQ0kfiQ= modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= @@ -2697,8 +2697,8 @@ modernc.org/memory v1.4.0 h1:crykUfNSnMAXaOJnnxcSzbUGMqkLWjklJKkBK2nwZwk= modernc.org/memory v1.4.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4= modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= -modernc.org/sqlite v1.20.0 h1:80zmD3BGkm8BZ5fUi/4lwJQHiO3GXgIUvZRXpoIfROY= -modernc.org/sqlite v1.20.0/go.mod h1:EsYz8rfOvLCiYTy5ZFsOYzoCcRMu98YYkwAcCw5YIYw= +modernc.org/sqlite v1.20.1 h1:z6qRLw72B0VfRrJjs3l6hWkzYDx1bo0WGVrBGP4ohhM= +modernc.org/sqlite v1.20.1/go.mod h1:fODt+bFmc/j8LcoCbMSkAuKuGmhxjG45KGc25N2705M= modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= modernc.org/strutil v1.1.3 h1:fNMm+oJklMGYfU9Ylcywl0CO5O6nTfaowNsh2wpPjzY= modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= From de787ce4aba29b75c0bc2f2ad38ea344b56239c4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 Jan 2023 12:24:28 -0800 Subject: [PATCH 07/19] build(deps): bump github.com/ydb-platform/ydb-go-sdk/v3 from 3.41.0 to 3.42.1 (#4099) build(deps): bump github.com/ydb-platform/ydb-go-sdk/v3 Bumps [github.com/ydb-platform/ydb-go-sdk/v3](https://github.com/ydb-platform/ydb-go-sdk) from 3.41.0 to 3.42.1. - [Release notes](https://github.com/ydb-platform/ydb-go-sdk/releases) - [Changelog](https://github.com/ydb-platform/ydb-go-sdk/blob/master/CHANGELOG.md) - [Commits](https://github.com/ydb-platform/ydb-go-sdk/compare/v3.41.0...v3.42.1) --- updated-dependencies: - dependency-name: github.com/ydb-platform/ydb-go-sdk/v3 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 8f0a82ff9..ad076e1f8 100644 --- a/go.mod +++ b/go.mod @@ -152,7 +152,7 @@ require ( github.com/schollz/progressbar/v3 v3.12.2 github.com/tikv/client-go/v2 v2.0.3 github.com/ydb-platform/ydb-go-sdk-auth-environ v0.1.2 - github.com/ydb-platform/ydb-go-sdk/v3 v3.41.0 + github.com/ydb-platform/ydb-go-sdk/v3 v3.42.1 golang.org/x/sync v0.1.0 google.golang.org/grpc/security/advancedtls v0.0.0-20220622233350-5cdb09fa29c1 ) diff --git a/go.sum b/go.sum index d75a38924..0bb992097 100644 --- a/go.sum +++ b/go.sum @@ -1692,8 +1692,8 @@ github.com/ydb-platform/ydb-go-genproto v0.0.0-20221215182650-986f9d10542f/go.mo github.com/ydb-platform/ydb-go-sdk-auth-environ v0.1.2 h1:EYSI1kulnHb0H0zt3yOw4cRj4ABMSMGwNe43D+fX7e4= github.com/ydb-platform/ydb-go-sdk-auth-environ v0.1.2/go.mod h1:Xfjce+VMU9yJVr1lj60yK2fFPWjB4jr/4cp3K7cjzi4= github.com/ydb-platform/ydb-go-sdk/v3 v3.25.3/go.mod h1:PFizF/vJsdAgEwjK3DVSBD52kdmRkWfSIS2q2pA+e88= -github.com/ydb-platform/ydb-go-sdk/v3 v3.41.0 h1:2Kfj7I9EtQ8/o6gLB0mbShrZ/tC3/GFyjVE9z5CCLA0= -github.com/ydb-platform/ydb-go-sdk/v3 v3.41.0/go.mod h1:4bSfSb2PyBtmbFYsaVg96YxK1NjYBA3opn2o6IYvgZ8= +github.com/ydb-platform/ydb-go-sdk/v3 v3.42.1 h1:/LMslJT2s3C7wuqUsnqxyS7hB7viVTBZ0MpT6TbTBQs= +github.com/ydb-platform/ydb-go-sdk/v3 v3.42.1/go.mod h1:FoaWp3vsFN2e0YBvWoYbNaJJJi9Z+gCdmWiRdry5Zws= github.com/ydb-platform/ydb-go-yc v0.8.3 h1:92UUUMsfvtMl6mho8eQ9lbkiPrF3a9CT+RrVRAKNRwo= github.com/ydb-platform/ydb-go-yc v0.8.3/go.mod h1:zUolAFGzJ5XG8uwiseTLr9Lapm7L7hdVdZgLSuv9FXE= github.com/ydb-platform/ydb-go-yc-metadata v0.5.2 h1:nMtixUijP0Z7iHJNT9fOL+dbmEzZxqU6Xk87ll7hqXg= From 367353b936c450906e88e850c7d1e804f97c3560 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 Jan 2023 12:24:46 -0800 Subject: [PATCH 08/19] build(deps): bump github.com/aws/aws-sdk-go from 1.44.167 to 1.44.171 (#4100) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.44.167 to 1.44.171. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.44.167...v1.44.171) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index ad076e1f8..004be346c 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/Azure/azure-pipeline-go v0.2.3 github.com/Azure/azure-storage-blob-go v0.15.0 github.com/Shopify/sarama v1.37.2 - github.com/aws/aws-sdk-go v1.44.167 + github.com/aws/aws-sdk-go v1.44.171 github.com/beorn7/perks v1.0.1 // indirect github.com/bwmarrin/snowflake v0.3.0 github.com/cespare/xxhash/v2 v2.1.2 // indirect diff --git a/go.sum b/go.sum index 0bb992097..31be195bd 100644 --- a/go.sum +++ b/go.sum @@ -239,8 +239,8 @@ github.com/aws/aws-sdk-go v1.43.11/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4 github.com/aws/aws-sdk-go v1.43.31/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/aws/aws-sdk-go v1.44.45/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/aws/aws-sdk-go v1.44.68/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= -github.com/aws/aws-sdk-go v1.44.167 h1:kQmBhGdZkQLU7AiHShSkBJ15zr8agy0QeaxXduvyp2E= -github.com/aws/aws-sdk-go v1.44.167/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.44.171 h1:maREiPAmibvuONMOEZIkCH2OTosLRnDelceTtH3SYfo= +github.com/aws/aws-sdk-go v1.44.171/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aws/aws-sdk-go-v2 v1.16.8 h1:gOe9UPR98XSf7oEJCcojYg+N2/jCRm4DdeIsP85pIyQ= github.com/aws/aws-sdk-go-v2 v1.16.8/go.mod h1:6CpKuLXg2w7If3ABZCl/qZ6rEgwtjZTn4eAf4RcEyuw= From d4566d4aaa426b33015780c7cc18f887fc07cca4 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 2 Jan 2023 23:20:45 -0800 Subject: [PATCH 09/19] more solid weed mount (#4089) * compare chunks by timestamp * fix slab clearing error * fix test compilation * move oldest chunk to sealed, instead of by fullness * lock on fh.entryViewCache * remove verbose logs * revert slat clearing * less logs * less logs * track write and read by timestamp * remove useless logic * add entry lock on file handle release * use mem chunk only, swap file chunk has problems * comment out code that maybe used later * add debug mode to compare data read and write * more efficient readResolvedChunks with linked list * small optimization * fix test compilation * minor fix on writer * add SeparateGarbageChunks * group chunks into sections * turn off debug mode * fix tests * fix tests * tmp enable swap file chunk * Revert "tmp enable swap file chunk" This reverts commit 985137ec472924e4815f258189f6ca9f2168a0a7. * simple refactoring * simple refactoring * do not re-use swap file chunk. Sealed chunks should not be re-used. 
* comment out debugging facilities * either mem chunk or swap file chunk is fine now * remove orderedMutex as *semaphore.Weighted not found impactful * optimize size calculation for changing large files * optimize performance to avoid going through the long list of chunks * still problems with swap file chunk * rename * tiny optimization * swap file chunk save only successfully read data * fix * enable both mem and swap file chunk * resolve chunks with range * rename * fix chunk interval list * also change file handle chunk group when adding chunks * pick in-active chunk with time-decayed counter * fix compilation * avoid nil with empty fh.entry * refactoring * rename * rename * refactor visible intervals to *list.List * refactor chunkViews to *list.List * add IntervalList for generic interval list * change visible interval to use IntervalList in generics * cahnge chunkViews to *IntervalList[*ChunkView] * use NewFileChunkSection to create * rename variables * refactor * fix renaming leftover * renaming * renaming * add insert interval * interval list adds lock * incrementally add chunks to readers Fixes: 1. set start and stop offset for the value object 2. clone the value object 3. use pointer instead of copy-by-value when passing to interval.Value 4. use insert interval since adding chunk could be out of order * fix tests compilation * fix tests compilation --- weed/command/filer_copy.go | 8 +- weed/filer/filechunk_group.go | 148 ++++++++ weed/filer/filechunk_group_test.go | 36 ++ weed/filer/filechunk_manifest.go | 4 +- weed/filer/filechunk_section.go | 119 +++++++ weed/filer/filechunks.go | 248 ++++++------- weed/filer/filechunks_read.go | 106 +++--- weed/filer/filechunks_read_test.go | 86 ++++- weed/filer/filechunks_test.go | 214 ++++++------ weed/filer/filer_notify_append.go | 2 +- weed/filer/interval_list.go | 259 ++++++++++++++ weed/filer/interval_list_test.go | 327 ++++++++++++++++++ weed/filer/reader_at.go | 65 ++-- weed/filer/reader_at_test.go | 142 ++++---- weed/filer/reader_cache.go | 7 +- weed/filer/stream.go | 90 ++--- weed/mount/dirty_pages_chunked.go | 16 +- weed/mount/filehandle.go | 109 +++--- weed/mount/filehandle_map.go | 4 +- weed/mount/filehandle_read.go | 43 +-- weed/mount/page_writer.go | 12 +- weed/mount/page_writer/activity_score.go | 39 +++ weed/mount/page_writer/chunk_interval_list.go | 83 +++-- .../page_writer/chunk_interval_list_test.go | 72 ++-- weed/mount/page_writer/dirty_pages.go | 4 +- weed/mount/page_writer/page_chunk.go | 8 +- weed/mount/page_writer/page_chunk_mem.go | 31 +- weed/mount/page_writer/page_chunk_swapfile.go | 125 ++++--- weed/mount/page_writer/upload_pipeline.go | 40 ++- .../mount/page_writer/upload_pipeline_test.go | 4 +- weed/mount/weedfs_attr.go | 20 +- weed/mount/weedfs_file_copy_range.go | 12 +- weed/mount/weedfs_file_lseek.go | 43 +-- weed/mount/weedfs_file_read.go | 28 +- weed/mount/weedfs_file_sync.go | 15 +- weed/mount/weedfs_file_write.go | 15 +- weed/mount/weedfs_write.go | 4 +- weed/operation/upload_content.go | 4 +- .../replication/repl_util/replication_util.go | 7 +- .../filer_server_handlers_write_autochunk.go | 4 +- .../filer_server_handlers_write_cipher.go | 2 +- .../filer_server_handlers_write_upload.go | 2 +- weed/server/webdav_server.go | 30 +- weed/shell/command_fs_verify.go | 2 +- weed/shell/command_volume_fsck.go | 2 +- 45 files changed, 1835 insertions(+), 806 deletions(-) create mode 100644 weed/filer/filechunk_group.go create mode 100644 weed/filer/filechunk_group_test.go create mode 100644 
weed/filer/filechunk_section.go create mode 100644 weed/filer/interval_list.go create mode 100644 weed/filer/interval_list_test.go create mode 100644 weed/mount/page_writer/activity_score.go diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go index 667b089ed..0c4626317 100644 --- a/weed/command/filer_copy.go +++ b/weed/command/filer_copy.go @@ -365,7 +365,7 @@ func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) err if flushErr != nil { return flushErr } - chunks = append(chunks, uploadResult.ToPbFileChunk(finalFileId, 0)) + chunks = append(chunks, uploadResult.ToPbFileChunk(finalFileId, 0, time.Now().UnixNano())) } if err := pb.WithGrpcFilerClient(false, worker.filerAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { @@ -450,7 +450,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File, uploadError = fmt.Errorf("upload %v result: %v\n", fileName, uploadResult.Error) return } - chunksChan <- uploadResult.ToPbFileChunk(fileId, i*chunkSize) + chunksChan <- uploadResult.ToPbFileChunk(fileId, i*chunkSize, time.Now().UnixNano()) fmt.Printf("uploaded %s-%d [%d,%d)\n", fileName, i+1, i*chunkSize, i*chunkSize+int64(uploadResult.Size)) }(i) @@ -530,7 +530,7 @@ func detectMimeType(f *os.File) string { return mimeType } -func (worker *FileCopyWorker) saveDataAsChunk(reader io.Reader, name string, offset int64) (chunk *filer_pb.FileChunk, err error) { +func (worker *FileCopyWorker) saveDataAsChunk(reader io.Reader, name string, offset int64, tsNs int64) (chunk *filer_pb.FileChunk, err error) { finalFileId, uploadResult, flushErr, _ := operation.UploadWithRetry( worker, @@ -561,7 +561,7 @@ func (worker *FileCopyWorker) saveDataAsChunk(reader io.Reader, name string, off if uploadResult.Error != "" { return nil, fmt.Errorf("upload result: %v", uploadResult.Error) } - return uploadResult.ToPbFileChunk(finalFileId, offset), nil + return uploadResult.ToPbFileChunk(finalFileId, offset, tsNs), nil } var _ = filer_pb.FilerClient(&FileCopyWorker{}) diff --git a/weed/filer/filechunk_group.go b/weed/filer/filechunk_group.go new file mode 100644 index 000000000..5dbf16a5c --- /dev/null +++ b/weed/filer/filechunk_group.go @@ -0,0 +1,148 @@ +package filer + +import ( + "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" + "github.com/seaweedfs/seaweedfs/weed/util/chunk_cache" + "github.com/seaweedfs/seaweedfs/weed/wdclient" + "sync" +) + +type ChunkGroup struct { + lookupFn wdclient.LookupFileIdFunctionType + chunkCache chunk_cache.ChunkCache + manifestChunks []*filer_pb.FileChunk + sections map[SectionIndex]*FileChunkSection + sectionsLock sync.RWMutex +} + +func NewChunkGroup(lookupFn wdclient.LookupFileIdFunctionType, chunkCache chunk_cache.ChunkCache, chunks []*filer_pb.FileChunk) (*ChunkGroup, error) { + group := &ChunkGroup{ + lookupFn: lookupFn, + chunkCache: chunkCache, + sections: make(map[SectionIndex]*FileChunkSection), + } + + err := group.SetChunks(chunks) + return group, err +} + +func (group *ChunkGroup) AddChunk(chunk *filer_pb.FileChunk) error { + + group.sectionsLock.Lock() + defer group.sectionsLock.Unlock() + + sectionIndexStart, sectionIndexStop := SectionIndex(chunk.Offset/SectionSize), SectionIndex((chunk.Offset+int64(chunk.Size))/SectionSize) + for si := sectionIndexStart; si < sectionIndexStop+1; si++ { + section, found := group.sections[si] + if !found { + section = NewFileChunkSection(si) + group.sections[si] = section + } + section.addChunk(chunk) + } + return nil +} + +func (group 
*ChunkGroup) ReadDataAt(fileSize int64, buff []byte, offset int64) (n int, tsNs int64, err error) { + + group.sectionsLock.RLock() + defer group.sectionsLock.RUnlock() + + sectionIndexStart, sectionIndexStop := SectionIndex(offset/SectionSize), SectionIndex((offset+int64(len(buff)))/SectionSize) + for si := sectionIndexStart; si < sectionIndexStop+1; si++ { + section, found := group.sections[si] + rangeStart, rangeStop := max(offset, int64(si*SectionSize)), min(offset+int64(len(buff)), int64((si+1)*SectionSize)) + if !found { + for i := rangeStart; i < rangeStop; i++ { + buff[i-offset] = 0 + } + continue + } + xn, xTsNs, xErr := section.readDataAt(group, fileSize, buff[rangeStart-offset:rangeStop-offset], rangeStart) + if xErr != nil { + err = xErr + } + n += xn + tsNs = max(tsNs, xTsNs) + } + return +} + +func (group *ChunkGroup) SetChunks(chunks []*filer_pb.FileChunk) error { + var dataChunks []*filer_pb.FileChunk + for _, chunk := range chunks { + + if !chunk.IsChunkManifest { + dataChunks = append(dataChunks, chunk) + continue + } + + resolvedChunks, err := ResolveOneChunkManifest(group.lookupFn, chunk) + if err != nil { + return err + } + + group.manifestChunks = append(group.manifestChunks, chunk) + dataChunks = append(dataChunks, resolvedChunks...) + } + + for _, chunk := range dataChunks { + sectionIndexStart, sectionIndexStop := SectionIndex(chunk.Offset/SectionSize), SectionIndex((chunk.Offset+int64(chunk.Size))/SectionSize) + for si := sectionIndexStart; si < sectionIndexStop+1; si++ { + section, found := group.sections[si] + if !found { + section = NewFileChunkSection(si) + group.sections[si] = section + } + section.chunks = append(section.chunks, chunk) + } + } + return nil +} + +const ( + // see weedfs_file_lseek.go + SEEK_DATA uint32 = 3 // seek to next data after the offset + // SEEK_HOLE uint32 = 4 // seek to next hole after the offset +) + +// FIXME: needa tests +func (group *ChunkGroup) SearchChunks(offset, fileSize int64, whence uint32) (found bool, out int64) { + group.sectionsLock.RLock() + defer group.sectionsLock.RUnlock() + + return group.doSearchChunks(offset, fileSize, whence) +} + +func (group *ChunkGroup) doSearchChunks(offset, fileSize int64, whence uint32) (found bool, out int64) { + + sectionIndex, maxSectionIndex := SectionIndex(offset/SectionSize), SectionIndex(fileSize/SectionSize) + if whence == SEEK_DATA { + for si := sectionIndex; si < maxSectionIndex+1; si++ { + section, foundSection := group.sections[si] + if !foundSection { + continue + } + sectionStart := section.DataStartOffset(group, offset, fileSize) + if sectionStart == -1 { + continue + } + return true, sectionStart + } + return false, 0 + } else { + // whence == SEEK_HOLE + for si := sectionIndex; si < maxSectionIndex; si++ { + section, foundSection := group.sections[si] + if !foundSection { + return true, offset + } + holeStart := section.NextStopOffset(group, offset, fileSize) + if holeStart%SectionSize == 0 { + continue + } + return true, holeStart + } + return true, fileSize + } +} diff --git a/weed/filer/filechunk_group_test.go b/weed/filer/filechunk_group_test.go new file mode 100644 index 000000000..d24d66a49 --- /dev/null +++ b/weed/filer/filechunk_group_test.go @@ -0,0 +1,36 @@ +package filer + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestChunkGroup_doSearchChunks(t *testing.T) { + type fields struct { + sections map[SectionIndex]*FileChunkSection + } + type args struct { + offset int64 + fileSize int64 + whence uint32 + } + tests := []struct { + name 
string + fields fields + args args + wantFound bool + wantOut int64 + }{ + // TODO: Add test cases. + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + group := &ChunkGroup{ + sections: tt.fields.sections, + } + gotFound, gotOut := group.doSearchChunks(tt.args.offset, tt.args.fileSize, tt.args.whence) + assert.Equalf(t, tt.wantFound, gotFound, "doSearchChunks(%v, %v, %v)", tt.args.offset, tt.args.fileSize, tt.args.whence) + assert.Equalf(t, tt.wantOut, gotOut, "doSearchChunks(%v, %v, %v)", tt.args.offset, tt.args.fileSize, tt.args.whence) + }) + } +} diff --git a/weed/filer/filechunk_manifest.go b/weed/filer/filechunk_manifest.go index 221a11ffe..d9d0331be 100644 --- a/weed/filer/filechunk_manifest.go +++ b/weed/filer/filechunk_manifest.go @@ -264,7 +264,7 @@ func mergeIntoManifest(saveFunc SaveDataAsChunkFunctionType, dataChunks []*filer } } - manifestChunk, err = saveFunc(bytes.NewReader(data), "", 0) + manifestChunk, err = saveFunc(bytes.NewReader(data), "", 0, 0) if err != nil { return nil, err } @@ -275,4 +275,4 @@ func mergeIntoManifest(saveFunc SaveDataAsChunkFunctionType, dataChunks []*filer return } -type SaveDataAsChunkFunctionType func(reader io.Reader, name string, offset int64) (chunk *filer_pb.FileChunk, err error) +type SaveDataAsChunkFunctionType func(reader io.Reader, name string, offset int64, tsNs int64) (chunk *filer_pb.FileChunk, err error) diff --git a/weed/filer/filechunk_section.go b/weed/filer/filechunk_section.go new file mode 100644 index 000000000..60c919569 --- /dev/null +++ b/weed/filer/filechunk_section.go @@ -0,0 +1,119 @@ +package filer + +import ( + "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" + "sync" +) + +const SectionSize = 2 * 1024 * 1024 * 128 // 256MiB +type SectionIndex int64 +type FileChunkSection struct { + sectionIndex SectionIndex + chunks []*filer_pb.FileChunk + visibleIntervals *IntervalList[*VisibleInterval] + chunkViews *IntervalList[*ChunkView] + reader *ChunkReadAt + lock sync.Mutex +} + +func NewFileChunkSection(si SectionIndex) *FileChunkSection { + return &FileChunkSection{ + sectionIndex: si, + } +} + +func (section *FileChunkSection) addChunk(chunk *filer_pb.FileChunk) error { + section.lock.Lock() + defer section.lock.Unlock() + + start, stop := max(int64(section.sectionIndex)*SectionSize, chunk.Offset), min(((int64(section.sectionIndex)+1)*SectionSize), chunk.Offset+int64(chunk.Size)) + + section.chunks = append(section.chunks, chunk) + + if section.visibleIntervals != nil { + MergeIntoVisibles(section.visibleIntervals, start, stop, chunk) + } + + if section.visibleIntervals != nil { + section.chunks, _ = SeparateGarbageChunks(section.visibleIntervals, section.chunks) + } + + if section.chunkViews != nil { + MergeIntoChunkViews(section.chunkViews, start, stop, chunk) + } + + return nil +} + +func (section *FileChunkSection) setupForRead(group *ChunkGroup, fileSize int64) { + if section.visibleIntervals == nil { + section.visibleIntervals = readResolvedChunks(section.chunks, int64(section.sectionIndex)*SectionSize, (int64(section.sectionIndex)+1)*SectionSize) + section.chunks, _ = SeparateGarbageChunks(section.visibleIntervals, section.chunks) + if section.reader != nil { + _ = section.reader.Close() + section.reader = nil + } + } + if section.chunkViews == nil { + section.chunkViews = ViewFromVisibleIntervals(section.visibleIntervals, int64(section.sectionIndex)*SectionSize, (int64(section.sectionIndex)+1)*SectionSize) + } + + if section.reader == nil { + section.reader = 
NewChunkReaderAtFromClient(group.lookupFn, section.chunkViews, group.chunkCache, min(int64(section.sectionIndex+1)*SectionSize, fileSize)) + } + section.reader.fileSize = fileSize +} + +func (section *FileChunkSection) readDataAt(group *ChunkGroup, fileSize int64, buff []byte, offset int64) (n int, tsNs int64, err error) { + section.lock.Lock() + defer section.lock.Unlock() + + section.setupForRead(group, fileSize) + + return section.reader.ReadAtWithTime(buff, offset) +} + +func (section *FileChunkSection) DataStartOffset(group *ChunkGroup, offset int64, fileSize int64) int64 { + section.lock.Lock() + defer section.lock.Unlock() + + section.setupForRead(group, fileSize) + + for x := section.visibleIntervals.Front(); x != nil; x = x.Next { + visible := x.Value + if visible.stop <= offset { + continue + } + if offset < visible.start { + return offset + } + return offset + } + return -1 +} + +func (section *FileChunkSection) NextStopOffset(group *ChunkGroup, offset int64, fileSize int64) int64 { + section.lock.Lock() + defer section.lock.Unlock() + + section.setupForRead(group, fileSize) + + isAfterOffset := false + for x := section.visibleIntervals.Front(); x != nil; x = x.Next { + visible := x.Value + if !isAfterOffset { + if visible.stop <= offset { + continue + } + isAfterOffset = true + } + if offset < visible.start { + return offset + } + // now visible.start <= offset + if offset < visible.stop { + offset = visible.stop + } + } + return offset +} diff --git a/weed/filer/filechunks.go b/weed/filer/filechunks.go index 061e0757a..d872bd22d 100644 --- a/weed/filer/filechunks.go +++ b/weed/filer/filechunks.go @@ -4,7 +4,6 @@ import ( "bytes" "fmt" "github.com/seaweedfs/seaweedfs/weed/wdclient" - "golang.org/x/exp/slices" "math" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" @@ -42,7 +41,7 @@ func ETag(entry *filer_pb.Entry) (etag string) { } func ETagEntry(entry *Entry) (etag string) { - if entry.IsInRemoteOnly() { + if entry.IsInRemoteOnly() { return entry.Remote.RemoteETag } if entry.Attr.Md5 == nil { @@ -66,8 +65,15 @@ func CompactFileChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks visibles, _ := NonOverlappingVisibleIntervals(lookupFileIdFn, chunks, 0, math.MaxInt64) + compacted, garbage = SeparateGarbageChunks(visibles, chunks) + + return +} + +func SeparateGarbageChunks(visibles *IntervalList[*VisibleInterval], chunks []*filer_pb.FileChunk) (compacted []*filer_pb.FileChunk, garbage []*filer_pb.FileChunk) { fileIds := make(map[string]bool) - for _, interval := range visibles { + for x := visibles.Front(); x != nil; x = x.Next { + interval := x.Value fileIds[interval.fileId] = true } for _, chunk := range chunks { @@ -77,8 +83,7 @@ func CompactFileChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks garbage = append(garbage, chunk) } } - - return + return compacted, garbage } func MinusChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk, err error) { @@ -131,20 +136,39 @@ func DoMinusChunksBySourceFileId(as, bs []*filer_pb.FileChunk) (delta []*filer_p } type ChunkView struct { - FileId string - Offset int64 - Size uint64 - LogicOffset int64 // actual offset in the file, for the data specified via [offset, offset+size) in current chunk - ChunkSize uint64 - CipherKey []byte - IsGzipped bool + FileId string + OffsetInChunk int64 // offset within the chunk + ViewSize uint64 + ViewOffset int64 // actual offset in the file, for the data specified via [offset, offset+size) in current chunk + 
ChunkSize uint64 + CipherKey []byte + IsGzipped bool + ModifiedTsNs int64 +} + +func (cv *ChunkView) SetStartStop(start, stop int64) { + cv.OffsetInChunk += start - cv.ViewOffset + cv.ViewOffset = start + cv.ViewSize = uint64(stop - start) +} +func (cv *ChunkView) Clone() IntervalValue { + return &ChunkView{ + FileId: cv.FileId, + OffsetInChunk: cv.OffsetInChunk, + ViewSize: cv.ViewSize, + ViewOffset: cv.ViewOffset, + ChunkSize: cv.ChunkSize, + CipherKey: cv.CipherKey, + IsGzipped: cv.IsGzipped, + ModifiedTsNs: cv.ModifiedTsNs, + } } func (cv *ChunkView) IsFullChunk() bool { - return cv.Size == cv.ChunkSize + return cv.ViewSize == cv.ChunkSize } -func ViewFromChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, offset int64, size int64) (views []*ChunkView) { +func ViewFromChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, offset int64, size int64) (chunkViews *IntervalList[*ChunkView]) { visibles, _ := NonOverlappingVisibleIntervals(lookupFileIdFn, chunks, offset, offset+size) @@ -152,7 +176,7 @@ func ViewFromChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []* } -func ViewFromVisibleIntervals(visibles []VisibleInterval, offset int64, size int64) (views []*ChunkView) { +func ViewFromVisibleIntervals(visibles *IntervalList[*VisibleInterval], offset int64, size int64) (chunkViews *IntervalList[*ChunkView]) { stop := offset + size if size == math.MaxInt64 { @@ -162,164 +186,112 @@ func ViewFromVisibleIntervals(visibles []VisibleInterval, offset int64, size int stop = math.MaxInt64 } - for _, chunk := range visibles { + chunkViews = NewIntervalList[*ChunkView]() + for x := visibles.Front(); x != nil; x = x.Next { + chunk := x.Value chunkStart, chunkStop := max(offset, chunk.start), min(stop, chunk.stop) if chunkStart < chunkStop { - views = append(views, &ChunkView{ - FileId: chunk.fileId, - Offset: chunkStart - chunk.start + chunk.chunkOffset, - Size: uint64(chunkStop - chunkStart), - LogicOffset: chunkStart, - ChunkSize: chunk.chunkSize, - CipherKey: chunk.cipherKey, - IsGzipped: chunk.isGzipped, + chunkView := &ChunkView{ + FileId: chunk.fileId, + OffsetInChunk: chunkStart - chunk.start + chunk.offsetInChunk, + ViewSize: uint64(chunkStop - chunkStart), + ViewOffset: chunkStart, + ChunkSize: chunk.chunkSize, + CipherKey: chunk.cipherKey, + IsGzipped: chunk.isGzipped, + ModifiedTsNs: chunk.modifiedTsNs, + } + chunkViews.AppendInterval(&Interval[*ChunkView]{ + StartOffset: chunkStart, + StopOffset: chunkStop, + TsNs: chunk.modifiedTsNs, + Value: chunkView, + Prev: nil, + Next: nil, }) } } - return views + return chunkViews } -func logPrintf(name string, visibles []VisibleInterval) { - - /* - glog.V(0).Infof("%s len %d", name, len(visibles)) - for _, v := range visibles { - glog.V(0).Infof("%s: [%d,%d) %s %d", name, v.start, v.stop, v.fileId, v.chunkOffset) - } - */ -} - -func MergeIntoVisibles(visibles []VisibleInterval, chunk *filer_pb.FileChunk) (newVisibles []VisibleInterval) { - - newV := newVisibleInterval(chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.ModifiedTsNs, 0, chunk.Size, chunk.CipherKey, chunk.IsCompressed) - - length := len(visibles) - if length == 0 { - return append(visibles, newV) - } - last := visibles[length-1] - if last.stop <= chunk.Offset { - return append(visibles, newV) +func MergeIntoVisibles(visibles *IntervalList[*VisibleInterval], start int64, stop int64, chunk *filer_pb.FileChunk) { + + newV := &VisibleInterval{ + start: start, + stop: stop, + 
fileId: chunk.GetFileIdString(), + modifiedTsNs: chunk.ModifiedTsNs, + offsetInChunk: start - chunk.Offset, // the starting position in the chunk + chunkSize: chunk.Size, // size of the chunk + cipherKey: chunk.CipherKey, + isGzipped: chunk.IsCompressed, } - logPrintf(" before", visibles) - // glog.V(0).Infof("newVisibles %d adding chunk [%d,%d) %s size:%d", len(newVisibles), chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.Size) - chunkStop := chunk.Offset + int64(chunk.Size) - for _, v := range visibles { - if v.start < chunk.Offset && chunk.Offset < v.stop { - t := newVisibleInterval(v.start, chunk.Offset, v.fileId, v.modifiedTsNs, v.chunkOffset, v.chunkSize, v.cipherKey, v.isGzipped) - newVisibles = append(newVisibles, t) - // glog.V(0).Infof("visible %d [%d,%d) =1> [%d,%d)", i, v.start, v.stop, t.start, t.stop) - } - if v.start < chunkStop && chunkStop < v.stop { - t := newVisibleInterval(chunkStop, v.stop, v.fileId, v.modifiedTsNs, v.chunkOffset+(chunkStop-v.start), v.chunkSize, v.cipherKey, v.isGzipped) - newVisibles = append(newVisibles, t) - // glog.V(0).Infof("visible %d [%d,%d) =2> [%d,%d)", i, v.start, v.stop, t.start, t.stop) - } - if chunkStop <= v.start || v.stop <= chunk.Offset { - newVisibles = append(newVisibles, v) - // glog.V(0).Infof("visible %d [%d,%d) =3> [%d,%d)", i, v.start, v.stop, v.start, v.stop) - } - } - newVisibles = append(newVisibles, newV) - - logPrintf(" append", newVisibles) + visibles.InsertInterval(start, stop, chunk.ModifiedTsNs, newV) +} - for i := len(newVisibles) - 1; i >= 0; i-- { - if i > 0 && newV.start < newVisibles[i-1].start { - newVisibles[i] = newVisibles[i-1] - } else { - newVisibles[i] = newV - break - } +func MergeIntoChunkViews(chunkViews *IntervalList[*ChunkView], start int64, stop int64, chunk *filer_pb.FileChunk) { + + chunkView := &ChunkView{ + FileId: chunk.GetFileIdString(), + OffsetInChunk: start - chunk.Offset, + ViewSize: uint64(stop - start), + ViewOffset: start, + ChunkSize: chunk.Size, + CipherKey: chunk.CipherKey, + IsGzipped: chunk.IsCompressed, + ModifiedTsNs: chunk.ModifiedTsNs, } - logPrintf(" sorted", newVisibles) - return newVisibles + chunkViews.InsertInterval(start, stop, chunk.ModifiedTsNs, chunkView) } // NonOverlappingVisibleIntervals translates the file chunk into VisibleInterval in memory // If the file chunk content is a chunk manifest -func NonOverlappingVisibleIntervals(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, startOffset int64, stopOffset int64) (visibles []VisibleInterval, err error) { +func NonOverlappingVisibleIntervals(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, startOffset int64, stopOffset int64) (visibles *IntervalList[*VisibleInterval], err error) { chunks, _, err = ResolveChunkManifest(lookupFileIdFn, chunks, startOffset, stopOffset) if err != nil { return } - visibles2 := readResolvedChunks(chunks) - - if true { - return visibles2, err - } - slices.SortFunc(chunks, func(a, b *filer_pb.FileChunk) bool { - if a.ModifiedTsNs == b.ModifiedTsNs { - filer_pb.EnsureFid(a) - filer_pb.EnsureFid(b) - if a.Fid == nil || b.Fid == nil { - return true - } - return a.Fid.FileKey < b.Fid.FileKey - } - return a.ModifiedTsNs < b.ModifiedTsNs - }) - for _, chunk := range chunks { - - // glog.V(0).Infof("merge [%d,%d)", chunk.Offset, chunk.Offset+int64(chunk.Size)) - visibles = MergeIntoVisibles(visibles, chunk) - - logPrintf("add", visibles) + visibles2 := readResolvedChunks(chunks, 0, math.MaxInt64) - } - - if 
len(visibles) != len(visibles2) { - fmt.Printf("different visibles size %d : %d\n", len(visibles), len(visibles2)) - } else { - for i := 0; i < len(visibles); i++ { - checkDifference(visibles[i], visibles2[i]) - } - } - - return -} - -func checkDifference(x, y VisibleInterval) { - if x.start != y.start || - x.stop != y.stop || - x.fileId != y.fileId || - x.modifiedTsNs != y.modifiedTsNs { - fmt.Printf("different visible %+v : %+v\n", x, y) - } + return visibles2, err } // find non-overlapping visible intervals // visible interval map to one file chunk type VisibleInterval struct { - start int64 - stop int64 - modifiedTsNs int64 - fileId string - chunkOffset int64 - chunkSize uint64 - cipherKey []byte - isGzipped bool + start int64 + stop int64 + modifiedTsNs int64 + fileId string + offsetInChunk int64 + chunkSize uint64 + cipherKey []byte + isGzipped bool } -func newVisibleInterval(start, stop int64, fileId string, modifiedTime int64, chunkOffset int64, chunkSize uint64, cipherKey []byte, isGzipped bool) VisibleInterval { - return VisibleInterval{ - start: start, - stop: stop, - fileId: fileId, - modifiedTsNs: modifiedTime, - chunkOffset: chunkOffset, // the starting position in the chunk - chunkSize: chunkSize, - cipherKey: cipherKey, - isGzipped: isGzipped, +func (v *VisibleInterval) SetStartStop(start, stop int64) { + v.offsetInChunk += start - v.start + v.start, v.stop = start, stop +} +func (v *VisibleInterval) Clone() IntervalValue { + return &VisibleInterval{ + start: v.start, + stop: v.stop, + modifiedTsNs: v.modifiedTsNs, + fileId: v.fileId, + offsetInChunk: v.offsetInChunk, + chunkSize: v.chunkSize, + cipherKey: v.cipherKey, + isGzipped: v.isGzipped, } } diff --git a/weed/filer/filechunks_read.go b/weed/filer/filechunks_read.go index 8a15f6e7a..8b2d36e12 100644 --- a/weed/filer/filechunks_read.go +++ b/weed/filer/filechunks_read.go @@ -1,14 +1,22 @@ package filer import ( + "container/list" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "golang.org/x/exp/slices" ) -func readResolvedChunks(chunks []*filer_pb.FileChunk) (visibles []VisibleInterval) { +func readResolvedChunks(chunks []*filer_pb.FileChunk, startOffset int64, stopOffset int64) (visibles *IntervalList[*VisibleInterval]) { var points []*Point for _, chunk := range chunks { + if chunk.IsChunkManifest { + println("This should not happen! 
A manifest chunk found:", chunk.GetFileIdString()) + } + start, stop := max(chunk.Offset, startOffset), min(chunk.Offset+int64(chunk.Size), stopOffset) + if start >= stop { + continue + } points = append(points, &Point{ x: chunk.Offset, ts: chunk.ModifiedTsNs, @@ -33,40 +41,45 @@ func readResolvedChunks(chunks []*filer_pb.FileChunk) (visibles []VisibleInterva }) var prevX int64 - var queue []*Point + queue := list.New() // points with higher ts are at the tail + visibles = NewIntervalList[*VisibleInterval]() + var prevPoint *Point for _, point := range points { + if queue.Len() > 0 { + prevPoint = queue.Back().Value.(*Point) + } else { + prevPoint = nil + } if point.isStart { - if len(queue) > 0 { - lastIndex := len(queue) - 1 - lastPoint := queue[lastIndex] - if point.x != prevX && lastPoint.ts < point.ts { - visibles = addToVisibles(visibles, prevX, lastPoint, point) + if prevPoint != nil { + if point.x != prevX && prevPoint.ts < point.ts { + addToVisibles(visibles, prevX, prevPoint, point) prevX = point.x } } // insert into queue - for i := len(queue); i >= 0; i-- { - if i == 0 || queue[i-1].ts <= point.ts { - if i == len(queue) { - prevX = point.x + if prevPoint == nil || prevPoint.ts < point.ts { + queue.PushBack(point) + prevX = point.x + } else { + for e := queue.Front(); e != nil; e = e.Next() { + if e.Value.(*Point).ts > point.ts { + queue.InsertBefore(point, e) + break } - queue = addToQueue(queue, i, point) - break } } } else { - lastIndex := len(queue) - 1 - index := lastIndex - var startPoint *Point - for ; index >= 0; index-- { - startPoint = queue[index] - if startPoint.ts == point.ts { - queue = removeFromQueue(queue, index) + isLast := true + for e := queue.Back(); e != nil; e = e.Prev() { + if e.Value.(*Point).ts == point.ts { + queue.Remove(e) break } + isLast = false } - if index == lastIndex && startPoint != nil { - visibles = addToVisibles(visibles, prevX, startPoint, point) + if isLast && prevPoint != nil { + addToVisibles(visibles, prevX, prevPoint, point) prevX = point.x } } @@ -75,37 +88,30 @@ func readResolvedChunks(chunks []*filer_pb.FileChunk) (visibles []VisibleInterva return } -func removeFromQueue(queue []*Point, index int) []*Point { - for i := index; i < len(queue)-1; i++ { - queue[i] = queue[i+1] - } - queue = queue[:len(queue)-1] - return queue -} - -func addToQueue(queue []*Point, index int, point *Point) []*Point { - queue = append(queue, point) - for i := len(queue) - 1; i > index; i-- { - queue[i], queue[i-1] = queue[i-1], queue[i] - } - return queue -} - -func addToVisibles(visibles []VisibleInterval, prevX int64, startPoint *Point, point *Point) []VisibleInterval { +func addToVisibles(visibles *IntervalList[*VisibleInterval], prevX int64, startPoint *Point, point *Point) { if prevX < point.x { chunk := startPoint.chunk - visibles = append(visibles, VisibleInterval{ - start: prevX, - stop: point.x, - fileId: chunk.GetFileIdString(), - modifiedTsNs: chunk.ModifiedTsNs, - chunkOffset: prevX - chunk.Offset, - chunkSize: chunk.Size, - cipherKey: chunk.CipherKey, - isGzipped: chunk.IsCompressed, - }) + visible := &VisibleInterval{ + start: prevX, + stop: point.x, + fileId: chunk.GetFileIdString(), + modifiedTsNs: chunk.ModifiedTsNs, + offsetInChunk: prevX - chunk.Offset, + chunkSize: chunk.Size, + cipherKey: chunk.CipherKey, + isGzipped: chunk.IsCompressed, + } + appendVisibleInterfal(visibles, visible) } - return visibles +} + +func appendVisibleInterfal(visibles *IntervalList[*VisibleInterval], visible *VisibleInterval) { + 
visibles.AppendInterval(&Interval[*VisibleInterval]{ + StartOffset: visible.start, + StopOffset: visible.stop, + TsNs: visible.modifiedTsNs, + Value: visible, + }) } type Point struct { diff --git a/weed/filer/filechunks_read_test.go b/weed/filer/filechunks_read_test.go index d4bfca72e..c66a874bc 100644 --- a/weed/filer/filechunks_read_test.go +++ b/weed/filer/filechunks_read_test.go @@ -3,6 +3,7 @@ package filer import ( "fmt" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" + "math" "math/rand" "testing" ) @@ -42,9 +43,38 @@ func TestReadResolvedChunks(t *testing.T) { }, } - visibles := readResolvedChunks(chunks) + visibles := readResolvedChunks(chunks, 0, math.MaxInt64) - for _, visible := range visibles { + fmt.Printf("resolved to %d visible intervales\n", visibles.Len()) + for x := visibles.Front(); x != nil; x = x.Next { + visible := x.Value + fmt.Printf("[%d,%d) %s %d\n", visible.start, visible.stop, visible.fileId, visible.modifiedTsNs) + } + +} + +func TestReadResolvedChunks2(t *testing.T) { + + chunks := []*filer_pb.FileChunk{ + { + FileId: "c", + Offset: 200, + Size: 50, + ModifiedTsNs: 3, + }, + { + FileId: "e", + Offset: 200, + Size: 25, + ModifiedTsNs: 5, + }, + } + + visibles := readResolvedChunks(chunks, 0, math.MaxInt64) + + fmt.Printf("resolved to %d visible intervales\n", visibles.Len()) + for x := visibles.Front(); x != nil; x = x.Next { + visible := x.Value fmt.Printf("[%d,%d) %s %d\n", visible.start, visible.stop, visible.fileId, visible.modifiedTsNs) } @@ -72,9 +102,10 @@ func TestRandomizedReadResolvedChunks(t *testing.T) { chunks = append(chunks, randomWrite(array, start, size, ts)) } - visibles := readResolvedChunks(chunks) + visibles := readResolvedChunks(chunks, 0, math.MaxInt64) - for _, visible := range visibles { + for x := visibles.Front(); x != nil; x = x.Next { + visible := x.Value for i := visible.start; i < visible.stop; i++ { if array[i] != visible.modifiedTsNs { t.Errorf("position %d expected ts %d actual ts %d", i, array[i], visible.modifiedTsNs) @@ -112,9 +143,9 @@ func TestSequentialReadResolvedChunks(t *testing.T) { }) } - visibles := readResolvedChunks(chunks) + visibles := readResolvedChunks(chunks, 0, math.MaxInt64) - fmt.Printf("visibles %d", len(visibles)) + fmt.Printf("visibles %d", visibles.Len()) } @@ -201,9 +232,48 @@ func TestActualReadResolvedChunks(t *testing.T) { }, } - visibles := readResolvedChunks(chunks) + visibles := readResolvedChunks(chunks, 0, math.MaxInt64) + + for x := visibles.Front(); x != nil; x = x.Next { + visible := x.Value + fmt.Printf("[%d,%d) %s %d\n", visible.start, visible.stop, visible.fileId, visible.modifiedTsNs) + } + +} + +func TestActualReadResolvedChunks2(t *testing.T) { + + chunks := []*filer_pb.FileChunk{ + { + FileId: "1,e7b96fef48", + Offset: 0, + Size: 184320, + ModifiedTsNs: 1, + }, + { + FileId: "2,22562640b9", + Offset: 184320, + Size: 4096, + ModifiedTsNs: 2, + }, + { + FileId: "2,33562640b9", + Offset: 184320, + Size: 4096, + ModifiedTsNs: 4, + }, + { + FileId: "4,df033e0fe4", + Offset: 188416, + Size: 2097152, + ModifiedTsNs: 3, + }, + } + + visibles := readResolvedChunks(chunks, 0, math.MaxInt64) - for _, visible := range visibles { + for x := visibles.Front(); x != nil; x = x.Next { + visible := x.Value fmt.Printf("[%d,%d) %s %d\n", visible.start, visible.stop, visible.fileId, visible.modifiedTsNs) } diff --git a/weed/filer/filechunks_test.go b/weed/filer/filechunks_test.go index d29e0a600..b448950a9 100644 --- a/weed/filer/filechunks_test.go +++ b/weed/filer/filechunks_test.go @@ -92,7 +92,8 
@@ func TestRandomFileChunksCompact(t *testing.T) { visibles, _ := NonOverlappingVisibleIntervals(nil, chunks, 0, math.MaxInt64) - for _, v := range visibles { + for visible := visibles.Front(); visible != nil; visible = visible.Next { + v := visible.Value for x := v.start; x < v.stop; x++ { assert.Equal(t, strconv.Itoa(int(data[x])), v.fileId) } @@ -137,7 +138,7 @@ func TestIntervalMerging(t *testing.T) { }, Expected: []*VisibleInterval{ {start: 0, stop: 70, fileId: "b"}, - {start: 70, stop: 100, fileId: "a", chunkOffset: 70}, + {start: 70, stop: 100, fileId: "a", offsetInChunk: 70}, }, }, // case 3: updates overwrite full chunks @@ -174,15 +175,15 @@ func TestIntervalMerging(t *testing.T) { }, Expected: []*VisibleInterval{ {start: 0, stop: 200, fileId: "d"}, - {start: 200, stop: 220, fileId: "c", chunkOffset: 130}, + {start: 200, stop: 220, fileId: "c", offsetInChunk: 130}, }, }, // case 6: same updates { Chunks: []*filer_pb.FileChunk{ {Offset: 0, Size: 100, FileId: "abc", Fid: &filer_pb.FileId{FileKey: 1}, ModifiedTsNs: 123}, - {Offset: 0, Size: 100, FileId: "axf", Fid: &filer_pb.FileId{FileKey: 2}, ModifiedTsNs: 123}, - {Offset: 0, Size: 100, FileId: "xyz", Fid: &filer_pb.FileId{FileKey: 3}, ModifiedTsNs: 123}, + {Offset: 0, Size: 100, FileId: "axf", Fid: &filer_pb.FileId{FileKey: 2}, ModifiedTsNs: 124}, + {Offset: 0, Size: 100, FileId: "xyz", Fid: &filer_pb.FileId{FileKey: 3}, ModifiedTsNs: 125}, }, Expected: []*VisibleInterval{ {start: 0, stop: 100, fileId: "xyz"}, @@ -228,11 +229,17 @@ func TestIntervalMerging(t *testing.T) { for i, testcase := range testcases { log.Printf("++++++++++ merged test case %d ++++++++++++++++++++", i) intervals, _ := NonOverlappingVisibleIntervals(nil, testcase.Chunks, 0, math.MaxInt64) - for x, interval := range intervals { - log.Printf("test case %d, interval %d, start=%d, stop=%d, fileId=%s", - i, x, interval.start, interval.stop, interval.fileId) + x := -1 + for visible := intervals.Front(); visible != nil; visible = visible.Next { + x++ + interval := visible.Value + log.Printf("test case %d, interval start=%d, stop=%d, fileId=%s", + i, interval.start, interval.stop, interval.fileId) } - for x, interval := range intervals { + x = -1 + for visible := intervals.Front(); visible != nil; visible = visible.Next { + x++ + interval := visible.Value if interval.start != testcase.Expected[x].start { t.Fatalf("failed on test case %d, interval %d, start %d, expect %d", i, x, interval.start, testcase.Expected[x].start) @@ -245,13 +252,13 @@ func TestIntervalMerging(t *testing.T) { t.Fatalf("failed on test case %d, interval %d, chunkId %s, expect %s", i, x, interval.fileId, testcase.Expected[x].fileId) } - if interval.chunkOffset != testcase.Expected[x].chunkOffset { - t.Fatalf("failed on test case %d, interval %d, chunkOffset %d, expect %d", - i, x, interval.chunkOffset, testcase.Expected[x].chunkOffset) + if interval.offsetInChunk != testcase.Expected[x].offsetInChunk { + t.Fatalf("failed on test case %d, interval %d, offsetInChunk %d, expect %d", + i, x, interval.offsetInChunk, testcase.Expected[x].offsetInChunk) } } - if len(intervals) != len(testcase.Expected) { - t.Fatalf("failed to compact test case %d, len %d expected %d", i, len(intervals), len(testcase.Expected)) + if intervals.Len() != len(testcase.Expected) { + t.Fatalf("failed to compact test case %d, len %d expected %d", i, intervals.Len(), len(testcase.Expected)) } } @@ -276,9 +283,9 @@ func TestChunksReading(t *testing.T) { Offset: 0, Size: 250, Expected: []*ChunkView{ - {Offset: 0, Size: 100, 
FileId: "abc", LogicOffset: 0}, - {Offset: 0, Size: 100, FileId: "asdf", LogicOffset: 100}, - {Offset: 0, Size: 50, FileId: "fsad", LogicOffset: 200}, + {OffsetInChunk: 0, ViewSize: 100, FileId: "abc", ViewOffset: 0}, + {OffsetInChunk: 0, ViewSize: 100, FileId: "asdf", ViewOffset: 100}, + {OffsetInChunk: 0, ViewSize: 50, FileId: "fsad", ViewOffset: 200}, }, }, // case 1: updates overwrite full chunks @@ -290,7 +297,7 @@ func TestChunksReading(t *testing.T) { Offset: 50, Size: 100, Expected: []*ChunkView{ - {Offset: 50, Size: 100, FileId: "asdf", LogicOffset: 50}, + {OffsetInChunk: 50, ViewSize: 100, FileId: "asdf", ViewOffset: 50}, }, }, // case 2: updates overwrite part of previous chunks @@ -302,8 +309,8 @@ func TestChunksReading(t *testing.T) { Offset: 30, Size: 40, Expected: []*ChunkView{ - {Offset: 20, Size: 30, FileId: "b", LogicOffset: 30}, - {Offset: 57, Size: 10, FileId: "a", LogicOffset: 60}, + {OffsetInChunk: 20, ViewSize: 30, FileId: "b", ViewOffset: 30}, + {OffsetInChunk: 57, ViewSize: 10, FileId: "a", ViewOffset: 60}, }, }, // case 3: updates overwrite full chunks @@ -316,8 +323,8 @@ func TestChunksReading(t *testing.T) { Offset: 0, Size: 200, Expected: []*ChunkView{ - {Offset: 0, Size: 50, FileId: "asdf", LogicOffset: 0}, - {Offset: 0, Size: 150, FileId: "xxxx", LogicOffset: 50}, + {OffsetInChunk: 0, ViewSize: 50, FileId: "asdf", ViewOffset: 0}, + {OffsetInChunk: 0, ViewSize: 150, FileId: "xxxx", ViewOffset: 50}, }, }, // case 4: updates far away from prev chunks @@ -330,8 +337,8 @@ func TestChunksReading(t *testing.T) { Offset: 0, Size: 400, Expected: []*ChunkView{ - {Offset: 0, Size: 200, FileId: "asdf", LogicOffset: 0}, - {Offset: 0, Size: 150, FileId: "xxxx", LogicOffset: 250}, + {OffsetInChunk: 0, ViewSize: 200, FileId: "asdf", ViewOffset: 0}, + {OffsetInChunk: 0, ViewSize: 150, FileId: "xxxx", ViewOffset: 250}, }, }, // case 5: updates overwrite full chunks @@ -345,21 +352,21 @@ func TestChunksReading(t *testing.T) { Offset: 0, Size: 220, Expected: []*ChunkView{ - {Offset: 0, Size: 200, FileId: "c", LogicOffset: 0}, - {Offset: 130, Size: 20, FileId: "b", LogicOffset: 200}, + {OffsetInChunk: 0, ViewSize: 200, FileId: "c", ViewOffset: 0}, + {OffsetInChunk: 130, ViewSize: 20, FileId: "b", ViewOffset: 200}, }, }, // case 6: same updates { Chunks: []*filer_pb.FileChunk{ {Offset: 0, Size: 100, FileId: "abc", Fid: &filer_pb.FileId{FileKey: 1}, ModifiedTsNs: 123}, - {Offset: 0, Size: 100, FileId: "def", Fid: &filer_pb.FileId{FileKey: 2}, ModifiedTsNs: 123}, - {Offset: 0, Size: 100, FileId: "xyz", Fid: &filer_pb.FileId{FileKey: 3}, ModifiedTsNs: 123}, + {Offset: 0, Size: 100, FileId: "def", Fid: &filer_pb.FileId{FileKey: 2}, ModifiedTsNs: 124}, + {Offset: 0, Size: 100, FileId: "xyz", Fid: &filer_pb.FileId{FileKey: 3}, ModifiedTsNs: 125}, }, Offset: 0, Size: 100, Expected: []*ChunkView{ - {Offset: 0, Size: 100, FileId: "xyz", LogicOffset: 0}, + {OffsetInChunk: 0, ViewSize: 100, FileId: "xyz", ViewOffset: 0}, }, }, // case 7: edge cases @@ -372,8 +379,8 @@ func TestChunksReading(t *testing.T) { Offset: 0, Size: 200, Expected: []*ChunkView{ - {Offset: 0, Size: 100, FileId: "abc", LogicOffset: 0}, - {Offset: 0, Size: 100, FileId: "asdf", LogicOffset: 100}, + {OffsetInChunk: 0, ViewSize: 100, FileId: "abc", ViewOffset: 0}, + {OffsetInChunk: 0, ViewSize: 100, FileId: "asdf", ViewOffset: 100}, }, }, // case 8: edge cases @@ -386,9 +393,9 @@ func TestChunksReading(t *testing.T) { Offset: 0, Size: 300, Expected: []*ChunkView{ - {Offset: 0, Size: 90, FileId: "abc", LogicOffset: 0}, - 
{Offset: 0, Size: 100, FileId: "asdf", LogicOffset: 90}, - {Offset: 0, Size: 110, FileId: "fsad", LogicOffset: 190}, + {OffsetInChunk: 0, ViewSize: 90, FileId: "abc", ViewOffset: 0}, + {OffsetInChunk: 0, ViewSize: 100, FileId: "asdf", ViewOffset: 90}, + {OffsetInChunk: 0, ViewSize: 110, FileId: "fsad", ViewOffset: 190}, }, }, // case 9: edge cases @@ -404,12 +411,12 @@ func TestChunksReading(t *testing.T) { Offset: 0, Size: 153578836, Expected: []*ChunkView{ - {Offset: 0, Size: 43175936, FileId: "2,111fc2cbfac1", LogicOffset: 0}, - {Offset: 0, Size: 52981760 - 43175936, FileId: "2,112a36ea7f85", LogicOffset: 43175936}, - {Offset: 0, Size: 72564736 - 52981760, FileId: "4,112d5f31c5e7", LogicOffset: 52981760}, - {Offset: 0, Size: 133255168 - 72564736, FileId: "1,113245f0cdb6", LogicOffset: 72564736}, - {Offset: 0, Size: 137269248 - 133255168, FileId: "3,1141a70733b5", LogicOffset: 133255168}, - {Offset: 0, Size: 153578836 - 137269248, FileId: "1,114201d5bbdb", LogicOffset: 137269248}, + {OffsetInChunk: 0, ViewSize: 43175936, FileId: "2,111fc2cbfac1", ViewOffset: 0}, + {OffsetInChunk: 0, ViewSize: 52981760 - 43175936, FileId: "2,112a36ea7f85", ViewOffset: 43175936}, + {OffsetInChunk: 0, ViewSize: 72564736 - 52981760, FileId: "4,112d5f31c5e7", ViewOffset: 52981760}, + {OffsetInChunk: 0, ViewSize: 133255168 - 72564736, FileId: "1,113245f0cdb6", ViewOffset: 72564736}, + {OffsetInChunk: 0, ViewSize: 137269248 - 133255168, FileId: "3,1141a70733b5", ViewOffset: 133255168}, + {OffsetInChunk: 0, ViewSize: 153578836 - 137269248, FileId: "1,114201d5bbdb", ViewOffset: 137269248}, }, }, } @@ -420,28 +427,31 @@ func TestChunksReading(t *testing.T) { } log.Printf("++++++++++ read test case %d ++++++++++++++++++++", i) chunks := ViewFromChunks(nil, testcase.Chunks, testcase.Offset, testcase.Size) - for x, chunk := range chunks { + x := -1 + for c := chunks.Front(); c != nil; c = c.Next { + x++ + chunk := c.Value log.Printf("read case %d, chunk %d, offset=%d, size=%d, fileId=%s", - i, x, chunk.Offset, chunk.Size, chunk.FileId) - if chunk.Offset != testcase.Expected[x].Offset { + i, x, chunk.OffsetInChunk, chunk.ViewSize, chunk.FileId) + if chunk.OffsetInChunk != testcase.Expected[x].OffsetInChunk { t.Fatalf("failed on read case %d, chunk %s, Offset %d, expect %d", - i, chunk.FileId, chunk.Offset, testcase.Expected[x].Offset) + i, chunk.FileId, chunk.OffsetInChunk, testcase.Expected[x].OffsetInChunk) } - if chunk.Size != testcase.Expected[x].Size { - t.Fatalf("failed on read case %d, chunk %s, Size %d, expect %d", - i, chunk.FileId, chunk.Size, testcase.Expected[x].Size) + if chunk.ViewSize != testcase.Expected[x].ViewSize { + t.Fatalf("failed on read case %d, chunk %s, ViewSize %d, expect %d", + i, chunk.FileId, chunk.ViewSize, testcase.Expected[x].ViewSize) } if chunk.FileId != testcase.Expected[x].FileId { t.Fatalf("failed on read case %d, chunk %d, FileId %s, expect %s", i, x, chunk.FileId, testcase.Expected[x].FileId) } - if chunk.LogicOffset != testcase.Expected[x].LogicOffset { - t.Fatalf("failed on read case %d, chunk %d, LogicOffset %d, expect %d", - i, x, chunk.LogicOffset, testcase.Expected[x].LogicOffset) + if chunk.ViewOffset != testcase.Expected[x].ViewOffset { + t.Fatalf("failed on read case %d, chunk %d, ViewOffset %d, expect %d", + i, x, chunk.ViewOffset, testcase.Expected[x].ViewOffset) } } - if len(chunks) != len(testcase.Expected) { - t.Fatalf("failed to read test case %d, len %d expected %d", i, len(chunks), len(testcase.Expected)) + if chunks.Len() != len(testcase.Expected) { + 
t.Fatalf("failed to read test case %d, len %d expected %d", i, chunks.Len(), len(testcase.Expected)) } } @@ -467,73 +477,79 @@ func BenchmarkCompactFileChunks(b *testing.B) { } } +func addVisibleInterval(visibles *IntervalList[*VisibleInterval], x *VisibleInterval) { + visibles.AppendInterval(&Interval[*VisibleInterval]{ + StartOffset: x.start, + StopOffset: x.stop, + TsNs: x.modifiedTsNs, + Value: x, + }) +} + func TestViewFromVisibleIntervals(t *testing.T) { - visibles := []VisibleInterval{ - { - start: 0, - stop: 25, - fileId: "fid1", - }, - { - start: 4096, - stop: 8192, - fileId: "fid2", - }, - { - start: 16384, - stop: 18551, - fileId: "fid3", - }, - } + visibles := NewIntervalList[*VisibleInterval]() + addVisibleInterval(visibles, &VisibleInterval{ + start: 0, + stop: 25, + fileId: "fid1", + }) + addVisibleInterval(visibles, &VisibleInterval{ + start: 4096, + stop: 8192, + fileId: "fid2", + }) + addVisibleInterval(visibles, &VisibleInterval{ + start: 16384, + stop: 18551, + fileId: "fid3", + }) views := ViewFromVisibleIntervals(visibles, 0, math.MaxInt32) - if len(views) != len(visibles) { - assert.Equal(t, len(visibles), len(views), "ViewFromVisibleIntervals error") + if views.Len() != visibles.Len() { + assert.Equal(t, visibles.Len(), views.Len(), "ViewFromVisibleIntervals error") } } func TestViewFromVisibleIntervals2(t *testing.T) { - visibles := []VisibleInterval{ - { - start: 344064, - stop: 348160, - fileId: "fid1", - }, - { - start: 348160, - stop: 356352, - fileId: "fid2", - }, - } + visibles := NewIntervalList[*VisibleInterval]() + addVisibleInterval(visibles, &VisibleInterval{ + start: 344064, + stop: 348160, + fileId: "fid1", + }) + addVisibleInterval(visibles, &VisibleInterval{ + start: 348160, + stop: 356352, + fileId: "fid2", + }) views := ViewFromVisibleIntervals(visibles, 0, math.MaxInt32) - if len(views) != len(visibles) { - assert.Equal(t, len(visibles), len(views), "ViewFromVisibleIntervals error") + if views.Len() != visibles.Len() { + assert.Equal(t, visibles.Len(), views.Len(), "ViewFromVisibleIntervals error") } } func TestViewFromVisibleIntervals3(t *testing.T) { - visibles := []VisibleInterval{ - { - start: 1000, - stop: 2000, - fileId: "fid1", - }, - { - start: 3000, - stop: 4000, - fileId: "fid2", - }, - } + visibles := NewIntervalList[*VisibleInterval]() + addVisibleInterval(visibles, &VisibleInterval{ + start: 1000, + stop: 2000, + fileId: "fid1", + }) + addVisibleInterval(visibles, &VisibleInterval{ + start: 3000, + stop: 4000, + fileId: "fid2", + }) views := ViewFromVisibleIntervals(visibles, 1700, 1500) - if len(views) != len(visibles) { - assert.Equal(t, len(visibles), len(views), "ViewFromVisibleIntervals error") + if views.Len() != visibles.Len() { + assert.Equal(t, visibles.Len(), views.Len(), "ViewFromVisibleIntervals error") } } diff --git a/weed/filer/filer_notify_append.go b/weed/filer/filer_notify_append.go index 5c03d4f16..55278c492 100644 --- a/weed/filer/filer_notify_append.go +++ b/weed/filer/filer_notify_append.go @@ -40,7 +40,7 @@ func (f *Filer) appendToFile(targetFile string, data []byte) error { } // append to existing chunks - entry.Chunks = append(entry.GetChunks(), uploadResult.ToPbFileChunk(assignResult.Fid, offset)) + entry.Chunks = append(entry.GetChunks(), uploadResult.ToPbFileChunk(assignResult.Fid, offset, time.Now().UnixNano())) // update the entry err = f.CreateEntry(context.Background(), entry, false, false, nil, false) diff --git a/weed/filer/interval_list.go b/weed/filer/interval_list.go new file mode 100644 index 
000000000..b3d2a76b9 --- /dev/null +++ b/weed/filer/interval_list.go @@ -0,0 +1,259 @@ +package filer + +import ( + "math" + "sync" +) + +type IntervalValue interface { + SetStartStop(start, stop int64) + Clone() IntervalValue +} + +type Interval[T IntervalValue] struct { + StartOffset int64 + StopOffset int64 + TsNs int64 + Value T + Prev *Interval[T] + Next *Interval[T] +} + +func (interval *Interval[T]) Size() int64 { + return interval.StopOffset - interval.StartOffset +} + +// IntervalList mark written intervals within one page chunk +type IntervalList[T IntervalValue] struct { + head *Interval[T] + tail *Interval[T] + Lock sync.Mutex +} + +func NewIntervalList[T IntervalValue]() *IntervalList[T] { + list := &IntervalList[T]{ + head: &Interval[T]{ + StartOffset: -1, + StopOffset: -1, + }, + tail: &Interval[T]{ + StartOffset: math.MaxInt64, + StopOffset: math.MaxInt64, + }, + } + return list +} + +func (list *IntervalList[T]) Front() (interval *Interval[T]) { + return list.head.Next +} + +func (list *IntervalList[T]) AppendInterval(interval *Interval[T]) { + list.Lock.Lock() + defer list.Lock.Unlock() + + if list.head.Next == nil { + list.head.Next = interval + } + interval.Prev = list.tail.Prev + if list.tail.Prev != nil { + list.tail.Prev.Next = interval + } + list.tail.Prev = interval +} + +func (list *IntervalList[T]) Overlay(startOffset, stopOffset, tsNs int64, value T) { + if startOffset >= stopOffset { + return + } + interval := &Interval[T]{ + StartOffset: startOffset, + StopOffset: stopOffset, + TsNs: tsNs, + Value: value, + } + + list.Lock.Lock() + defer list.Lock.Unlock() + + list.overlayInterval(interval) +} + +func (list *IntervalList[T]) InsertInterval(startOffset, stopOffset, tsNs int64, value T) { + interval := &Interval[T]{ + StartOffset: startOffset, + StopOffset: stopOffset, + TsNs: tsNs, + Value: value, + } + + list.Lock.Lock() + defer list.Lock.Unlock() + + value.SetStartStop(startOffset, stopOffset) + list.insertInterval(interval) +} + +func (list *IntervalList[T]) insertInterval(interval *Interval[T]) { + prev := list.head + next := prev.Next + + for interval.StartOffset < interval.StopOffset { + if next == nil { + // add to the end + list.insertBetween(prev, interval, list.tail) + break + } + + // interval is ahead of the next + if interval.StopOffset <= next.StartOffset { + list.insertBetween(prev, interval, next) + break + } + + // interval is after the next + if next.StopOffset <= interval.StartOffset { + prev = next + next = next.Next + continue + } + + // intersecting next and interval + if interval.TsNs >= next.TsNs { + // interval is newer + if next.StartOffset < interval.StartOffset { + // left side of next is ahead of interval + t := &Interval[T]{ + StartOffset: next.StartOffset, + StopOffset: interval.StartOffset, + TsNs: next.TsNs, + Value: next.Value.Clone().(T), + } + t.Value.SetStartStop(t.StartOffset, t.StopOffset) + list.insertBetween(prev, t, interval) + next.StartOffset = interval.StartOffset + next.Value.SetStartStop(next.StartOffset, next.StopOffset) + prev = t + } + if interval.StopOffset < next.StopOffset { + // right side of next is after interval + next.StartOffset = interval.StopOffset + next.Value.SetStartStop(next.StartOffset, next.StopOffset) + list.insertBetween(prev, interval, next) + break + } else { + // next is covered + prev.Next = interval + next = next.Next + } + } else { + // next is newer + if interval.StartOffset < next.StartOffset { + // left side of interval is ahead of next + t := &Interval[T]{ + StartOffset: 
interval.StartOffset, + StopOffset: next.StartOffset, + TsNs: interval.TsNs, + Value: interval.Value.Clone().(T), + } + t.Value.SetStartStop(t.StartOffset, t.StopOffset) + list.insertBetween(prev, t, next) + interval.StartOffset = next.StartOffset + interval.Value.SetStartStop(interval.StartOffset, interval.StopOffset) + } + if next.StopOffset < interval.StopOffset { + // right side of interval is after next + interval.StartOffset = next.StopOffset + interval.Value.SetStartStop(interval.StartOffset, interval.StopOffset) + } else { + // interval is covered + break + } + } + + } +} + +func (list *IntervalList[T]) insertBetween(a, interval, b *Interval[T]) { + a.Next = interval + b.Prev = interval + if a != list.head { + interval.Prev = a + } + if b != list.tail { + interval.Next = b + } +} + +func (list *IntervalList[T]) overlayInterval(interval *Interval[T]) { + + //t := list.head + //for ; t.Next != nil; t = t.Next { + // if t.TsNs > interval.TsNs { + // println("writes is out of order", t.TsNs-interval.TsNs, "ns") + // } + //} + + p := list.head + for ; p.Next != nil && p.Next.StopOffset <= interval.StartOffset; p = p.Next { + } + q := list.tail + for ; q.Prev != nil && q.Prev.StartOffset >= interval.StopOffset; q = q.Prev { + } + + // left side + // interval after p.Next start + if p.Next != nil && p.Next.StartOffset < interval.StartOffset { + t := &Interval[T]{ + StartOffset: p.Next.StartOffset, + StopOffset: interval.StartOffset, + TsNs: p.Next.TsNs, + Value: p.Next.Value, + } + p.Next = t + if p != list.head { + t.Prev = p + } + t.Next = interval + interval.Prev = t + } else { + p.Next = interval + if p != list.head { + interval.Prev = p + } + } + + // right side + // interval ends before p.Prev + if q.Prev != nil && interval.StopOffset < q.Prev.StopOffset { + t := &Interval[T]{ + StartOffset: interval.StopOffset, + StopOffset: q.Prev.StopOffset, + TsNs: q.Prev.TsNs, + Value: q.Prev.Value, + } + q.Prev = t + if q != list.tail { + t.Next = q + } + interval.Next = t + t.Prev = interval + } else { + q.Prev = interval + if q != list.tail { + interval.Next = q + } + } + +} + +func (list *IntervalList[T]) Len() int { + list.Lock.Lock() + defer list.Lock.Unlock() + + var count int + for t := list.head; t != nil; t = t.Next { + count++ + } + return count - 1 +} diff --git a/weed/filer/interval_list_test.go b/weed/filer/interval_list_test.go new file mode 100644 index 000000000..dea510fed --- /dev/null +++ b/weed/filer/interval_list_test.go @@ -0,0 +1,327 @@ +package filer + +import ( + "fmt" + "github.com/stretchr/testify/assert" + "testing" +) + +type IntervalInt int + +func (i IntervalInt) SetStartStop(start, stop int64) { +} +func (i IntervalInt) Clone() IntervalValue { + return i +} + +func TestIntervalList_Overlay(t *testing.T) { + list := NewIntervalList[IntervalInt]() + list.Overlay(0, 100, 1, 1) + list.Overlay(50, 150, 2, 2) + list.Overlay(200, 250, 3, 3) + list.Overlay(225, 250, 4, 4) + list.Overlay(175, 210, 5, 5) + list.Overlay(0, 25, 6, 6) + for p := list.Front(); p != nil; p = p.Next { + fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) + } + assert.Equal(t, 6, list.Len()) + println() + list.Overlay(50, 150, 7, 7) + for p := list.Front(); p != nil; p = p.Next { + fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) + } + assert.Equal(t, 6, list.Len()) +} + +func TestIntervalList_Overlay2(t *testing.T) { + list := NewIntervalList[IntervalInt]() + list.Overlay(50, 100, 1, 1) + list.Overlay(0, 50, 2, 2) + for p := list.Front(); p != 
nil; p = p.Next { + fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) + } +} + +func TestIntervalList_Overlay3(t *testing.T) { + list := NewIntervalList[IntervalInt]() + list.Overlay(50, 100, 1, 1) + assert.Equal(t, 1, list.Len()) + + list.Overlay(0, 60, 2, 2) + for p := list.Front(); p != nil; p = p.Next { + fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) + } + assert.Equal(t, 2, list.Len()) +} + +func TestIntervalList_Overlay4(t *testing.T) { + list := NewIntervalList[IntervalInt]() + list.Overlay(50, 100, 1, 1) + list.Overlay(0, 100, 2, 2) + for p := list.Front(); p != nil; p = p.Next { + fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) + } + assert.Equal(t, 1, list.Len()) +} + +func TestIntervalList_Overlay5(t *testing.T) { + list := NewIntervalList[IntervalInt]() + list.Overlay(50, 100, 1, 1) + list.Overlay(0, 110, 2, 2) + for p := list.Front(); p != nil; p = p.Next { + fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) + } + assert.Equal(t, 1, list.Len()) +} + +func TestIntervalList_Overlay6(t *testing.T) { + list := NewIntervalList[IntervalInt]() + list.Overlay(50, 100, 1, 1) + list.Overlay(50, 110, 2, 2) + for p := list.Front(); p != nil; p = p.Next { + fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) + } + assert.Equal(t, 1, list.Len()) +} + +func TestIntervalList_Overlay7(t *testing.T) { + list := NewIntervalList[IntervalInt]() + list.Overlay(50, 100, 1, 1) + list.Overlay(50, 90, 2, 2) + for p := list.Front(); p != nil; p = p.Next { + fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) + } + assert.Equal(t, 2, list.Len()) +} + +func TestIntervalList_Overlay8(t *testing.T) { + list := NewIntervalList[IntervalInt]() + list.Overlay(50, 100, 1, 1) + list.Overlay(60, 90, 2, 2) + for p := list.Front(); p != nil; p = p.Next { + fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) + } + assert.Equal(t, 3, list.Len()) +} + +func TestIntervalList_Overlay9(t *testing.T) { + list := NewIntervalList[IntervalInt]() + list.Overlay(50, 100, 1, 1) + list.Overlay(60, 100, 2, 2) + for p := list.Front(); p != nil; p = p.Next { + fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) + } + assert.Equal(t, 2, list.Len()) +} + +func TestIntervalList_Overlay10(t *testing.T) { + list := NewIntervalList[IntervalInt]() + list.Overlay(50, 100, 1, 1) + list.Overlay(60, 110, 2, 2) + for p := list.Front(); p != nil; p = p.Next { + fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) + } + assert.Equal(t, 2, list.Len()) +} + +func TestIntervalList_Overlay11(t *testing.T) { + list := NewIntervalList[IntervalInt]() + list.Overlay(0, 100, 1, 1) + list.Overlay(100, 110, 2, 2) + list.Overlay(0, 90, 3, 3) + list.Overlay(0, 80, 4, 4) + list.Overlay(0, 90, 5, 5) + list.Overlay(90, 90, 6, 6) + for p := list.Front(); p != nil; p = p.Next { + fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) + } + assert.Equal(t, 3, list.Len()) +} + +func TestIntervalList_insertInterval1(t *testing.T) { + list := NewIntervalList[IntervalInt]() + list.InsertInterval(50, 150, 2, 2) + list.InsertInterval(200, 250, 3, 3) + + for p := list.Front(); p != nil; p = p.Next { + fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) + } + assert.Equal(t, 2, list.Len()) +} + +func TestIntervalList_insertInterval2(t *testing.T) { + list := NewIntervalList[IntervalInt]() + 
list.InsertInterval(50, 150, 2, 2) + list.InsertInterval(0, 25, 3, 3) + + for p := list.Front(); p != nil; p = p.Next { + fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) + } + assert.Equal(t, 2, list.Len()) +} + +func TestIntervalList_insertInterval3(t *testing.T) { + list := NewIntervalList[IntervalInt]() + list.InsertInterval(50, 150, 2, 2) + list.InsertInterval(200, 250, 4, 4) + + list.InsertInterval(0, 75, 3, 3) + + for p := list.Front(); p != nil; p = p.Next { + fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) + } + assert.Equal(t, 3, list.Len()) +} + +func TestIntervalList_insertInterval4(t *testing.T) { + list := NewIntervalList[IntervalInt]() + list.InsertInterval(200, 250, 4, 4) + + list.InsertInterval(0, 225, 3, 3) + + for p := list.Front(); p != nil; p = p.Next { + fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) + } + assert.Equal(t, 2, list.Len()) +} + +func TestIntervalList_insertInterval5(t *testing.T) { + list := NewIntervalList[IntervalInt]() + list.InsertInterval(200, 250, 4, 4) + + list.InsertInterval(0, 225, 5, 5) + + for p := list.Front(); p != nil; p = p.Next { + fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) + } + assert.Equal(t, 2, list.Len()) +} + +func TestIntervalList_insertInterval6(t *testing.T) { + list := NewIntervalList[IntervalInt]() + + list.InsertInterval(50, 150, 2, 2) + list.InsertInterval(200, 250, 4, 4) + + list.InsertInterval(0, 275, 1, 1) + + for p := list.Front(); p != nil; p = p.Next { + fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) + } + assert.Equal(t, 5, list.Len()) +} + +func TestIntervalList_insertInterval7(t *testing.T) { + list := NewIntervalList[IntervalInt]() + + list.InsertInterval(50, 150, 2, 2) + list.InsertInterval(200, 250, 4, 4) + + list.InsertInterval(75, 275, 1, 1) + + for p := list.Front(); p != nil; p = p.Next { + fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) + } + assert.Equal(t, 4, list.Len()) +} + +func TestIntervalList_insertInterval8(t *testing.T) { + list := NewIntervalList[IntervalInt]() + + list.InsertInterval(50, 150, 2, 2) + list.InsertInterval(200, 250, 4, 4) + + list.InsertInterval(75, 275, 3, 3) + + for p := list.Front(); p != nil; p = p.Next { + fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) + } + assert.Equal(t, 4, list.Len()) +} + +func TestIntervalList_insertInterval9(t *testing.T) { + list := NewIntervalList[IntervalInt]() + + list.InsertInterval(50, 150, 2, 2) + list.InsertInterval(200, 250, 4, 4) + + list.InsertInterval(50, 150, 3, 3) + + for p := list.Front(); p != nil; p = p.Next { + fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) + } + assert.Equal(t, 2, list.Len()) +} + +func TestIntervalList_insertInterval10(t *testing.T) { + list := NewIntervalList[IntervalInt]() + + list.InsertInterval(50, 100, 2, 2) + + list.InsertInterval(200, 300, 4, 4) + + list.InsertInterval(100, 200, 5, 5) + + for p := list.Front(); p != nil; p = p.Next { + fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) + } + assert.Equal(t, 3, list.Len()) +} + +func TestIntervalList_insertInterval11(t *testing.T) { + list := NewIntervalList[IntervalInt]() + + list.InsertInterval(0, 64, 1, 1) + + list.InsertInterval(72, 136, 3, 3) + + list.InsertInterval(64, 128, 2, 2) + + list.InsertInterval(68, 72, 4, 4) + + for p := list.Front(); p != nil; p = p.Next { + fmt.Printf("[%d,%d) %d %d\n", 
p.StartOffset, p.StopOffset, p.TsNs, p.Value) + } + assert.Equal(t, 4, list.Len()) +} + +type IntervalStruct struct { + x int + start int64 + stop int64 +} + +func newIntervalStruct(i int) IntervalStruct { + return IntervalStruct{ + x: i, + } +} + +func (i IntervalStruct) SetStartStop(start, stop int64) { + i.start, i.stop = start, stop +} +func (i IntervalStruct) Clone() IntervalValue { + return &IntervalStruct{ + x: i.x, + start: i.start, + stop: i.stop, + } +} + +func TestIntervalList_insertIntervalStruct(t *testing.T) { + list := NewIntervalList[IntervalStruct]() + + list.InsertInterval(0, 64, 1, newIntervalStruct(1)) + + list.InsertInterval(64, 72, 2, newIntervalStruct(2)) + + list.InsertInterval(72, 136, 3, newIntervalStruct(3)) + + list.InsertInterval(64, 68, 4, newIntervalStruct(4)) + + for p := list.Front(); p != nil; p = p.Next { + fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) + } + assert.Equal(t, 4, list.Len()) +} diff --git a/weed/filer/reader_at.go b/weed/filer/reader_at.go index 9d1fab20a..27e8f79a6 100644 --- a/weed/filer/reader_at.go +++ b/weed/filer/reader_at.go @@ -16,8 +16,7 @@ import ( type ChunkReadAt struct { masterClient *wdclient.MasterClient - chunkViews []*ChunkView - readerLock sync.Mutex + chunkViews *IntervalList[*ChunkView] fileSize int64 readerCache *ReaderCache readerPattern *ReaderPattern @@ -89,7 +88,7 @@ func LookupFn(filerClient filer_pb.FilerClient) wdclient.LookupFileIdFunctionTyp } } -func NewChunkReaderAtFromClient(lookupFn wdclient.LookupFileIdFunctionType, chunkViews []*ChunkView, chunkCache chunk_cache.ChunkCache, fileSize int64) *ChunkReadAt { +func NewChunkReaderAtFromClient(lookupFn wdclient.LookupFileIdFunctionType, chunkViews *IntervalList[*ChunkView], chunkCache chunk_cache.ChunkCache, fileSize int64) *ChunkReadAt { return &ChunkReadAt{ chunkViews: chunkViews, @@ -108,44 +107,58 @@ func (c *ChunkReadAt) ReadAt(p []byte, offset int64) (n int, err error) { c.readerPattern.MonitorReadAt(offset, len(p)) - c.readerLock.Lock() - defer c.readerLock.Unlock() + c.chunkViews.Lock.Lock() + defer c.chunkViews.Lock.Unlock() + + // glog.V(4).Infof("ReadAt [%d,%d) of total file size %d bytes %d chunk views", offset, offset+int64(len(p)), c.fileSize, len(c.chunkViews)) + n, _, err = c.doReadAt(p, offset) + return +} + +func (c *ChunkReadAt) ReadAtWithTime(p []byte, offset int64) (n int, ts int64, err error) { + + c.readerPattern.MonitorReadAt(offset, len(p)) + + c.chunkViews.Lock.Lock() + defer c.chunkViews.Lock.Unlock() // glog.V(4).Infof("ReadAt [%d,%d) of total file size %d bytes %d chunk views", offset, offset+int64(len(p)), c.fileSize, len(c.chunkViews)) return c.doReadAt(p, offset) } -func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) { +func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, ts int64, err error) { startOffset, remaining := offset, int64(len(p)) - var nextChunks []*ChunkView - for i, chunk := range c.chunkViews { + var nextChunks *Interval[*ChunkView] + for x := c.chunkViews.Front(); x != nil; x = x.Next { + chunk := x.Value if remaining <= 0 { break } - if i+1 < len(c.chunkViews) { - nextChunks = c.chunkViews[i+1:] + if x.Next != nil { + nextChunks = x.Next } - if startOffset < chunk.LogicOffset { - gap := chunk.LogicOffset - startOffset - glog.V(4).Infof("zero [%d,%d)", startOffset, chunk.LogicOffset) + if startOffset < chunk.ViewOffset { + gap := chunk.ViewOffset - startOffset + glog.V(4).Infof("zero [%d,%d)", startOffset, chunk.ViewOffset) n += zero(p, 
startOffset-offset, gap) - startOffset, remaining = chunk.LogicOffset, remaining-gap + startOffset, remaining = chunk.ViewOffset, remaining-gap if remaining <= 0 { break } } - // fmt.Printf(">>> doReadAt [%d,%d), chunk[%d,%d)\n", offset, offset+int64(len(p)), chunk.LogicOffset, chunk.LogicOffset+int64(chunk.Size)) - chunkStart, chunkStop := max(chunk.LogicOffset, startOffset), min(chunk.LogicOffset+int64(chunk.Size), startOffset+remaining) + // fmt.Printf(">>> doReadAt [%d,%d), chunk[%d,%d)\n", offset, offset+int64(len(p)), chunk.ViewOffset, chunk.ViewOffset+int64(chunk.ViewSize)) + chunkStart, chunkStop := max(chunk.ViewOffset, startOffset), min(chunk.ViewOffset+int64(chunk.ViewSize), startOffset+remaining) if chunkStart >= chunkStop { continue } - // glog.V(4).Infof("read [%d,%d), %d/%d chunk %s [%d,%d)", chunkStart, chunkStop, i, len(c.chunkViews), chunk.FileId, chunk.LogicOffset-chunk.Offset, chunk.LogicOffset-chunk.Offset+int64(chunk.Size)) - bufferOffset := chunkStart - chunk.LogicOffset + chunk.Offset + // glog.V(4).Infof("read [%d,%d), %d/%d chunk %s [%d,%d)", chunkStart, chunkStop, i, len(c.chunkViews), chunk.FileId, chunk.ViewOffset-chunk.Offset, chunk.ViewOffset-chunk.Offset+int64(chunk.ViewSize)) + bufferOffset := chunkStart - chunk.ViewOffset + chunk.OffsetInChunk + ts = chunk.ModifiedTsNs copied, err := c.readChunkSliceAt(p[startOffset-offset:chunkStop-chunkStart+startOffset-offset], chunk, nextChunks, uint64(bufferOffset)) if err != nil { glog.Errorf("fetching chunk %+v: %v\n", chunk, err) - return copied, err + return copied, ts, err } n += copied @@ -177,7 +190,7 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) { } -func (c *ChunkReadAt) readChunkSliceAt(buffer []byte, chunkView *ChunkView, nextChunkViews []*ChunkView, offset uint64) (n int, err error) { +func (c *ChunkReadAt) readChunkSliceAt(buffer []byte, chunkView *ChunkView, nextChunkViews *Interval[*ChunkView], offset uint64) (n int, err error) { if c.readerPattern.IsRandomMode() { n, err := c.readerCache.chunkCache.ReadChunkAt(buffer, chunkView.FileId, offset) @@ -187,16 +200,14 @@ func (c *ChunkReadAt) readChunkSliceAt(buffer []byte, chunkView *ChunkView, next return fetchChunkRange(buffer, c.readerCache.lookupFileIdFn, chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped, int64(offset)) } - n, err = c.readerCache.ReadChunkAt(buffer, chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped, int64(offset), int(chunkView.ChunkSize), chunkView.LogicOffset == 0) + n, err = c.readerCache.ReadChunkAt(buffer, chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped, int64(offset), int(chunkView.ChunkSize), chunkView.ViewOffset == 0) if c.lastChunkFid != chunkView.FileId { - if chunkView.Offset == 0 { // start of a new chunk + if chunkView.OffsetInChunk == 0 { // start of a new chunk if c.lastChunkFid != "" { c.readerCache.UnCache(c.lastChunkFid) - c.readerCache.MaybeCache(nextChunkViews) - } else { - if len(nextChunkViews) >= 1 { - c.readerCache.MaybeCache(nextChunkViews[:1]) // just read the next chunk if at the very beginning - } + } + if nextChunkViews != nil { + c.readerCache.MaybeCache(nextChunkViews) // just read the next chunk if at the very beginning } } } diff --git a/weed/filer/reader_at_test.go b/weed/filer/reader_at_test.go index 29bd47ea4..f61d68a6d 100644 --- a/weed/filer/reader_at_test.go +++ b/weed/filer/reader_at_test.go @@ -5,7 +5,6 @@ import ( "io" "math" "strconv" - "sync" "testing" ) @@ -34,42 +33,40 @@ func (m *mockChunkCache) SetChunk(fileId string, data []byte) 
{ func TestReaderAt(t *testing.T) { - visibles := []VisibleInterval{ - { - start: 1, - stop: 2, - fileId: "1", - chunkSize: 9, - }, - { - start: 3, - stop: 4, - fileId: "3", - chunkSize: 1, - }, - { - start: 5, - stop: 6, - fileId: "5", - chunkSize: 2, - }, - { - start: 7, - stop: 9, - fileId: "7", - chunkSize: 2, - }, - { - start: 9, - stop: 10, - fileId: "9", - chunkSize: 2, - }, - } + visibles := NewIntervalList[*VisibleInterval]() + addVisibleInterval(visibles, &VisibleInterval{ + start: 1, + stop: 2, + fileId: "1", + chunkSize: 9, + }) + addVisibleInterval(visibles, &VisibleInterval{ + start: 3, + stop: 4, + fileId: "3", + chunkSize: 1, + }) + addVisibleInterval(visibles, &VisibleInterval{ + start: 5, + stop: 6, + fileId: "5", + chunkSize: 2, + }) + addVisibleInterval(visibles, &VisibleInterval{ + start: 7, + stop: 9, + fileId: "7", + chunkSize: 2, + }) + addVisibleInterval(visibles, &VisibleInterval{ + start: 9, + stop: 10, + fileId: "9", + chunkSize: 2, + }) readerAt := &ChunkReadAt{ chunkViews: ViewFromVisibleIntervals(visibles, 0, math.MaxInt64), - readerLock: sync.Mutex{}, fileSize: 10, readerCache: newReaderCache(3, &mockChunkCache{}, nil), readerPattern: NewReaderPattern(), @@ -86,7 +83,7 @@ func testReadAt(t *testing.T, readerAt *ChunkReadAt, offset int64, size int, exp if data == nil { data = make([]byte, size) } - n, err := readerAt.doReadAt(data, offset) + n, _, err := readerAt.doReadAt(data, offset) if expectedN != n { t.Errorf("unexpected read size: %d, expect: %d", n, expectedN) @@ -101,24 +98,22 @@ func testReadAt(t *testing.T, readerAt *ChunkReadAt, offset int64, size int, exp func TestReaderAt0(t *testing.T) { - visibles := []VisibleInterval{ - { - start: 2, - stop: 5, - fileId: "1", - chunkSize: 9, - }, - { - start: 7, - stop: 9, - fileId: "2", - chunkSize: 9, - }, - } + visibles := NewIntervalList[*VisibleInterval]() + addVisibleInterval(visibles, &VisibleInterval{ + start: 2, + stop: 5, + fileId: "1", + chunkSize: 9, + }) + addVisibleInterval(visibles, &VisibleInterval{ + start: 7, + stop: 9, + fileId: "2", + chunkSize: 9, + }) readerAt := &ChunkReadAt{ chunkViews: ViewFromVisibleIntervals(visibles, 0, math.MaxInt64), - readerLock: sync.Mutex{}, fileSize: 10, readerCache: newReaderCache(3, &mockChunkCache{}, nil), readerPattern: NewReaderPattern(), @@ -135,18 +130,16 @@ func TestReaderAt0(t *testing.T) { func TestReaderAt1(t *testing.T) { - visibles := []VisibleInterval{ - { - start: 2, - stop: 5, - fileId: "1", - chunkSize: 9, - }, - } + visibles := NewIntervalList[*VisibleInterval]() + addVisibleInterval(visibles, &VisibleInterval{ + start: 2, + stop: 5, + fileId: "1", + chunkSize: 9, + }) readerAt := &ChunkReadAt{ chunkViews: ViewFromVisibleIntervals(visibles, 0, math.MaxInt64), - readerLock: sync.Mutex{}, fileSize: 20, readerCache: newReaderCache(3, &mockChunkCache{}, nil), readerPattern: NewReaderPattern(), @@ -164,24 +157,22 @@ func TestReaderAt1(t *testing.T) { } func TestReaderAtGappedChunksDoNotLeak(t *testing.T) { - visibles := []VisibleInterval{ - { - start: 2, - stop: 3, - fileId: "1", - chunkSize: 5, - }, - { - start: 7, - stop: 9, - fileId: "1", - chunkSize: 4, - }, - } + visibles := NewIntervalList[*VisibleInterval]() + addVisibleInterval(visibles, &VisibleInterval{ + start: 2, + stop: 3, + fileId: "1", + chunkSize: 5, + }) + addVisibleInterval(visibles, &VisibleInterval{ + start: 7, + stop: 9, + fileId: "1", + chunkSize: 4, + }) readerAt := &ChunkReadAt{ chunkViews: ViewFromVisibleIntervals(visibles, 0, math.MaxInt64), - readerLock: sync.Mutex{}, 
fileSize: 9, readerCache: newReaderCache(3, &mockChunkCache{}, nil), readerPattern: NewReaderPattern(), @@ -193,8 +184,7 @@ func TestReaderAtGappedChunksDoNotLeak(t *testing.T) { func TestReaderAtSparseFileDoesNotLeak(t *testing.T) { readerAt := &ChunkReadAt{ - chunkViews: ViewFromVisibleIntervals([]VisibleInterval{}, 0, math.MaxInt64), - readerLock: sync.Mutex{}, + chunkViews: ViewFromVisibleIntervals(NewIntervalList[*VisibleInterval](), 0, math.MaxInt64), fileSize: 3, readerCache: newReaderCache(3, &mockChunkCache{}, nil), readerPattern: NewReaderPattern(), diff --git a/weed/filer/reader_cache.go b/weed/filer/reader_cache.go index cb89c03c5..0a7c83de7 100644 --- a/weed/filer/reader_cache.go +++ b/weed/filer/reader_cache.go @@ -43,7 +43,7 @@ func newReaderCache(limit int, chunkCache chunk_cache.ChunkCache, lookupFileIdFn } } -func (rc *ReaderCache) MaybeCache(chunkViews []*ChunkView) { +func (rc *ReaderCache) MaybeCache(chunkViews *Interval[*ChunkView]) { if rc.lookupFileIdFn == nil { return } @@ -55,7 +55,8 @@ func (rc *ReaderCache) MaybeCache(chunkViews []*ChunkView) { return } - for _, chunkView := range chunkViews { + for x := chunkViews; x != nil; x = x.Next { + chunkView := x.Value if _, found := rc.downloaders[chunkView.FileId]; found { continue } @@ -65,7 +66,7 @@ func (rc *ReaderCache) MaybeCache(chunkViews []*ChunkView) { return } - // glog.V(4).Infof("prefetch %s offset %d", chunkView.FileId, chunkView.LogicOffset) + // glog.V(4).Infof("prefetch %s offset %d", chunkView.FileId, chunkView.ViewOffset) // cache this chunk if not yet cacher := newSingleChunkCacher(rc, chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped, int(chunkView.ChunkSize), false) go cacher.startCaching() diff --git a/weed/filer/stream.go b/weed/filer/stream.go index f28341be4..d49784686 100644 --- a/weed/filer/stream.go +++ b/weed/filer/stream.go @@ -6,7 +6,6 @@ import ( "golang.org/x/exp/slices" "io" "math" - "sort" "strings" "sync" "time" @@ -78,7 +77,8 @@ func StreamContentWithThrottler(masterClient wdclient.HasLookupFileIdFunction, w fileId2Url := make(map[string][]string) - for _, chunkView := range chunkViews { + for x := chunkViews.Front(); x != nil; x = x.Next { + chunkView := x.Value var urlStrings []string var err error for _, backoff := range getLookupFileIdBackoffSchedule { @@ -102,29 +102,30 @@ func StreamContentWithThrottler(masterClient wdclient.HasLookupFileIdFunction, w downloadThrottler := util.NewWriteThrottler(downloadMaxBytesPs) remaining := size - for _, chunkView := range chunkViews { - if offset < chunkView.LogicOffset { - gap := chunkView.LogicOffset - offset + for x := chunkViews.Front(); x != nil; x = x.Next { + chunkView := x.Value + if offset < chunkView.ViewOffset { + gap := chunkView.ViewOffset - offset remaining -= gap - glog.V(4).Infof("zero [%d,%d)", offset, chunkView.LogicOffset) + glog.V(4).Infof("zero [%d,%d)", offset, chunkView.ViewOffset) err := writeZero(writer, gap) if err != nil { - return fmt.Errorf("write zero [%d,%d)", offset, chunkView.LogicOffset) + return fmt.Errorf("write zero [%d,%d)", offset, chunkView.ViewOffset) } - offset = chunkView.LogicOffset + offset = chunkView.ViewOffset } urlStrings := fileId2Url[chunkView.FileId] start := time.Now() - err := retriedStreamFetchChunkData(writer, urlStrings, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size)) - offset += int64(chunkView.Size) - remaining -= int64(chunkView.Size) + err := retriedStreamFetchChunkData(writer, urlStrings, chunkView.CipherKey, 
chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.OffsetInChunk, int(chunkView.ViewSize)) + offset += int64(chunkView.ViewSize) + remaining -= int64(chunkView.ViewSize) stats.FilerRequestHistogram.WithLabelValues("chunkDownload").Observe(time.Since(start).Seconds()) if err != nil { stats.FilerRequestCounter.WithLabelValues("chunkDownloadError").Inc() return fmt.Errorf("read chunk: %v", err) } stats.FilerRequestCounter.WithLabelValues("chunkDownload").Inc() - downloadThrottler.MaybeSlowdown(int64(chunkView.Size)) + downloadThrottler.MaybeSlowdown(int64(chunkView.ViewSize)) } if remaining > 0 { glog.V(4).Infof("zero [%d,%d)", offset, offset+remaining) @@ -167,14 +168,15 @@ func ReadAll(buffer []byte, masterClient *wdclient.MasterClient, chunks []*filer idx := 0 - for _, chunkView := range chunkViews { + for x := chunkViews.Front(); x != nil; x = x.Next { + chunkView := x.Value urlStrings, err := lookupFileIdFn(chunkView.FileId) if err != nil { glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err) return err } - n, err := retriedFetchChunkData(buffer[idx:idx+int(chunkView.Size)], urlStrings, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset) + n, err := retriedFetchChunkData(buffer[idx:idx+int(chunkView.ViewSize)], urlStrings, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.OffsetInChunk) if err != nil { return err } @@ -185,7 +187,7 @@ func ReadAll(buffer []byte, masterClient *wdclient.MasterClient, chunks []*filer // ---------------- ChunkStreamReader ---------------------------------- type ChunkStreamReader struct { - chunkViews []*ChunkView + chunkView *Interval[*ChunkView] totalSize int64 logicOffset int64 buffer []byte @@ -201,17 +203,15 @@ var _ = io.ReaderAt(&ChunkStreamReader{}) func doNewChunkStreamReader(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) *ChunkStreamReader { chunkViews := ViewFromChunks(lookupFileIdFn, chunks, 0, math.MaxInt64) - slices.SortFunc(chunkViews, func(a, b *ChunkView) bool { - return a.LogicOffset < b.LogicOffset - }) var totalSize int64 - for _, chunk := range chunkViews { - totalSize += int64(chunk.Size) + for x := chunkViews.Front(); x != nil; x = x.Next { + chunk := x.Value + totalSize += int64(chunk.ViewSize) } return &ChunkStreamReader{ - chunkViews: chunkViews, + chunkView: chunkViews.Front(), lookupFileId: lookupFileIdFn, totalSize: totalSize, } @@ -290,7 +290,7 @@ func (c *ChunkStreamReader) Seek(offset int64, whence int) (int64, error) { } func insideChunk(offset int64, chunk *ChunkView) bool { - return chunk.LogicOffset <= offset && offset < chunk.LogicOffset+int64(chunk.Size) + return chunk.ViewOffset <= offset && offset < chunk.ViewOffset+int64(chunk.ViewSize) } func (c *ChunkStreamReader) prepareBufferFor(offset int64) (err error) { @@ -300,48 +300,22 @@ func (c *ChunkStreamReader) prepareBufferFor(offset int64) (err error) { } // fmt.Printf("fetch for offset %d\n", offset) - - // need to seek to a different chunk - currentChunkIndex := sort.Search(len(c.chunkViews), func(i int) bool { - return offset < c.chunkViews[i].LogicOffset - }) - if currentChunkIndex == len(c.chunkViews) { - // not found - if insideChunk(offset, c.chunkViews[0]) { - // fmt.Printf("select0 chunk %d %s\n", currentChunkIndex, c.chunkViews[currentChunkIndex].FileId) - currentChunkIndex = 0 - } else if insideChunk(offset, c.chunkViews[len(c.chunkViews)-1]) { - currentChunkIndex = len(c.chunkViews) - 1 - // fmt.Printf("select last chunk %d %s\n", 
currentChunkIndex, c.chunkViews[currentChunkIndex].FileId) - } else { - return io.EOF - } - } else if currentChunkIndex > 0 { - if insideChunk(offset, c.chunkViews[currentChunkIndex]) { - // good hit - } else if insideChunk(offset, c.chunkViews[currentChunkIndex-1]) { - currentChunkIndex -= 1 - // fmt.Printf("select -1 chunk %d %s\n", currentChunkIndex, c.chunkViews[currentChunkIndex].FileId) - } else { - // glog.Fatalf("unexpected1 offset %d", offset) - return fmt.Errorf("unexpected1 offset %d", offset) - } - } else { - // glog.Fatalf("unexpected2 offset %d", offset) - return fmt.Errorf("unexpected2 offset %d", offset) + c.chunkView = c.chunkView.Next + if c.chunkView == nil { + return io.EOF } // positioning within the new chunk - chunk := c.chunkViews[currentChunkIndex] + chunk := c.chunkView.Value if insideChunk(offset, chunk) { - if c.isBufferEmpty() || c.bufferOffset != chunk.LogicOffset { + if c.isBufferEmpty() || c.bufferOffset != chunk.ViewOffset { if err = c.fetchChunkToBuffer(chunk); err != nil { return } } } else { - // glog.Fatalf("unexpected3 offset %d in %s [%d,%d)", offset, chunk.FileId, chunk.LogicOffset, chunk.LogicOffset+int64(chunk.Size)) - return fmt.Errorf("unexpected3 offset %d in %s [%d,%d)", offset, chunk.FileId, chunk.LogicOffset, chunk.LogicOffset+int64(chunk.Size)) + // glog.Fatalf("unexpected3 offset %d in %s [%d,%d)", offset, chunk.FileId, chunk.ViewOffset, chunk.ViewOffset+int64(chunk.ViewSize)) + return fmt.Errorf("unexpected3 offset %d in %s [%d,%d)", offset, chunk.FileId, chunk.ViewOffset, chunk.ViewOffset+int64(chunk.ViewSize)) } return } @@ -355,7 +329,7 @@ func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error { var buffer bytes.Buffer var shouldRetry bool for _, urlString := range urlStrings { - shouldRetry, err = util.ReadUrlAsStream(urlString+"?readDeleted=true", chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size), func(data []byte) { + shouldRetry, err = util.ReadUrlAsStream(urlString+"?readDeleted=true", chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.OffsetInChunk, int(chunkView.ViewSize), func(data []byte) { buffer.Write(data) }) if !shouldRetry { @@ -372,10 +346,10 @@ func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error { return err } c.buffer = buffer.Bytes() - c.bufferOffset = chunkView.LogicOffset + c.bufferOffset = chunkView.ViewOffset c.chunk = chunkView.FileId - // glog.V(0).Infof("fetched %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size)) + // glog.V(0).Infof("fetched %s [%d,%d)", chunkView.FileId, chunkView.ViewOffset, chunkView.ViewOffset+int64(chunkView.ViewSize)) return nil } diff --git a/weed/mount/dirty_pages_chunked.go b/weed/mount/dirty_pages_chunked.go index 78e7b7877..56c97549f 100644 --- a/weed/mount/dirty_pages_chunked.go +++ b/weed/mount/dirty_pages_chunked.go @@ -7,7 +7,6 @@ import ( "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "io" "sync" - "time" ) type ChunkedDirtyPages struct { @@ -38,11 +37,11 @@ func newMemoryChunkPages(fh *FileHandle, chunkSize int64) *ChunkedDirtyPages { return dirtyPages } -func (pages *ChunkedDirtyPages) AddPage(offset int64, data []byte, isSequential bool) { +func (pages *ChunkedDirtyPages) AddPage(offset int64, data []byte, isSequential bool, tsNs int64) { pages.hasWrites = true glog.V(4).Infof("%v memory AddPage [%d, %d)", pages.fh.fh, offset, offset+int64(len(data))) - pages.uploadPipeline.SaveDataAt(data, offset, 
isSequential) + pages.uploadPipeline.SaveDataAt(data, offset, isSequential, tsNs) return } @@ -58,28 +57,27 @@ func (pages *ChunkedDirtyPages) FlushData() error { return nil } -func (pages *ChunkedDirtyPages) ReadDirtyDataAt(data []byte, startOffset int64) (maxStop int64) { +func (pages *ChunkedDirtyPages) ReadDirtyDataAt(data []byte, startOffset int64, tsNs int64) (maxStop int64) { if !pages.hasWrites { return } - return pages.uploadPipeline.MaybeReadDataAt(data, startOffset) + return pages.uploadPipeline.MaybeReadDataAt(data, startOffset, tsNs) } -func (pages *ChunkedDirtyPages) saveChunkedFileIntervalToStorage(reader io.Reader, offset int64, size int64, cleanupFn func()) { +func (pages *ChunkedDirtyPages) saveChunkedFileIntervalToStorage(reader io.Reader, offset int64, size int64, modifiedTsNs int64, cleanupFn func()) { - mtime := time.Now().UnixNano() defer cleanupFn() fileFullPath := pages.fh.FullPath() fileName := fileFullPath.Name() - chunk, err := pages.fh.wfs.saveDataAsChunk(fileFullPath)(reader, fileName, offset) + chunk, err := pages.fh.wfs.saveDataAsChunk(fileFullPath)(reader, fileName, offset, modifiedTsNs) if err != nil { glog.V(0).Infof("%v saveToStorage [%d,%d): %v", fileFullPath, offset, offset+size, err) pages.lastErr = err return } - chunk.ModifiedTsNs = mtime pages.fh.AddChunks([]*filer_pb.FileChunk{chunk}) + pages.fh.entryChunkGroup.AddChunk(chunk) glog.V(3).Infof("%v saveToStorage %s [%d,%d)", fileFullPath, chunk.FileId, offset, offset+size) } diff --git a/weed/mount/filehandle.go b/weed/mount/filehandle.go index b6ec3d2da..67298b047 100644 --- a/weed/mount/filehandle.go +++ b/weed/mount/filehandle.go @@ -5,50 +5,60 @@ import ( "github.com/seaweedfs/seaweedfs/weed/glog" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" - "golang.org/x/exp/slices" - "golang.org/x/sync/semaphore" - "math" + "os" "sync" ) type FileHandleId uint64 +var IsDebugFileReadWrite = false + type FileHandle struct { - fh FileHandleId - counter int64 - entry *LockedEntry - entryLock sync.Mutex - inode uint64 - wfs *WFS + fh FileHandleId + counter int64 + entry *LockedEntry + entryLock sync.Mutex + entryChunkGroup *filer.ChunkGroup + inode uint64 + wfs *WFS // cache file has been written to - dirtyMetadata bool - dirtyPages *PageWriter - entryViewCache []filer.VisibleInterval - reader *filer.ChunkReadAt - contentType string - handle uint64 - orderedMutex *semaphore.Weighted + dirtyMetadata bool + dirtyPages *PageWriter + reader *filer.ChunkReadAt + contentType string + handle uint64 + sync.Mutex isDeleted bool + + // for debugging + mirrorFile *os.File } func newFileHandle(wfs *WFS, handleId FileHandleId, inode uint64, entry *filer_pb.Entry) *FileHandle { fh := &FileHandle{ - fh: handleId, - counter: 1, - inode: inode, - wfs: wfs, - orderedMutex: semaphore.NewWeighted(int64(math.MaxInt64)), + fh: handleId, + counter: 1, + inode: inode, + wfs: wfs, } // dirtyPages: newContinuousDirtyPages(file, writeOnly), fh.dirtyPages = newPageWriter(fh, wfs.option.ChunkSizeLimit) - if entry != nil { - entry.Attributes.FileSize = filer.FileSize(entry) - } fh.entry = &LockedEntry{ Entry: entry, } + if entry != nil { + fh.SetEntry(entry) + } + + if IsDebugFileReadWrite { + var err error + fh.mirrorFile, err = os.OpenFile("/tmp/sw/"+entry.Name, os.O_RDWR|os.O_CREATE, 0600) + if err != nil { + println("failed to create mirror:", err.Error()) + } + } return fh } @@ -63,6 +73,17 @@ func (fh *FileHandle) GetEntry() *filer_pb.Entry { } func (fh *FileHandle) SetEntry(entry 
*filer_pb.Entry) { + if entry != nil { + fileSize := filer.FileSize(entry) + entry.Attributes.FileSize = fileSize + var resolveManifestErr error + fh.entryChunkGroup, resolveManifestErr = filer.NewChunkGroup(fh.wfs.LookupFn(), fh.wfs.chunkCache, entry.Chunks) + if resolveManifestErr != nil { + glog.Warningf("failed to resolve manifest chunks in %+v", entry) + } + } else { + glog.Fatalf("setting file handle entry to nil") + } fh.entry.SetEntry(entry) } @@ -78,43 +99,17 @@ func (fh *FileHandle) AddChunks(chunks []*filer_pb.FileChunk) { return } - // find the earliest incoming chunk - newChunks := chunks - earliestChunk := newChunks[0] - for i := 1; i < len(newChunks); i++ { - if lessThan(earliestChunk, newChunks[i]) { - earliestChunk = newChunks[i] - } - } - - // pick out-of-order chunks from existing chunks - for _, chunk := range fh.entry.GetChunks() { - if lessThan(earliestChunk, chunk) { - chunks = append(chunks, chunk) - } - } - - // sort incoming chunks - slices.SortFunc(chunks, func(a, b *filer_pb.FileChunk) bool { - return lessThan(a, b) - }) - - glog.V(4).Infof("%s existing %d chunks adds %d more", fh.FullPath(), len(fh.entry.GetChunks()), len(chunks)) - - fh.entry.AppendChunks(newChunks) - fh.entryViewCache = nil + fh.entry.AppendChunks(chunks) } -func (fh *FileHandle) CloseReader() { - if fh.reader != nil { - _ = fh.reader.Close() - fh.reader = nil - } -} +func (fh *FileHandle) ReleaseHandle() { + fh.entryLock.Lock() + defer fh.entryLock.Unlock() -func (fh *FileHandle) Release() { fh.dirtyPages.Destroy() - fh.CloseReader() + if IsDebugFileReadWrite { + fh.mirrorFile.Close() + } } func lessThan(a, b *filer_pb.FileChunk) bool { diff --git a/weed/mount/filehandle_map.go b/weed/mount/filehandle_map.go index cc5885ffc..f0051f061 100644 --- a/weed/mount/filehandle_map.go +++ b/weed/mount/filehandle_map.go @@ -65,7 +65,7 @@ func (i *FileHandleToInode) ReleaseByInode(inode uint64) { if fh.counter <= 0 { delete(i.inode2fh, inode) delete(i.fh2inode, fh.fh) - fh.Release() + fh.ReleaseHandle() } } } @@ -82,7 +82,7 @@ func (i *FileHandleToInode) ReleaseByHandle(fh FileHandleId) { if fhHandle.counter <= 0 { delete(i.inode2fh, inode) delete(i.fh2inode, fhHandle.fh) - fhHandle.Release() + fhHandle.ReleaseHandle() } } diff --git a/weed/mount/filehandle_read.go b/weed/mount/filehandle_read.go index a316a16cd..be6d5d984 100644 --- a/weed/mount/filehandle_read.go +++ b/weed/mount/filehandle_read.go @@ -17,18 +17,20 @@ func (fh *FileHandle) unlockForRead(startOffset int64, size int) { fh.dirtyPages.UnlockForRead(startOffset, startOffset+int64(size)) } -func (fh *FileHandle) readFromDirtyPages(buff []byte, startOffset int64) (maxStop int64) { - maxStop = fh.dirtyPages.ReadDirtyDataAt(buff, startOffset) +func (fh *FileHandle) readFromDirtyPages(buff []byte, startOffset int64, tsNs int64) (maxStop int64) { + maxStop = fh.dirtyPages.ReadDirtyDataAt(buff, startOffset, tsNs) return } -func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) { +func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, int64, error) { + fh.entryLock.Lock() + defer fh.entryLock.Unlock() fileFullPath := fh.FullPath() entry := fh.GetEntry() if entry == nil { - return 0, io.EOF + return 0, 0, io.EOF } if entry.IsInRemoteOnly() { @@ -36,43 +38,28 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) { newEntry, err := fh.downloadRemoteEntry(entry) if err != nil { glog.V(1).Infof("download remote entry %s: %v", fileFullPath, err) - return 0, err + return 0, 0, err } 
entry = newEntry } - fileSize := int64(filer.FileSize(entry)) + fileSize := int64(entry.Attributes.FileSize) + if fileSize == 0 { + fileSize = int64(filer.FileSize(entry)) + } if fileSize == 0 { glog.V(1).Infof("empty fh %v", fileFullPath) - return 0, io.EOF + return 0, 0, io.EOF } if offset+int64(len(buff)) <= int64(len(entry.Content)) { totalRead := copy(buff, entry.Content[offset:]) glog.V(4).Infof("file handle read cached %s [%d,%d] %d", fileFullPath, offset, offset+int64(totalRead), totalRead) - return int64(totalRead), nil - } - - var chunkResolveErr error - if fh.entryViewCache == nil { - fh.entryViewCache, chunkResolveErr = filer.NonOverlappingVisibleIntervals(fh.wfs.LookupFn(), entry.GetChunks(), 0, fileSize) - if chunkResolveErr != nil { - return 0, fmt.Errorf("fail to resolve chunk manifest: %v", chunkResolveErr) - } - fh.CloseReader() - } - - if fh.reader == nil { - chunkViews := filer.ViewFromVisibleIntervals(fh.entryViewCache, 0, fileSize) - glog.V(4).Infof("file handle read %s [%d,%d) from %d views", fileFullPath, offset, offset+int64(len(buff)), len(chunkViews)) - for _, chunkView := range chunkViews { - glog.V(4).Infof(" read %s [%d,%d) from chunk %+v", fileFullPath, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size), chunkView.FileId) - } - fh.reader = filer.NewChunkReaderAtFromClient(fh.wfs.LookupFn(), chunkViews, fh.wfs.chunkCache, fileSize) + return int64(totalRead), 0, nil } - totalRead, err := fh.reader.ReadAt(buff, offset) + totalRead, ts, err := fh.entryChunkGroup.ReadDataAt(fileSize, buff, offset) if err != nil && err != io.EOF { glog.Errorf("file handle read %s: %v", fileFullPath, err) @@ -80,7 +67,7 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) { // glog.V(4).Infof("file handle read %s [%d,%d] %d : %v", fileFullPath, offset, offset+int64(totalRead), totalRead, err) - return int64(totalRead), err + return int64(totalRead), ts, err } func (fh *FileHandle) downloadRemoteEntry(entry *filer_pb.Entry) (*filer_pb.Entry, error) { diff --git a/weed/mount/page_writer.go b/weed/mount/page_writer.go index 1f31b5300..c9470c440 100644 --- a/weed/mount/page_writer.go +++ b/weed/mount/page_writer.go @@ -29,35 +29,35 @@ func newPageWriter(fh *FileHandle, chunkSize int64) *PageWriter { return pw } -func (pw *PageWriter) AddPage(offset int64, data []byte, isSequential bool) { +func (pw *PageWriter) AddPage(offset int64, data []byte, isSequential bool, tsNs int64) { glog.V(4).Infof("%v AddPage [%d, %d)", pw.fh.fh, offset, offset+int64(len(data))) chunkIndex := offset / pw.chunkSize for i := chunkIndex; len(data) > 0; i++ { writeSize := min(int64(len(data)), (i+1)*pw.chunkSize-offset) - pw.addToOneChunk(i, offset, data[:writeSize], isSequential) + pw.addToOneChunk(i, offset, data[:writeSize], isSequential, tsNs) offset += writeSize data = data[writeSize:] } } -func (pw *PageWriter) addToOneChunk(chunkIndex, offset int64, data []byte, isSequential bool) { - pw.randomWriter.AddPage(offset, data, isSequential) +func (pw *PageWriter) addToOneChunk(chunkIndex, offset int64, data []byte, isSequential bool, tsNs int64) { + pw.randomWriter.AddPage(offset, data, isSequential, tsNs) } func (pw *PageWriter) FlushData() error { return pw.randomWriter.FlushData() } -func (pw *PageWriter) ReadDirtyDataAt(data []byte, offset int64) (maxStop int64) { +func (pw *PageWriter) ReadDirtyDataAt(data []byte, offset int64, tsNs int64) (maxStop int64) { glog.V(4).Infof("ReadDirtyDataAt %v [%d, %d)", pw.fh.fh, offset, offset+int64(len(data))) chunkIndex := 
offset / pw.chunkSize for i := chunkIndex; len(data) > 0; i++ { readSize := min(int64(len(data)), (i+1)*pw.chunkSize-offset) - maxStop = pw.randomWriter.ReadDirtyDataAt(data[:readSize], offset) + maxStop = pw.randomWriter.ReadDirtyDataAt(data[:readSize], offset, tsNs) offset += readSize data = data[readSize:] diff --git a/weed/mount/page_writer/activity_score.go b/weed/mount/page_writer/activity_score.go new file mode 100644 index 000000000..22da87e37 --- /dev/null +++ b/weed/mount/page_writer/activity_score.go @@ -0,0 +1,39 @@ +package page_writer + +import "time" + +type ActivityScore struct { + lastActiveTsNs int64 + decayedActivenessScore int64 +} + +func NewActivityScore() *ActivityScore { + return &ActivityScore{} +} + +func (as ActivityScore) MarkRead() { + now := time.Now().UnixNano() + deltaTime := (now - as.lastActiveTsNs) >> 30 // about number of seconds + as.lastActiveTsNs = now + + as.decayedActivenessScore = as.decayedActivenessScore>>deltaTime + 256 + if as.decayedActivenessScore < 0 { + as.decayedActivenessScore = 0 + } +} + +func (as ActivityScore) MarkWrite() { + now := time.Now().UnixNano() + deltaTime := (now - as.lastActiveTsNs) >> 30 // about number of seconds + as.lastActiveTsNs = now + + as.decayedActivenessScore = as.decayedActivenessScore>>deltaTime + 1024 + if as.decayedActivenessScore < 0 { + as.decayedActivenessScore = 0 + } +} + +func (as ActivityScore) ActivityScore() int64 { + deltaTime := (time.Now().UnixNano() - as.lastActiveTsNs) >> 30 // about number of seconds + return as.decayedActivenessScore >> deltaTime +} diff --git a/weed/mount/page_writer/chunk_interval_list.go b/weed/mount/page_writer/chunk_interval_list.go index a9d64c8e4..005385c1a 100644 --- a/weed/mount/page_writer/chunk_interval_list.go +++ b/weed/mount/page_writer/chunk_interval_list.go @@ -8,6 +8,7 @@ import ( type ChunkWrittenInterval struct { StartOffset int64 stopOffset int64 + TsNs int64 prev *ChunkWrittenInterval next *ChunkWrittenInterval } @@ -42,10 +43,14 @@ func newChunkWrittenIntervalList() *ChunkWrittenIntervalList { return list } -func (list *ChunkWrittenIntervalList) MarkWritten(startOffset, stopOffset int64) { +func (list *ChunkWrittenIntervalList) MarkWritten(startOffset, stopOffset, tsNs int64) { + if startOffset >= stopOffset { + return + } interval := &ChunkWrittenInterval{ StartOffset: startOffset, stopOffset: stopOffset, + TsNs: tsNs, } list.addInterval(interval) } @@ -62,50 +67,54 @@ func (list *ChunkWrittenIntervalList) WrittenSize() (writtenByteCount int64) { func (list *ChunkWrittenIntervalList) addInterval(interval *ChunkWrittenInterval) { + //t := list.head + //for ; t.next != nil; t = t.next { + // if t.TsNs > interval.TsNs { + // println("writes is out of order", t.TsNs-interval.TsNs, "ns") + // } + //} + p := list.head - for ; p.next != nil && p.next.StartOffset <= interval.StartOffset; p = p.next { + for ; p.next != nil && p.next.stopOffset <= interval.StartOffset; p = p.next { } q := list.tail - for ; q.prev != nil && q.prev.stopOffset >= interval.stopOffset; q = q.prev { + for ; q.prev != nil && q.prev.StartOffset >= interval.stopOffset; q = q.prev { } - if interval.StartOffset <= p.stopOffset && q.StartOffset <= interval.stopOffset { - // merge p and q together - p.stopOffset = q.stopOffset - unlinkNodesBetween(p, q.next) - return + // left side + // interval after p.next start + if p.next.StartOffset < interval.StartOffset { + t := &ChunkWrittenInterval{ + StartOffset: p.next.StartOffset, + stopOffset: interval.StartOffset, + TsNs: p.next.TsNs, + } + 
p.next = t + t.prev = p + t.next = interval + interval.prev = t + } else { + p.next = interval + interval.prev = p } - if interval.StartOffset <= p.stopOffset { - // merge new interval into p - p.stopOffset = interval.stopOffset - unlinkNodesBetween(p, q) - return - } - if q.StartOffset <= interval.stopOffset { - // merge new interval into q - q.StartOffset = interval.StartOffset - unlinkNodesBetween(p, q) - return - } - - // add the new interval between p and q - unlinkNodesBetween(p, q) - p.next = interval - interval.prev = p - q.prev = interval - interval.next = q - -} -// unlinkNodesBetween remove all nodes after start and before stop, exclusive -func unlinkNodesBetween(start *ChunkWrittenInterval, stop *ChunkWrittenInterval) { - if start.next == stop { - return + // right side + // interval ends before p.prev + if interval.stopOffset < q.prev.stopOffset { + t := &ChunkWrittenInterval{ + StartOffset: interval.stopOffset, + stopOffset: q.prev.stopOffset, + TsNs: q.prev.TsNs, + } + q.prev = t + t.next = q + interval.next = t + t.prev = interval + } else { + q.prev = interval + interval.next = q } - start.next.prev = nil - start.next = stop - stop.prev.next = nil - stop.prev = start + } func (list *ChunkWrittenIntervalList) size() int { diff --git a/weed/mount/page_writer/chunk_interval_list_test.go b/weed/mount/page_writer/chunk_interval_list_test.go index b22f5eb5d..eb1d5ff46 100644 --- a/weed/mount/page_writer/chunk_interval_list_test.go +++ b/weed/mount/page_writer/chunk_interval_list_test.go @@ -10,40 +10,72 @@ func Test_PageChunkWrittenIntervalList(t *testing.T) { assert.Equal(t, 0, list.size(), "empty list") - list.MarkWritten(0, 5) + list.MarkWritten(0, 5, 1) assert.Equal(t, 1, list.size(), "one interval") - list.MarkWritten(0, 5) + list.MarkWritten(0, 5, 2) assert.Equal(t, 1, list.size(), "duplicated interval2") - list.MarkWritten(95, 100) + list.MarkWritten(95, 100, 3) assert.Equal(t, 2, list.size(), "two intervals") - list.MarkWritten(50, 60) + list.MarkWritten(50, 60, 4) assert.Equal(t, 3, list.size(), "three intervals") - list.MarkWritten(50, 55) - assert.Equal(t, 3, list.size(), "three intervals merge") + list.MarkWritten(50, 55, 5) + assert.Equal(t, 4, list.size(), "three intervals merge") - list.MarkWritten(40, 50) - assert.Equal(t, 3, list.size(), "three intervals grow forward") + list.MarkWritten(40, 50, 6) + assert.Equal(t, 5, list.size(), "three intervals grow forward") - list.MarkWritten(50, 65) - assert.Equal(t, 3, list.size(), "three intervals grow backward") + list.MarkWritten(50, 65, 7) + assert.Equal(t, 4, list.size(), "three intervals grow backward") - list.MarkWritten(70, 80) - assert.Equal(t, 4, list.size(), "four intervals") + list.MarkWritten(70, 80, 8) + assert.Equal(t, 5, list.size(), "four intervals") - list.MarkWritten(60, 70) - assert.Equal(t, 3, list.size(), "three intervals merged") + list.MarkWritten(60, 70, 9) + assert.Equal(t, 6, list.size(), "three intervals merged") - list.MarkWritten(59, 71) - assert.Equal(t, 3, list.size(), "covered three intervals") + list.MarkWritten(59, 71, 10) + assert.Equal(t, 6, list.size(), "covered three intervals") - list.MarkWritten(5, 59) - assert.Equal(t, 2, list.size(), "covered two intervals") + list.MarkWritten(5, 59, 11) + assert.Equal(t, 5, list.size(), "covered two intervals") - list.MarkWritten(70, 99) - assert.Equal(t, 1, list.size(), "covered one intervals") + list.MarkWritten(70, 99, 12) + assert.Equal(t, 5, list.size(), "covered one intervals") } + +type interval struct { + start int64 + stop int64 + 
expected bool +} + +func Test_PageChunkWrittenIntervalList1(t *testing.T) { + list := newChunkWrittenIntervalList() + inputs := []interval{ + {1, 5, true}, + {2, 3, true}, + } + for i, input := range inputs { + list.MarkWritten(input.start, input.stop, int64(i)+1) + actual := hasData(list, 0, 4) + if actual != input.expected { + t.Errorf("input [%d,%d) expected %v actual %v", input.start, input.stop, input.expected, actual) + } + } +} + +func hasData(usage *ChunkWrittenIntervalList, chunkStartOffset, x int64) bool { + for t := usage.head.next; t != usage.tail; t = t.next { + logicStart := chunkStartOffset + t.StartOffset + logicStop := chunkStartOffset + t.stopOffset + if logicStart <= x && x < logicStop { + return true + } + } + return false +} diff --git a/weed/mount/page_writer/dirty_pages.go b/weed/mount/page_writer/dirty_pages.go index 44f879afc..7cddcf69e 100644 --- a/weed/mount/page_writer/dirty_pages.go +++ b/weed/mount/page_writer/dirty_pages.go @@ -1,9 +1,9 @@ package page_writer type DirtyPages interface { - AddPage(offset int64, data []byte, isSequential bool) + AddPage(offset int64, data []byte, isSequential bool, tsNs int64) FlushData() error - ReadDirtyDataAt(data []byte, startOffset int64) (maxStop int64) + ReadDirtyDataAt(data []byte, startOffset int64, tsNs int64) (maxStop int64) Destroy() LockForRead(startOffset, stopOffset int64) UnlockForRead(startOffset, stopOffset int64) diff --git a/weed/mount/page_writer/page_chunk.go b/weed/mount/page_writer/page_chunk.go index 4e8f31425..32d246deb 100644 --- a/weed/mount/page_writer/page_chunk.go +++ b/weed/mount/page_writer/page_chunk.go @@ -4,13 +4,13 @@ import ( "io" ) -type SaveToStorageFunc func(reader io.Reader, offset int64, size int64, cleanupFn func()) +type SaveToStorageFunc func(reader io.Reader, offset int64, size int64, modifiedTsNs int64, cleanupFn func()) type PageChunk interface { FreeResource() - WriteDataAt(src []byte, offset int64) (n int) - ReadDataAt(p []byte, off int64) (maxStop int64) + WriteDataAt(src []byte, offset int64, tsNs int64) (n int) + ReadDataAt(p []byte, off int64, tsNs int64) (maxStop int64) IsComplete() bool - WrittenSize() int64 + ActivityScore() int64 SaveContent(saveFn SaveToStorageFunc) } diff --git a/weed/mount/page_writer/page_chunk_mem.go b/weed/mount/page_writer/page_chunk_mem.go index 8cccded67..1ec8cecb4 100644 --- a/weed/mount/page_writer/page_chunk_mem.go +++ b/weed/mount/page_writer/page_chunk_mem.go @@ -19,6 +19,7 @@ type MemChunk struct { usage *ChunkWrittenIntervalList chunkSize int64 logicChunkIndex LogicChunkIndex + activityScore *ActivityScore } func NewMemChunk(logicChunkIndex LogicChunkIndex, chunkSize int64) *MemChunk { @@ -28,6 +29,7 @@ func NewMemChunk(logicChunkIndex LogicChunkIndex, chunkSize int64) *MemChunk { chunkSize: chunkSize, buf: mem.Allocate(int(chunkSize)), usage: newChunkWrittenIntervalList(), + activityScore: NewActivityScore(), } } @@ -39,29 +41,37 @@ func (mc *MemChunk) FreeResource() { mem.Free(mc.buf) } -func (mc *MemChunk) WriteDataAt(src []byte, offset int64) (n int) { +func (mc *MemChunk) WriteDataAt(src []byte, offset int64, tsNs int64) (n int) { mc.Lock() defer mc.Unlock() innerOffset := offset % mc.chunkSize n = copy(mc.buf[innerOffset:], src) - mc.usage.MarkWritten(innerOffset, innerOffset+int64(n)) + mc.usage.MarkWritten(innerOffset, innerOffset+int64(n), tsNs) + mc.activityScore.MarkWrite() + return } -func (mc *MemChunk) ReadDataAt(p []byte, off int64) (maxStop int64) { +func (mc *MemChunk) ReadDataAt(p []byte, off int64, tsNs int64) (maxStop 
int64) { mc.RLock() defer mc.RUnlock() memChunkBaseOffset := int64(mc.logicChunkIndex) * mc.chunkSize for t := mc.usage.head.next; t != mc.usage.tail; t = t.next { - logicStart := max(off, int64(mc.logicChunkIndex)*mc.chunkSize+t.StartOffset) + logicStart := max(off, memChunkBaseOffset+t.StartOffset) logicStop := min(off+int64(len(p)), memChunkBaseOffset+t.stopOffset) if logicStart < logicStop { - copy(p[logicStart-off:logicStop-off], mc.buf[logicStart-memChunkBaseOffset:logicStop-memChunkBaseOffset]) - maxStop = max(maxStop, logicStop) + if t.TsNs >= tsNs { + copy(p[logicStart-off:logicStop-off], mc.buf[logicStart-memChunkBaseOffset:logicStop-memChunkBaseOffset]) + maxStop = max(maxStop, logicStop) + } else { + println("read old data1", tsNs-t.TsNs, "ns") + } } } + mc.activityScore.MarkRead() + return } @@ -72,11 +82,8 @@ func (mc *MemChunk) IsComplete() bool { return mc.usage.IsComplete(mc.chunkSize) } -func (mc *MemChunk) WrittenSize() int64 { - mc.RLock() - defer mc.RUnlock() - - return mc.usage.WrittenSize() +func (mc *MemChunk) ActivityScore() int64 { + return mc.activityScore.ActivityScore() } func (mc *MemChunk) SaveContent(saveFn SaveToStorageFunc) { @@ -88,7 +95,7 @@ func (mc *MemChunk) SaveContent(saveFn SaveToStorageFunc) { } for t := mc.usage.head.next; t != mc.usage.tail; t = t.next { reader := util.NewBytesReader(mc.buf[t.StartOffset:t.stopOffset]) - saveFn(reader, int64(mc.logicChunkIndex)*mc.chunkSize+t.StartOffset, t.Size(), func() { + saveFn(reader, int64(mc.logicChunkIndex)*mc.chunkSize+t.StartOffset, t.Size(), t.TsNs, func() { }) } } diff --git a/weed/mount/page_writer/page_chunk_swapfile.go b/weed/mount/page_writer/page_chunk_swapfile.go index bf2cdb256..6cedc64df 100644 --- a/weed/mount/page_writer/page_chunk_swapfile.go +++ b/weed/mount/page_writer/page_chunk_swapfile.go @@ -15,12 +15,12 @@ var ( type ActualChunkIndex int type SwapFile struct { - dir string - file *os.File - logicToActualChunkIndex map[LogicChunkIndex]ActualChunkIndex - logicToActualChunkIndexLock sync.Mutex - chunkSize int64 - freeActualChunkList []ActualChunkIndex + dir string + file *os.File + chunkSize int64 + chunkTrackingLock sync.Mutex + activeChunkCount int + freeActualChunkList []ActualChunkIndex } type SwapFileChunk struct { @@ -29,14 +29,15 @@ type SwapFileChunk struct { usage *ChunkWrittenIntervalList logicChunkIndex LogicChunkIndex actualChunkIndex ActualChunkIndex + activityScore *ActivityScore + //memChunk *MemChunk } func NewSwapFile(dir string, chunkSize int64) *SwapFile { return &SwapFile{ - dir: dir, - file: nil, - logicToActualChunkIndex: make(map[LogicChunkIndex]ActualChunkIndex), - chunkSize: chunkSize, + dir: dir, + file: nil, + chunkSize: chunkSize, } } func (sf *SwapFile) FreeResource() { @@ -46,7 +47,7 @@ func (sf *SwapFile) FreeResource() { } } -func (sf *SwapFile) NewTempFileChunk(logicChunkIndex LogicChunkIndex) (tc *SwapFileChunk) { +func (sf *SwapFile) NewSwapFileChunk(logicChunkIndex LogicChunkIndex) (tc *SwapFileChunk) { if sf.file == nil { var err error sf.file, err = os.CreateTemp(sf.dir, "") @@ -55,70 +56,98 @@ func (sf *SwapFile) NewTempFileChunk(logicChunkIndex LogicChunkIndex) (tc *SwapF return nil } } - sf.logicToActualChunkIndexLock.Lock() - defer sf.logicToActualChunkIndexLock.Unlock() - actualChunkIndex, found := sf.logicToActualChunkIndex[logicChunkIndex] - if !found { - if len(sf.freeActualChunkList) > 0 { - actualChunkIndex = sf.freeActualChunkList[0] - sf.freeActualChunkList = sf.freeActualChunkList[1:] - } else { - actualChunkIndex = 
ActualChunkIndex(len(sf.logicToActualChunkIndex)) - } - sf.logicToActualChunkIndex[logicChunkIndex] = actualChunkIndex + sf.chunkTrackingLock.Lock() + defer sf.chunkTrackingLock.Unlock() + + sf.activeChunkCount++ + + // assign a new physical chunk + var actualChunkIndex ActualChunkIndex + if len(sf.freeActualChunkList) > 0 { + actualChunkIndex = sf.freeActualChunkList[0] + sf.freeActualChunkList = sf.freeActualChunkList[1:] + } else { + actualChunkIndex = ActualChunkIndex(sf.activeChunkCount) } - return &SwapFileChunk{ + swapFileChunk := &SwapFileChunk{ swapfile: sf, usage: newChunkWrittenIntervalList(), logicChunkIndex: logicChunkIndex, actualChunkIndex: actualChunkIndex, + activityScore: NewActivityScore(), + // memChunk: NewMemChunk(logicChunkIndex, sf.chunkSize), } + + // println(logicChunkIndex, "|", "++++", swapFileChunk.actualChunkIndex, swapFileChunk, sf) + return swapFileChunk } func (sc *SwapFileChunk) FreeResource() { - sc.swapfile.logicToActualChunkIndexLock.Lock() - defer sc.swapfile.logicToActualChunkIndexLock.Unlock() sc.Lock() defer sc.Unlock() + sc.swapfile.chunkTrackingLock.Lock() + defer sc.swapfile.chunkTrackingLock.Unlock() + sc.swapfile.freeActualChunkList = append(sc.swapfile.freeActualChunkList, sc.actualChunkIndex) - delete(sc.swapfile.logicToActualChunkIndex, sc.logicChunkIndex) + sc.swapfile.activeChunkCount-- + // println(sc.logicChunkIndex, "|", "----", sc.actualChunkIndex, sc, sc.swapfile) } -func (sc *SwapFileChunk) WriteDataAt(src []byte, offset int64) (n int) { +func (sc *SwapFileChunk) WriteDataAt(src []byte, offset int64, tsNs int64) (n int) { sc.Lock() defer sc.Unlock() + // println(sc.logicChunkIndex, "|", tsNs, "write at", offset, len(src), sc.actualChunkIndex) + innerOffset := offset % sc.swapfile.chunkSize var err error n, err = sc.swapfile.file.WriteAt(src, int64(sc.actualChunkIndex)*sc.swapfile.chunkSize+innerOffset) - if err == nil { - sc.usage.MarkWritten(innerOffset, innerOffset+int64(n)) - } else { + sc.usage.MarkWritten(innerOffset, innerOffset+int64(n), tsNs) + if err != nil { glog.Errorf("failed to write swap file %s: %v", sc.swapfile.file.Name(), err) } + //sc.memChunk.WriteDataAt(src, offset, tsNs) + sc.activityScore.MarkWrite() + return } -func (sc *SwapFileChunk) ReadDataAt(p []byte, off int64) (maxStop int64) { +func (sc *SwapFileChunk) ReadDataAt(p []byte, off int64, tsNs int64) (maxStop int64) { sc.RLock() defer sc.RUnlock() + // println(sc.logicChunkIndex, "|", tsNs, "read at", off, len(p), sc.actualChunkIndex) + + //memCopy := make([]byte, len(p)) + //copy(memCopy, p) + chunkStartOffset := int64(sc.logicChunkIndex) * sc.swapfile.chunkSize for t := sc.usage.head.next; t != sc.usage.tail; t = t.next { logicStart := max(off, chunkStartOffset+t.StartOffset) logicStop := min(off+int64(len(p)), chunkStartOffset+t.stopOffset) if logicStart < logicStop { - actualStart := logicStart - chunkStartOffset + int64(sc.actualChunkIndex)*sc.swapfile.chunkSize - if _, err := sc.swapfile.file.ReadAt(p[logicStart-off:logicStop-off], actualStart); err != nil { - glog.Errorf("failed to reading swap file %s: %v", sc.swapfile.file.Name(), err) - break + if t.TsNs >= tsNs { + actualStart := logicStart - chunkStartOffset + int64(sc.actualChunkIndex)*sc.swapfile.chunkSize + if _, err := sc.swapfile.file.ReadAt(p[logicStart-off:logicStop-off], actualStart); err != nil { + glog.Errorf("failed to reading swap file %s: %v", sc.swapfile.file.Name(), err) + break + } + maxStop = max(maxStop, logicStop) + } else { + println("read old data2", tsNs-t.TsNs, "ns") } - 
maxStop = max(maxStop, logicStop) } } + //sc.memChunk.ReadDataAt(memCopy, off, tsNs) + //if bytes.Compare(memCopy, p) != 0 { + // println("read wrong data from swap file", off, sc.logicChunkIndex) + //} + + sc.activityScore.MarkRead() + return } @@ -128,27 +157,27 @@ func (sc *SwapFileChunk) IsComplete() bool { return sc.usage.IsComplete(sc.swapfile.chunkSize) } -func (sc *SwapFileChunk) WrittenSize() int64 { - sc.RLock() - defer sc.RUnlock() - return sc.usage.WrittenSize() +func (sc *SwapFileChunk) ActivityScore() int64 { + return sc.activityScore.ActivityScore() } func (sc *SwapFileChunk) SaveContent(saveFn SaveToStorageFunc) { + sc.RLock() + defer sc.RUnlock() + if saveFn == nil { return } - sc.Lock() - defer sc.Unlock() - + // println(sc.logicChunkIndex, "|", "save") for t := sc.usage.head.next; t != sc.usage.tail; t = t.next { data := mem.Allocate(int(t.Size())) - sc.swapfile.file.ReadAt(data, t.StartOffset+int64(sc.actualChunkIndex)*sc.swapfile.chunkSize) - reader := util.NewBytesReader(data) - saveFn(reader, int64(sc.logicChunkIndex)*sc.swapfile.chunkSize+t.StartOffset, t.Size(), func() { - }) + n, _ := sc.swapfile.file.ReadAt(data, t.StartOffset+int64(sc.actualChunkIndex)*sc.swapfile.chunkSize) + if n > 0 { + reader := util.NewBytesReader(data[:n]) + saveFn(reader, int64(sc.logicChunkIndex)*sc.swapfile.chunkSize+t.StartOffset, int64(n), t.TsNs, func() { + }) + } mem.Free(data) } - sc.usage = newChunkWrittenIntervalList() } diff --git a/weed/mount/page_writer/upload_pipeline.go b/weed/mount/page_writer/upload_pipeline.go index 252dddc06..6065f2f76 100644 --- a/weed/mount/page_writer/upload_pipeline.go +++ b/weed/mount/page_writer/upload_pipeline.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/seaweedfs/seaweedfs/weed/glog" "github.com/seaweedfs/seaweedfs/weed/util" + "math" "sync" "sync/atomic" ) @@ -55,7 +56,8 @@ func NewUploadPipeline(writers *util.LimitedConcurrentExecutor, chunkSize int64, return t } -func (up *UploadPipeline) SaveDataAt(p []byte, off int64, isSequential bool) (n int) { +func (up *UploadPipeline) SaveDataAt(p []byte, off int64, isSequential bool, tsNs int64) (n int) { + up.chunksLock.Lock() defer up.chunksLock.Unlock() @@ -65,33 +67,39 @@ func (up *UploadPipeline) SaveDataAt(p []byte, off int64, isSequential bool) (n if !found { if len(up.writableChunks) > up.writableChunkLimit { // if current file chunks is over the per file buffer count limit - fullestChunkIndex, fullness := LogicChunkIndex(-1), int64(0) - for lci, mc := range up.writableChunks { - chunkFullness := mc.WrittenSize() - if fullness < chunkFullness { - fullestChunkIndex = lci - fullness = chunkFullness + laziestChunkIndex, lowestActivityScore := LogicChunkIndex(-1), int64(math.MaxInt64) + for wci, wc := range up.writableChunks { + activityScore := wc.ActivityScore() + if lowestActivityScore > activityScore { + laziestChunkIndex = wci + lowestActivityScore = activityScore } } - up.moveToSealed(up.writableChunks[fullestChunkIndex], fullestChunkIndex) - // fmt.Printf("flush chunk %d with %d bytes written\n", logicChunkIndex, fullness) + up.moveToSealed(up.writableChunks[laziestChunkIndex], laziestChunkIndex) + // fmt.Printf("flush chunk %d with %d bytes written\n", logicChunkIndex, oldestTs) } if isSequential && len(up.writableChunks) < up.writableChunkLimit && atomic.LoadInt64(&memChunkCounter) < 4*int64(up.writableChunkLimit) { pageChunk = NewMemChunk(logicChunkIndex, up.ChunkSize) } else { - pageChunk = up.swapFile.NewTempFileChunk(logicChunkIndex) + pageChunk = 
up.swapFile.NewSwapFileChunk(logicChunkIndex) } up.writableChunks[logicChunkIndex] = pageChunk } - n = pageChunk.WriteDataAt(p, off) + //if _, foundSealed := up.sealedChunks[logicChunkIndex]; foundSealed { + // println("found already sealed chunk", logicChunkIndex) + //} + //if _, foundReading := up.activeReadChunks[logicChunkIndex]; foundReading { + // println("found active read chunk", logicChunkIndex) + //} + n = pageChunk.WriteDataAt(p, off, tsNs) up.maybeMoveToSealed(pageChunk, logicChunkIndex) return } -func (up *UploadPipeline) MaybeReadDataAt(p []byte, off int64) (maxStop int64) { +func (up *UploadPipeline) MaybeReadDataAt(p []byte, off int64, tsNs int64) (maxStop int64) { logicChunkIndex := LogicChunkIndex(off / up.ChunkSize) up.chunksLock.Lock() @@ -103,12 +111,8 @@ func (up *UploadPipeline) MaybeReadDataAt(p []byte, off int64) (maxStop int64) { // read from sealed chunks first sealedChunk, found := up.sealedChunks[logicChunkIndex] if found { - sealedChunk.referenceCounter++ - } - if found { - maxStop = sealedChunk.chunk.ReadDataAt(p, off) + maxStop = sealedChunk.chunk.ReadDataAt(p, off, tsNs) glog.V(4).Infof("%s read sealed memchunk [%d,%d)", up.filepath, off, maxStop) - sealedChunk.FreeReference(fmt.Sprintf("%s finish reading chunk %d", up.filepath, logicChunkIndex)) } // read from writable chunks last @@ -116,7 +120,7 @@ func (up *UploadPipeline) MaybeReadDataAt(p []byte, off int64) (maxStop int64) { if !found { return } - writableMaxStop := writableChunk.ReadDataAt(p, off) + writableMaxStop := writableChunk.ReadDataAt(p, off, tsNs) glog.V(4).Infof("%s read writable memchunk [%d,%d)", up.filepath, off, writableMaxStop) maxStop = max(maxStop, writableMaxStop) diff --git a/weed/mount/page_writer/upload_pipeline_test.go b/weed/mount/page_writer/upload_pipeline_test.go index 27da7036d..2d803f6af 100644 --- a/weed/mount/page_writer/upload_pipeline_test.go +++ b/weed/mount/page_writer/upload_pipeline_test.go @@ -31,14 +31,14 @@ func writeRange(uploadPipeline *UploadPipeline, startOff, stopOff int64) { p := make([]byte, 4) for i := startOff / 4; i < stopOff/4; i += 4 { util.Uint32toBytes(p, uint32(i)) - uploadPipeline.SaveDataAt(p, i, false) + uploadPipeline.SaveDataAt(p, i, false, 0) } } func confirmRange(t *testing.T, uploadPipeline *UploadPipeline, startOff, stopOff int64) { p := make([]byte, 4) for i := startOff; i < stopOff/4; i += 4 { - uploadPipeline.MaybeReadDataAt(p, i) + uploadPipeline.MaybeReadDataAt(p, i, 0) x := util.BytesToUint32(p) if x != uint32(i) { t.Errorf("expecting %d found %d at offset [%d,%d)", i, x, i, i+4) diff --git a/weed/mount/weedfs_attr.go b/weed/mount/weedfs_attr.go index 1d58e0852..7dc3c6b50 100644 --- a/weed/mount/weedfs_attr.go +++ b/weed/mount/weedfs_attr.go @@ -20,12 +20,12 @@ func (wfs *WFS) GetAttr(cancel <-chan struct{}, input *fuse.GetAttrIn, out *fuse _, _, entry, status := wfs.maybeReadEntry(inode) if status == fuse.OK { out.AttrValid = 1 - wfs.setAttrByPbEntry(&out.Attr, inode, entry) + wfs.setAttrByPbEntry(&out.Attr, inode, entry, true) return status } else { if fh, found := wfs.fhmap.FindFileHandle(inode); found { out.AttrValid = 1 - wfs.setAttrByPbEntry(&out.Attr, inode, fh.entry.GetEntry()) + wfs.setAttrByPbEntry(&out.Attr, inode, fh.entry.GetEntry(), true) out.Nlink = 0 return fuse.OK } @@ -75,7 +75,7 @@ func (wfs *WFS) SetAttr(cancel <-chan struct{}, input *fuse.SetAttrIn, out *fuse // set the new chunks and reset entry cache entry.Chunks = chunks if fh != nil { - fh.entryViewCache = nil + fh.entryChunkGroup.SetChunks(chunks) } } 
entry.Attributes.Mtime = time.Now().Unix() @@ -114,7 +114,11 @@ func (wfs *WFS) SetAttr(cancel <-chan struct{}, input *fuse.SetAttrIn, out *fuse } out.AttrValid = 1 - wfs.setAttrByPbEntry(&out.Attr, input.NodeId, entry) + size, includeSize := input.GetSize() + if includeSize { + out.Attr.Size = size + } + wfs.setAttrByPbEntry(&out.Attr, input.NodeId, entry, !includeSize) if fh != nil { fh.dirtyMetadata = true @@ -139,12 +143,14 @@ func (wfs *WFS) setRootAttr(out *fuse.AttrOut) { out.Nlink = 1 } -func (wfs *WFS) setAttrByPbEntry(out *fuse.Attr, inode uint64, entry *filer_pb.Entry) { +func (wfs *WFS) setAttrByPbEntry(out *fuse.Attr, inode uint64, entry *filer_pb.Entry, calculateSize bool) { out.Ino = inode if entry.Attributes != nil && entry.Attributes.Inode != 0 { out.Ino = entry.Attributes.Inode } - out.Size = filer.FileSize(entry) + if calculateSize { + out.Size = filer.FileSize(entry) + } if entry.FileMode()&os.ModeSymlink != 0 { out.Size = uint64(len(entry.Attributes.SymlinkTarget)) } @@ -194,7 +200,7 @@ func (wfs *WFS) outputPbEntry(out *fuse.EntryOut, inode uint64, entry *filer_pb. out.Generation = 1 out.EntryValid = 1 out.AttrValid = 1 - wfs.setAttrByPbEntry(&out.Attr, inode, entry) + wfs.setAttrByPbEntry(&out.Attr, inode, entry, true) } func (wfs *WFS) outputFilerEntry(out *fuse.EntryOut, inode uint64, entry *filer.Entry) { diff --git a/weed/mount/weedfs_file_copy_range.go b/weed/mount/weedfs_file_copy_range.go index bc092a252..e3f841b02 100644 --- a/weed/mount/weedfs_file_copy_range.go +++ b/weed/mount/weedfs_file_copy_range.go @@ -1,8 +1,8 @@ package mount import ( - "context" "net/http" + "time" "github.com/hanwen/go-fuse/v2/fuse" @@ -44,8 +44,8 @@ func (wfs *WFS) CopyFileRange(cancel <-chan struct{}, in *fuse.CopyFileRangeIn) } // lock source and target file handles - fhOut.orderedMutex.Acquire(context.Background(), 1) - defer fhOut.orderedMutex.Release(1) + fhOut.Lock() + defer fhOut.Unlock() fhOut.entryLock.Lock() defer fhOut.entryLock.Unlock() @@ -54,8 +54,8 @@ func (wfs *WFS) CopyFileRange(cancel <-chan struct{}, in *fuse.CopyFileRangeIn) } if fhIn.fh != fhOut.fh { - fhIn.orderedMutex.Acquire(context.Background(), 1) - defer fhIn.orderedMutex.Release(1) + fhIn.Lock() + defer fhIn.Unlock() fhIn.entryLock.Lock() defer fhIn.entryLock.Unlock() } @@ -88,7 +88,7 @@ func (wfs *WFS) CopyFileRange(cancel <-chan struct{}, in *fuse.CopyFileRangeIn) // put data at the specified offset in target file fhOut.dirtyPages.writerPattern.MonitorWriteAt(int64(in.OffOut), int(in.Len)) fhOut.entry.Content = nil - fhOut.dirtyPages.AddPage(int64(in.OffOut), data, fhOut.dirtyPages.writerPattern.IsSequentialMode()) + fhOut.dirtyPages.AddPage(int64(in.OffOut), data, fhOut.dirtyPages.writerPattern.IsSequentialMode(), time.Now().UnixNano()) fhOut.entry.Attributes.FileSize = uint64(max(int64(in.OffOut)+totalRead, int64(fhOut.entry.Attributes.FileSize))) fhOut.dirtyMetadata = true written = uint32(totalRead) diff --git a/weed/mount/weedfs_file_lseek.go b/weed/mount/weedfs_file_lseek.go index 9d6402f96..93fc65247 100644 --- a/weed/mount/weedfs_file_lseek.go +++ b/weed/mount/weedfs_file_lseek.go @@ -1,7 +1,6 @@ package mount import ( - "context" "syscall" "github.com/hanwen/go-fuse/v2/fuse" @@ -36,8 +35,8 @@ func (wfs *WFS) Lseek(cancel <-chan struct{}, in *fuse.LseekIn, out *fuse.LseekO } // lock the file until the proper offset was calculated - fh.orderedMutex.Acquire(context.Background(), 1) - defer fh.orderedMutex.Release(1) + fh.Lock() + defer fh.Unlock() fh.entryLock.Lock() defer fh.entryLock.Unlock() 
@@ -56,17 +55,8 @@ func (wfs *WFS) Lseek(cancel <-chan struct{}, in *fuse.LseekIn, out *fuse.LseekO return ENXIO } - // refresh view cache if necessary - if fh.entryViewCache == nil { - var err error - fh.entryViewCache, err = filer.NonOverlappingVisibleIntervals(fh.wfs.LookupFn(), fh.entry.GetChunks(), 0, fileSize) - if err != nil { - return fuse.EIO - } - } - // search chunks for the offset - found, offset := searchChunks(fh, offset, fileSize, in.Whence) + found, offset := fh.entryChunkGroup.SearchChunks(offset, fileSize, in.Whence) if found { out.Offset = uint64(offset) return fuse.OK @@ -82,30 +72,3 @@ func (wfs *WFS) Lseek(cancel <-chan struct{}, in *fuse.LseekIn, out *fuse.LseekO return fuse.OK } - -// searchChunks goes through all chunks to find the correct offset -func searchChunks(fh *FileHandle, offset, fileSize int64, whence uint32) (found bool, out int64) { - chunkViews := filer.ViewFromVisibleIntervals(fh.entryViewCache, offset, fileSize) - - for _, chunkView := range chunkViews { - if offset < chunkView.LogicOffset { - if whence == SEEK_HOLE { - out = offset - } else { - out = chunkView.LogicOffset - } - - return true, out - } - - if offset >= chunkView.LogicOffset && offset < chunkView.Offset+int64(chunkView.Size) && whence == SEEK_DATA { - out = offset - - return true, out - } - - offset += int64(chunkView.Size) - } - - return -} diff --git a/weed/mount/weedfs_file_read.go b/weed/mount/weedfs_file_read.go index 8375f9a5d..cedece137 100644 --- a/weed/mount/weedfs_file_read.go +++ b/weed/mount/weedfs_file_read.go @@ -1,7 +1,8 @@ package mount import ( - "context" + "bytes" + "fmt" "io" "github.com/hanwen/go-fuse/v2/fuse" @@ -40,8 +41,8 @@ func (wfs *WFS) Read(cancel <-chan struct{}, in *fuse.ReadIn, buff []byte) (fuse return nil, fuse.ENOENT } - fh.orderedMutex.Acquire(context.Background(), 1) - defer fh.orderedMutex.Release(1) + fh.Lock() + defer fh.Unlock() offset := int64(in.Offset) totalRead, err := readDataByFileHandle(buff, fh, offset) @@ -50,6 +51,23 @@ func (wfs *WFS) Read(cancel <-chan struct{}, in *fuse.ReadIn, buff []byte) (fuse return nil, fuse.EIO } + if IsDebugFileReadWrite { + // print(".") + mirrorData := make([]byte, totalRead) + fh.mirrorFile.ReadAt(mirrorData, offset) + if bytes.Compare(mirrorData, buff[:totalRead]) != 0 { + + againBuff := make([]byte, len(buff)) + againRead, _ := readDataByFileHandle(buff, fh, offset) + againCorrect := bytes.Compare(mirrorData, againBuff[:againRead]) == 0 + againSame := bytes.Compare(buff[:totalRead], againBuff[:againRead]) == 0 + + fmt.Printf("\ncompare %v [%d,%d) size:%d againSame:%v againCorrect:%v\n", fh.mirrorFile.Name(), offset, offset+totalRead, totalRead, againSame, againCorrect) + //fmt.Printf("read mirrow data: %v\n", mirrorData) + //fmt.Printf("read actual data: %v\n", buff[:totalRead]) + } + } + return fuse.ReadResultData(buff[:totalRead]), fuse.OK } @@ -59,9 +77,9 @@ func readDataByFileHandle(buff []byte, fhIn *FileHandle, offset int64) (int64, e fhIn.lockForRead(offset, size) defer fhIn.unlockForRead(offset, size) - n, err := fhIn.readFromChunks(buff, offset) + n, tsNs, err := fhIn.readFromChunks(buff, offset) if err == nil || err == io.EOF { - maxStop := fhIn.readFromDirtyPages(buff, offset) + maxStop := fhIn.readFromDirtyPages(buff, offset, tsNs) n = max(maxStop-offset, n) } if err == io.EOF { diff --git a/weed/mount/weedfs_file_sync.go b/weed/mount/weedfs_file_sync.go index 7b7c66680..ac18e05ea 100644 --- a/weed/mount/weedfs_file_sync.go +++ b/weed/mount/weedfs_file_sync.go @@ -89,8 +89,8 @@ func (wfs 
*WFS) Fsync(cancel <-chan struct{}, in *fuse.FsyncIn) (code fuse.Statu } func (wfs *WFS) doFlush(fh *FileHandle, uid, gid uint32) fuse.Status { - fh.orderedMutex.Acquire(context.Background(), 1) - defer fh.orderedMutex.Release(1) + fh.Lock() + defer fh.Unlock() // flush works at fh level fileFullPath := fh.FullPath() @@ -145,9 +145,9 @@ func (wfs *WFS) doFlush(fh *FileHandle, uid, gid uint32) fuse.Status { } glog.V(4).Infof("%s set chunks: %v", fileFullPath, len(entry.GetChunks())) - for i, chunk := range entry.GetChunks() { - glog.V(4).Infof("%s chunks %d: %v [%d,%d)", fileFullPath, i, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size)) - } + //for i, chunk := range entry.GetChunks() { + // glog.V(4).Infof("%s chunks %d: %v [%d,%d)", fileFullPath, i, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size)) + //} manifestChunks, nonManifestChunks := filer.SeparateManifestChunks(entry.GetChunks()) @@ -158,6 +158,7 @@ func (wfs *WFS) doFlush(fh *FileHandle, uid, gid uint32) fuse.Status { glog.V(0).Infof("MaybeManifestize: %v", manifestErr) } entry.Chunks = append(chunks, manifestChunks...) + fh.entryChunkGroup.SetChunks(entry.Chunks) wfs.mapPbIdFromLocalToFiler(request.Entry) defer wfs.mapPbIdFromFilerToLocal(request.Entry) @@ -181,5 +182,9 @@ func (wfs *WFS) doFlush(fh *FileHandle, uid, gid uint32) fuse.Status { return fuse.EIO } + if IsDebugFileReadWrite { + fh.mirrorFile.Sync() + } + return fuse.OK } diff --git a/weed/mount/weedfs_file_write.go b/weed/mount/weedfs_file_write.go index 7b13d54ff..5a9a21ded 100644 --- a/weed/mount/weedfs_file_write.go +++ b/weed/mount/weedfs_file_write.go @@ -1,10 +1,10 @@ package mount import ( - "context" "github.com/hanwen/go-fuse/v2/fuse" "net/http" "syscall" + "time" ) /** @@ -46,8 +46,10 @@ func (wfs *WFS) Write(cancel <-chan struct{}, in *fuse.WriteIn, data []byte) (wr fh.dirtyPages.writerPattern.MonitorWriteAt(int64(in.Offset), int(in.Size)) - fh.orderedMutex.Acquire(context.Background(), 1) - defer fh.orderedMutex.Release(1) + tsNs := time.Now().UnixNano() + + fh.Lock() + defer fh.Unlock() entry := fh.GetEntry() if entry == nil { @@ -59,7 +61,7 @@ func (wfs *WFS) Write(cancel <-chan struct{}, in *fuse.WriteIn, data []byte) (wr entry.Attributes.FileSize = uint64(max(offset+int64(len(data)), int64(entry.Attributes.FileSize))) // glog.V(4).Infof("%v write [%d,%d) %d", fh.f.fullpath(), req.Offset, req.Offset+int64(len(req.Data)), len(req.Data)) - fh.dirtyPages.AddPage(offset, data, fh.dirtyPages.writerPattern.IsSequentialMode()) + fh.dirtyPages.AddPage(offset, data, fh.dirtyPages.writerPattern.IsSequentialMode(), tsNs) written = uint32(len(data)) @@ -70,5 +72,10 @@ func (wfs *WFS) Write(cancel <-chan struct{}, in *fuse.WriteIn, data []byte) (wr fh.dirtyMetadata = true + if IsDebugFileReadWrite { + // print("+") + fh.mirrorFile.WriteAt(data, offset) + } + return written, fuse.OK } diff --git a/weed/mount/weedfs_write.go b/weed/mount/weedfs_write.go index e18a4a358..4c8470245 100644 --- a/weed/mount/weedfs_write.go +++ b/weed/mount/weedfs_write.go @@ -13,7 +13,7 @@ import ( func (wfs *WFS) saveDataAsChunk(fullPath util.FullPath) filer.SaveDataAsChunkFunctionType { - return func(reader io.Reader, filename string, offset int64) (chunk *filer_pb.FileChunk, err error) { + return func(reader io.Reader, filename string, offset int64, tsNs int64) (chunk *filer_pb.FileChunk, err error) { fileId, uploadResult, err, data := operation.UploadWithRetry( wfs, @@ -56,7 +56,7 @@ func (wfs *WFS) saveDataAsChunk(fullPath util.FullPath) 
filer.SaveDataAsChunkFun wfs.chunkCache.SetChunk(fileId, data) } - chunk = uploadResult.ToPbFileChunk(fileId, offset) + chunk = uploadResult.ToPbFileChunk(fileId, offset, tsNs) return chunk, nil } } diff --git a/weed/operation/upload_content.go b/weed/operation/upload_content.go index ed38dfa6b..0c3e29a43 100644 --- a/weed/operation/upload_content.go +++ b/weed/operation/upload_content.go @@ -45,13 +45,13 @@ type UploadResult struct { RetryCount int `json:"-"` } -func (uploadResult *UploadResult) ToPbFileChunk(fileId string, offset int64) *filer_pb.FileChunk { +func (uploadResult *UploadResult) ToPbFileChunk(fileId string, offset int64, tsNs int64) *filer_pb.FileChunk { fid, _ := filer_pb.ToFileIdObject(fileId) return &filer_pb.FileChunk{ FileId: fileId, Offset: offset, Size: uint64(uploadResult.Size), - ModifiedTsNs: time.Now().UnixNano(), + ModifiedTsNs: tsNs, ETag: uploadResult.ContentMd5, CipherKey: uploadResult.CipherKey, IsCompressed: uploadResult.Gzip > 0, diff --git a/weed/replication/repl_util/replication_util.go b/weed/replication/repl_util/replication_util.go index ec0e80b2e..9682ca623 100644 --- a/weed/replication/repl_util/replication_util.go +++ b/weed/replication/repl_util/replication_util.go @@ -7,9 +7,10 @@ import ( "github.com/seaweedfs/seaweedfs/weed/util" ) -func CopyFromChunkViews(chunkViews []*filer.ChunkView, filerSource *source.FilerSource, writeFunc func(data []byte) error) error { +func CopyFromChunkViews(chunkViews *filer.IntervalList[*filer.ChunkView], filerSource *source.FilerSource, writeFunc func(data []byte) error) error { - for _, chunk := range chunkViews { + for x := chunkViews.Front(); x != nil; x = x.Next { + chunk := x.Value fileUrls, err := filerSource.LookupFileId(chunk.FileId) if err != nil { @@ -20,7 +21,7 @@ func CopyFromChunkViews(chunkViews []*filer.ChunkView, filerSource *source.Filer var shouldRetry bool for _, fileUrl := range fileUrls { - shouldRetry, err = util.ReadUrlAsStream(fileUrl, chunk.CipherKey, chunk.IsGzipped, chunk.IsFullChunk(), chunk.Offset, int(chunk.Size), func(data []byte) { + shouldRetry, err = util.ReadUrlAsStream(fileUrl, chunk.CipherKey, chunk.IsGzipped, chunk.IsFullChunk(), chunk.OffsetInChunk, int(chunk.ViewSize), func(data []byte) { writeErr = writeFunc(data) }) if err != nil { diff --git a/weed/server/filer_server_handlers_write_autochunk.go b/weed/server/filer_server_handlers_write_autochunk.go index c671deb76..8b3fc45fb 100644 --- a/weed/server/filer_server_handlers_write_autochunk.go +++ b/weed/server/filer_server_handlers_write_autochunk.go @@ -256,7 +256,7 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa func (fs *FilerServer) saveAsChunk(so *operation.StorageOption) filer.SaveDataAsChunkFunctionType { - return func(reader io.Reader, name string, offset int64) (*filer_pb.FileChunk, error) { + return func(reader io.Reader, name string, offset int64, tsNs int64) (*filer_pb.FileChunk, error) { var fileId string var uploadResult *operation.UploadResult @@ -290,7 +290,7 @@ func (fs *FilerServer) saveAsChunk(so *operation.StorageOption) filer.SaveDataAs return nil, err } - return uploadResult.ToPbFileChunk(fileId, offset), nil + return uploadResult.ToPbFileChunk(fileId, offset, tsNs), nil } } diff --git a/weed/server/filer_server_handlers_write_cipher.go b/weed/server/filer_server_handlers_write_cipher.go index bd8761077..bb5659437 100644 --- a/weed/server/filer_server_handlers_write_cipher.go +++ b/weed/server/filer_server_handlers_write_cipher.go @@ -59,7 +59,7 @@ func (fs 
*FilerServer) encrypt(ctx context.Context, w http.ResponseWriter, r *ht } // Save to chunk manifest structure - fileChunks := []*filer_pb.FileChunk{uploadResult.ToPbFileChunk(fileId, 0)} + fileChunks := []*filer_pb.FileChunk{uploadResult.ToPbFileChunk(fileId, 0, time.Now().UnixNano())} // fmt.Printf("uploaded: %+v\n", uploadResult) diff --git a/weed/server/filer_server_handlers_write_upload.go b/weed/server/filer_server_handlers_write_upload.go index 95920583d..cc43eba64 100644 --- a/weed/server/filer_server_handlers_write_upload.go +++ b/weed/server/filer_server_handlers_write_upload.go @@ -214,5 +214,5 @@ func (fs *FilerServer) dataToChunk(fileName, contentType string, data []byte, ch if uploadResult.Size == 0 { return nil, nil } - return []*filer_pb.FileChunk{uploadResult.ToPbFileChunk(fileId, chunkOffset)}, nil + return []*filer_pb.FileChunk{uploadResult.ToPbFileChunk(fileId, chunkOffset, time.Now().UnixNano())}, nil } diff --git a/weed/server/webdav_server.go b/weed/server/webdav_server.go index 80b882181..79416d519 100644 --- a/weed/server/webdav_server.go +++ b/weed/server/webdav_server.go @@ -102,14 +102,14 @@ func (fi *FileInfo) IsDir() bool { return fi.isDirectory } func (fi *FileInfo) Sys() interface{} { return nil } type WebDavFile struct { - fs *WebDavFileSystem - name string - isDirectory bool - off int64 - entry *filer_pb.Entry - entryViewCache []filer.VisibleInterval - reader io.ReaderAt - bufWriter *buffered_writer.BufferedWriteCloser + fs *WebDavFileSystem + name string + isDirectory bool + off int64 + entry *filer_pb.Entry + visibleIntervals *filer.IntervalList[*filer.VisibleInterval] + reader io.ReaderAt + bufWriter *buffered_writer.BufferedWriteCloser } func NewWebDavFileSystem(option *WebDavOption) (webdav.FileSystem, error) { @@ -381,7 +381,7 @@ func (fs *WebDavFileSystem) Stat(ctx context.Context, name string) (os.FileInfo, return fs.stat(ctx, name) } -func (f *WebDavFile) saveDataAsChunk(reader io.Reader, name string, offset int64) (chunk *filer_pb.FileChunk, err error) { +func (f *WebDavFile) saveDataAsChunk(reader io.Reader, name string, offset int64, tsNs int64) (chunk *filer_pb.FileChunk, err error) { fileId, uploadResult, flushErr, _ := operation.UploadWithRetry( f.fs, @@ -413,7 +413,7 @@ func (f *WebDavFile) saveDataAsChunk(reader io.Reader, name string, offset int64 glog.V(0).Infof("upload failure %v: %v", f.name, flushErr) return nil, fmt.Errorf("upload result: %v", uploadResult.Error) } - return uploadResult.ToPbFileChunk(fileId, offset), nil + return uploadResult.ToPbFileChunk(fileId, offset, tsNs), nil } func (f *WebDavFile) Write(buf []byte) (int, error) { @@ -439,7 +439,7 @@ func (f *WebDavFile) Write(buf []byte) (int, error) { f.bufWriter.FlushFunc = func(data []byte, offset int64) (flushErr error) { var chunk *filer_pb.FileChunk - chunk, flushErr = f.saveDataAsChunk(util.NewBytesReader(data), f.name, offset) + chunk, flushErr = f.saveDataAsChunk(util.NewBytesReader(data), f.name, offset, time.Now().UnixNano()) if flushErr != nil { return fmt.Errorf("%s upload result: %v", f.name, flushErr) @@ -498,7 +498,7 @@ func (f *WebDavFile) Close() error { if f.entry != nil { f.entry = nil - f.entryViewCache = nil + f.visibleIntervals = nil } return err @@ -521,12 +521,12 @@ func (f *WebDavFile) Read(p []byte) (readSize int, err error) { if fileSize == 0 { return 0, io.EOF } - if f.entryViewCache == nil { - f.entryViewCache, _ = filer.NonOverlappingVisibleIntervals(filer.LookupFn(f.fs), f.entry.GetChunks(), 0, fileSize) + if f.visibleIntervals == nil { + 
f.visibleIntervals, _ = filer.NonOverlappingVisibleIntervals(filer.LookupFn(f.fs), f.entry.GetChunks(), 0, fileSize) f.reader = nil } if f.reader == nil { - chunkViews := filer.ViewFromVisibleIntervals(f.entryViewCache, 0, fileSize) + chunkViews := filer.ViewFromVisibleIntervals(f.visibleIntervals, 0, fileSize) f.reader = filer.NewChunkReaderAtFromClient(filer.LookupFn(f.fs), chunkViews, f.fs.chunkCache, fileSize) } diff --git a/weed/shell/command_fs_verify.go b/weed/shell/command_fs_verify.go index 11cc64d78..07f3fd9c1 100644 --- a/weed/shell/command_fs_verify.go +++ b/weed/shell/command_fs_verify.go @@ -117,7 +117,7 @@ type ItemEntry struct { func (c *commandFsVerify) verifyTraverseBfs(path string) (fileCount int64, errCount int64, err error) { timeNowAtSec := time.Now().Unix() - return fileCount, errCount, doTraverseBfsAndSaving(c.env, nil, path, false, + return fileCount, errCount, doTraverseBfsAndSaving(c.env, c.writer, path, false, func(entry *filer_pb.FullEntry, outputChan chan interface{}) (err error) { if c.modifyTimeAgoAtSec > 0 { if entry.Entry.Attributes != nil && c.modifyTimeAgoAtSec < timeNowAtSec-entry.Entry.Attributes.Mtime { diff --git a/weed/shell/command_volume_fsck.go b/weed/shell/command_volume_fsck.go index 559b11cd3..be9bd2db2 100644 --- a/weed/shell/command_volume_fsck.go +++ b/weed/shell/command_volume_fsck.go @@ -211,7 +211,7 @@ func (c *commandVolumeFsck) collectFilerFileIdAndPaths(dataNodeVolumeIdToVInfo m } }() - return doTraverseBfsAndSaving(c.env, nil, c.getCollectFilerFilePath(), false, + return doTraverseBfsAndSaving(c.env, c.writer, c.getCollectFilerFilePath(), false, func(entry *filer_pb.FullEntry, outputChan chan interface{}) (err error) { if *c.verbose && entry.Entry.IsDirectory { fmt.Fprintf(c.writer, "checking directory %s\n", util.NewFullPath(entry.Dir, entry.Entry.Name)) From 48f2edc06503d9a915fe8eb4cecbfc83f1467ce9 Mon Sep 17 00:00:00 2001 From: chrislu Date: Mon, 2 Jan 2023 23:23:02 -0800 Subject: [PATCH 10/19] 3.38 --- k8s/helm_charts2/Chart.yaml | 4 ++-- weed/util/constants.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/k8s/helm_charts2/Chart.yaml b/k8s/helm_charts2/Chart.yaml index 008a90286..b8afcf833 100644 --- a/k8s/helm_charts2/Chart.yaml +++ b/k8s/helm_charts2/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v1 description: SeaweedFS name: seaweedfs -appVersion: "3.37" -version: "3.37" +appVersion: "3.38" +version: "3.38" diff --git a/weed/util/constants.go b/weed/util/constants.go index a224a313e..2bffc2dd3 100644 --- a/weed/util/constants.go +++ b/weed/util/constants.go @@ -5,7 +5,7 @@ import ( ) var ( - VERSION_NUMBER = fmt.Sprintf("%.02f", 3.37) + VERSION_NUMBER = fmt.Sprintf("%.02f", 3.38) VERSION = sizeLimit + " " + VERSION_NUMBER COMMIT = "" ) From 6b4c0334311e06084723c5984afea3624c8235f1 Mon Sep 17 00:00:00 2001 From: zemul Date: Tue, 3 Jan 2023 16:00:45 +0800 Subject: [PATCH 11/19] add mount log (#4101) * filer.backup use replication.source.filer * add mount log * Revert "filer.backup use replication.source.filer" This reverts commit 07bf6f956c67b19ceed0f62e7d01e8ef1fdf6454. 
* fix Co-authored-by: zemul --- weed/command/mount_std.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/weed/command/mount_std.go b/weed/command/mount_std.go index 40b285ccf..33061146a 100644 --- a/weed/command/mount_std.go +++ b/weed/command/mount_std.go @@ -256,7 +256,8 @@ func RunMount(option *MountOptions, umask os.FileMode) bool { seaweedFileSystem.StartBackgroundTasks() - fmt.Printf("This is SeaweedFS version %s %s %s\n", util.Version(), runtime.GOOS, runtime.GOARCH) + glog.V(0).Infof("mounted %s%s to %v", *option.filer, mountRoot, dir) + glog.V(0).Infof("This is SeaweedFS version %s %s %s", util.Version(), runtime.GOOS, runtime.GOARCH) server.Serve() From c87a3ffce3278ac8710c25b872930ec3c6078ee0 Mon Sep 17 00:00:00 2001 From: chrislu Date: Tue, 3 Jan 2023 12:13:52 -0800 Subject: [PATCH 12/19] avoid hard coded versions --- k8s/helm_charts2/Chart.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/k8s/helm_charts2/Chart.yaml b/k8s/helm_charts2/Chart.yaml index b8afcf833..16041acea 100644 --- a/k8s/helm_charts2/Chart.yaml +++ b/k8s/helm_charts2/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v1 description: SeaweedFS name: seaweedfs -appVersion: "3.38" -version: "3.38" +appVersion: "latest" +version: "latest" From 7bdae5172e58e0b43d90f3f40df577857d004e40 Mon Sep 17 00:00:00 2001 From: chrislu Date: Tue, 3 Jan 2023 22:05:26 -0800 Subject: [PATCH 13/19] batch delete EC needles fix https://github.com/seaweedfs/seaweedfs/issues/4107 --- weed/server/volume_grpc_batch_delete.go | 64 ++++++++++++++++++------- 1 file changed, 46 insertions(+), 18 deletions(-) diff --git a/weed/server/volume_grpc_batch_delete.go b/weed/server/volume_grpc_batch_delete.go index 25780ec75..8deb96a80 100644 --- a/weed/server/volume_grpc_batch_delete.go +++ b/weed/server/volume_grpc_batch_delete.go @@ -28,6 +28,7 @@ func (vs *VolumeServer) BatchDelete(ctx context.Context, req *volume_server_pb.B n := new(needle.Needle) volumeId, _ := needle.NewVolumeId(vid) + ecVolume, isEcVolume := vs.store.FindEcVolume(volumeId) if req.SkipCookieCheck { n.Id, _, err = needle.ParseNeedleIdCookie(id_cookie) if err != nil { @@ -40,13 +41,24 @@ func (vs *VolumeServer) BatchDelete(ctx context.Context, req *volume_server_pb.B } else { n.ParsePath(id_cookie) cookie := n.Cookie - if _, err := vs.store.ReadVolumeNeedle(volumeId, n, nil, nil); err != nil { - resp.Results = append(resp.Results, &volume_server_pb.DeleteResult{ - FileId: fid, - Status: http.StatusNotFound, - Error: err.Error(), - }) - continue + if !isEcVolume { + if _, err := vs.store.ReadVolumeNeedle(volumeId, n, nil, nil); err != nil { + resp.Results = append(resp.Results, &volume_server_pb.DeleteResult{ + FileId: fid, + Status: http.StatusNotFound, + Error: err.Error(), + }) + continue + } + } else { + if _, err := vs.store.ReadEcShardNeedle(volumeId, n, nil); err != nil { + resp.Results = append(resp.Results, &volume_server_pb.DeleteResult{ + FileId: fid, + Status: http.StatusNotFound, + Error: err.Error(), + }) + continue + } } if n.Cookie != cookie { resp.Results = append(resp.Results, &volume_server_pb.DeleteResult{ @@ -68,18 +80,34 @@ func (vs *VolumeServer) BatchDelete(ctx context.Context, req *volume_server_pb.B } n.LastModified = now - if size, err := vs.store.DeleteVolumeNeedle(volumeId, n); err != nil { - resp.Results = append(resp.Results, &volume_server_pb.DeleteResult{ - FileId: fid, - Status: http.StatusInternalServerError, - Error: err.Error()}, - ) + if !isEcVolume { + if size, err := vs.store.DeleteVolumeNeedle(volumeId, 
n); err != nil { + resp.Results = append(resp.Results, &volume_server_pb.DeleteResult{ + FileId: fid, + Status: http.StatusInternalServerError, + Error: err.Error()}, + ) + } else { + resp.Results = append(resp.Results, &volume_server_pb.DeleteResult{ + FileId: fid, + Status: http.StatusAccepted, + Size: uint32(size)}, + ) + } } else { - resp.Results = append(resp.Results, &volume_server_pb.DeleteResult{ - FileId: fid, - Status: http.StatusAccepted, - Size: uint32(size)}, - ) + if size, err := vs.store.DeleteEcShardNeedle(ecVolume, n, n.Cookie); err != nil { + resp.Results = append(resp.Results, &volume_server_pb.DeleteResult{ + FileId: fid, + Status: http.StatusInternalServerError, + Error: err.Error()}, + ) + } else { + resp.Results = append(resp.Results, &volume_server_pb.DeleteResult{ + FileId: fid, + Status: http.StatusAccepted, + Size: uint32(size)}, + ) + } } } From 5423790b2c48c7922860f1471e1c55603d789b06 Mon Sep 17 00:00:00 2001 From: chrislu Date: Tue, 3 Jan 2023 22:50:39 -0800 Subject: [PATCH 14/19] add back previous chunk upload selection algo --- weed/mount/page_writer/page_chunk.go | 1 + weed/mount/page_writer/page_chunk_mem.go | 7 +++++ weed/mount/page_writer/page_chunk_swapfile.go | 6 +++++ weed/mount/page_writer/upload_pipeline.go | 26 +++++++++++++++---- 4 files changed, 35 insertions(+), 5 deletions(-) diff --git a/weed/mount/page_writer/page_chunk.go b/weed/mount/page_writer/page_chunk.go index 32d246deb..ac1d24622 100644 --- a/weed/mount/page_writer/page_chunk.go +++ b/weed/mount/page_writer/page_chunk.go @@ -12,5 +12,6 @@ type PageChunk interface { ReadDataAt(p []byte, off int64, tsNs int64) (maxStop int64) IsComplete() bool ActivityScore() int64 + WrittenSize() int64 SaveContent(saveFn SaveToStorageFunc) } diff --git a/weed/mount/page_writer/page_chunk_mem.go b/weed/mount/page_writer/page_chunk_mem.go index 1ec8cecb4..cbd82c953 100644 --- a/weed/mount/page_writer/page_chunk_mem.go +++ b/weed/mount/page_writer/page_chunk_mem.go @@ -86,6 +86,13 @@ func (mc *MemChunk) ActivityScore() int64 { return mc.activityScore.ActivityScore() } +func (mc *MemChunk) WrittenSize() int64 { + mc.RLock() + defer mc.RUnlock() + + return mc.usage.WrittenSize() +} + func (mc *MemChunk) SaveContent(saveFn SaveToStorageFunc) { mc.RLock() defer mc.RUnlock() diff --git a/weed/mount/page_writer/page_chunk_swapfile.go b/weed/mount/page_writer/page_chunk_swapfile.go index 6cedc64df..10060bef9 100644 --- a/weed/mount/page_writer/page_chunk_swapfile.go +++ b/weed/mount/page_writer/page_chunk_swapfile.go @@ -161,6 +161,12 @@ func (sc *SwapFileChunk) ActivityScore() int64 { return sc.activityScore.ActivityScore() } +func (sc *SwapFileChunk) WrittenSize() int64 { + sc.RLock() + defer sc.RUnlock() + return sc.usage.WrittenSize() +} + func (sc *SwapFileChunk) SaveContent(saveFn SaveToStorageFunc) { sc.RLock() defer sc.RUnlock() diff --git a/weed/mount/page_writer/upload_pipeline.go b/weed/mount/page_writer/upload_pipeline.go index 6065f2f76..e1aa43fe2 100644 --- a/weed/mount/page_writer/upload_pipeline.go +++ b/weed/mount/page_writer/upload_pipeline.go @@ -4,7 +4,6 @@ import ( "fmt" "github.com/seaweedfs/seaweedfs/weed/glog" "github.com/seaweedfs/seaweedfs/weed/util" - "math" "sync" "sync/atomic" ) @@ -67,15 +66,32 @@ func (up *UploadPipeline) SaveDataAt(p []byte, off int64, isSequential bool, tsN if !found { if len(up.writableChunks) > up.writableChunkLimit { // if current file chunks is over the per file buffer count limit - laziestChunkIndex, lowestActivityScore := LogicChunkIndex(-1), 
int64(math.MaxInt64) + candidateChunkIndex, fullness := LogicChunkIndex(-1), int64(0) + for lci, mc := range up.writableChunks { + chunkFullness := mc.WrittenSize() + if fullness < chunkFullness { + candidateChunkIndex = lci + fullness = chunkFullness + } + } + /* // this algo generates too many chunks + candidateChunkIndex, lowestActivityScore := LogicChunkIndex(-1), int64(math.MaxInt64) for wci, wc := range up.writableChunks { activityScore := wc.ActivityScore() - if lowestActivityScore > activityScore { - laziestChunkIndex = wci + if lowestActivityScore >= activityScore { + if lowestActivityScore == activityScore { + chunkFullness := wc.WrittenSize() + if fullness < chunkFullness { + candidateChunkIndex = lci + fullness = chunkFullness + } + } + candidateChunkIndex = wci lowestActivityScore = activityScore } } - up.moveToSealed(up.writableChunks[laziestChunkIndex], laziestChunkIndex) + */ + up.moveToSealed(up.writableChunks[candidateChunkIndex], candidateChunkIndex) // fmt.Printf("flush chunk %d with %d bytes written\n", logicChunkIndex, oldestTs) } if isSequential && From e367444586f338318e025c659aa8e11b8506aff2 Mon Sep 17 00:00:00 2001 From: chrislu Date: Wed, 4 Jan 2023 09:52:25 -0800 Subject: [PATCH 15/19] add notes for full version --- weed/filer/configuration.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/filer/configuration.go b/weed/filer/configuration.go index 4ed19eee3..db4af1559 100644 --- a/weed/filer/configuration.go +++ b/weed/filer/configuration.go @@ -33,7 +33,7 @@ func (f *Filer) LoadConfiguration(config *util.ViperProxy) (isFresh bool) { if !hasDefaultStoreConfigured { println() - println("Supported filer stores are:") + println("Supported filer stores are the following. If not found, check the full version.") for _, store := range Stores { println(" " + store.GetName()) } From 9ffe1d6aec9feee9731816bcaf2040fcb03317f5 Mon Sep 17 00:00:00 2001 From: Tobias Gurtzick Date: Thu, 5 Jan 2023 20:01:22 +0100 Subject: [PATCH 16/19] add code of conduct (#4109) Signed-off-by: Tobias Gurtzick Signed-off-by: Tobias Gurtzick --- CODE_OF_CONDUCT.md | 74 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) create mode 100644 CODE_OF_CONDUCT.md diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..c561b2fa2 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to make participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. 
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+- Using welcoming and inclusive language
+- Being respectful of differing viewpoints and experiences
+- Gracefully accepting constructive criticism
+- Focusing on what is best for the community
+- Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+- The use of sexualized language or imagery and unwelcome sexual attention or
+ advances
+- Trolling, insulting/derogatory comments, and personal or political attacks
+- Public or private harassment
+- Publishing others' private information, such as a physical or electronic
+ address, without explicit permission
+- Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team at . All
+complaints will be reviewed and investigated and will result in a response that
+is deemed necessary and appropriate to the circumstances. The project team is
+obligated to maintain confidentiality with regard to the reporter of an incident.
+Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+ +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ From 2abf817580aa127fbd2943fe678567636147ebba Mon Sep 17 00:00:00 2001 From: chrislu Date: Thu, 5 Jan 2023 11:19:21 -0800 Subject: [PATCH 17/19] fix for stream reader fix https://github.com/seaweedfs/seaweedfs/issues/4112 --- weed/filer/stream.go | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/weed/filer/stream.go b/weed/filer/stream.go index d49784686..d23a1aeab 100644 --- a/weed/filer/stream.go +++ b/weed/filer/stream.go @@ -187,6 +187,7 @@ func ReadAll(buffer []byte, masterClient *wdclient.MasterClient, chunks []*filer // ---------------- ChunkStreamReader ---------------------------------- type ChunkStreamReader struct { + head *Interval[*ChunkView] chunkView *Interval[*ChunkView] totalSize int64 logicOffset int64 @@ -211,6 +212,7 @@ func doNewChunkStreamReader(lookupFileIdFn wdclient.LookupFileIdFunctionType, ch } return &ChunkStreamReader{ + head: chunkViews.Front(), chunkView: chunkViews.Front(), lookupFileId: lookupFileIdFn, totalSize: totalSize, @@ -309,15 +311,20 @@ func (c *ChunkStreamReader) prepareBufferFor(offset int64) (err error) { chunk := c.chunkView.Value if insideChunk(offset, chunk) { if c.isBufferEmpty() || c.bufferOffset != chunk.ViewOffset { - if err = c.fetchChunkToBuffer(chunk); err != nil { - return - } + return c.fetchChunkToBuffer(chunk) } } else { - // glog.Fatalf("unexpected3 offset %d in %s [%d,%d)", offset, chunk.FileId, chunk.ViewOffset, chunk.ViewOffset+int64(chunk.ViewSize)) - return fmt.Errorf("unexpected3 offset %d in %s [%d,%d)", offset, chunk.FileId, chunk.ViewOffset, chunk.ViewOffset+int64(chunk.ViewSize)) + for p := c.head; p != nil; p = p.Next { + chunk = p.Value + if insideChunk(offset, chunk) { + if c.isBufferEmpty() || c.bufferOffset != chunk.ViewOffset { + return c.fetchChunkToBuffer(chunk) + } + } + } } - return + + return io.EOF } func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error { From 296fdc296c8bd672d86c9aef19e9f568dab1b8a0 Mon Sep 17 00:00:00 2001 From: chrislu Date: Fri, 6 Jan 2023 01:03:29 -0800 Subject: [PATCH 18/19] mount: faster add chunks --- weed/filer/filechunk_section.go | 15 +++++++++++---- weed/filer/filechunks.go | 11 +++++++++++ 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/weed/filer/filechunk_section.go b/weed/filer/filechunk_section.go index 60c919569..5804c7160 100644 --- a/weed/filer/filechunk_section.go +++ b/weed/filer/filechunk_section.go @@ -32,10 +32,17 @@ func (section *FileChunkSection) addChunk(chunk *filer_pb.FileChunk) error { if section.visibleIntervals != nil { MergeIntoVisibles(section.visibleIntervals, start, stop, chunk) - } - - if section.visibleIntervals != nil { - section.chunks, _ = SeparateGarbageChunks(section.visibleIntervals, section.chunks) + garbageFileIds := FindGarbageChunks(section.visibleIntervals, start, stop) + for _, garbageFileId := range garbageFileIds { + length := len(section.chunks) + for i, t := range section.chunks { + if t.FileId == garbageFileId { + section.chunks[i] = section.chunks[length-1] + section.chunks = section.chunks[:length-1] + break + } + } + } } if section.chunkViews != nil { diff --git a/weed/filer/filechunks.go b/weed/filer/filechunks.go index d872bd22d..480478fd7 100644 --- a/weed/filer/filechunks.go +++ 
b/weed/filer/filechunks.go @@ -86,6 +86,17 @@ func SeparateGarbageChunks(visibles *IntervalList[*VisibleInterval], chunks []*f return compacted, garbage } +func FindGarbageChunks(visibles *IntervalList[*VisibleInterval], start int64, stop int64) (garbageFileId []string) { + for x := visibles.Front(); x != nil; x = x.Next { + interval := x.Value + offset := interval.start - interval.offsetInChunk + if start <= offset && offset+int64(interval.chunkSize) <= stop { + garbageFileId = append(garbageFileId, interval.fileId) + } + } + return +} + func MinusChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk, err error) { aData, aMeta, aErr := ResolveChunkManifest(lookupFileIdFn, as, 0, math.MaxInt64) From 3e2c9ea73ddc577da64e5c8596f8faae884c6840 Mon Sep 17 00:00:00 2001 From: monchickey <75814968+monchickey@users.noreply.github.com> Date: Sat, 7 Jan 2023 01:28:07 +0800 Subject: [PATCH 19/19] Add image cropping. (#4117) --- weed/images/cropping.go | 47 +++++++++++++++++++ weed/images/cropping_test.go | 22 +++++++++ weed/server/volume_server_handlers_read.go | 53 +++++++++++++++++++--- 3 files changed, 116 insertions(+), 6 deletions(-) create mode 100644 weed/images/cropping.go create mode 100644 weed/images/cropping_test.go diff --git a/weed/images/cropping.go b/weed/images/cropping.go new file mode 100644 index 000000000..07a3f41ad --- /dev/null +++ b/weed/images/cropping.go @@ -0,0 +1,47 @@ +package images + +import ( + "bytes" + "image" + "image/gif" + "image/jpeg" + "image/png" + "io" + + "github.com/disintegration/imaging" + + "github.com/seaweedfs/seaweedfs/weed/glog" +) + +func Cropped(ext string, read io.ReadSeeker, x1, y1, x2, y2 int) (cropped io.ReadSeeker, err error) { + srcImage, _, err := image.Decode(read) + if err != nil { + glog.Error(err) + return read, err + } + + bounds := srcImage.Bounds() + if x2 > bounds.Dx() || y2 > bounds.Dy() { + read.Seek(0, 0) + return read, nil + } + + rectangle := image.Rect(x1, y1, x2, y2) + dstImage := imaging.Crop(srcImage, rectangle) + var buf bytes.Buffer + switch ext { + case ".jpg", ".jpeg": + if err = jpeg.Encode(&buf, dstImage, nil); err != nil { + glog.Error(err) + } + case ".png": + if err = png.Encode(&buf, dstImage); err != nil { + glog.Error(err) + } + case ".gif": + if err = gif.Encode(&buf, dstImage, nil); err != nil { + glog.Error(err) + } + } + return bytes.NewReader(buf.Bytes()), err +} diff --git a/weed/images/cropping_test.go b/weed/images/cropping_test.go new file mode 100644 index 000000000..284432e3a --- /dev/null +++ b/weed/images/cropping_test.go @@ -0,0 +1,22 @@ +package images + +import ( + "bytes" + "os" + "testing" + + "github.com/seaweedfs/seaweedfs/weed/util" +) + +func TestCropping(t *testing.T) { + fname := "sample1.jpg" + + dat, _ := os.ReadFile(fname) + + cropped, _ := Cropped(".jpg", bytes.NewReader(dat), 1072, 932, 1751, 1062) + buf := new(bytes.Buffer) + buf.ReadFrom(cropped) + + util.WriteFile("cropped1.jpg", buf.Bytes(), 0644) + +} diff --git a/weed/server/volume_server_handlers_read.go b/weed/server/volume_server_handlers_read.go index 8ad526d59..10ff15d92 100644 --- a/weed/server/volume_server_handlers_read.go +++ b/weed/server/volume_server_handlers_read.go @@ -5,8 +5,6 @@ import ( "encoding/json" "errors" "fmt" - "github.com/seaweedfs/seaweedfs/weed/storage/types" - "github.com/seaweedfs/seaweedfs/weed/util/mem" "io" "mime" "net/http" @@ -17,6 +15,9 @@ import ( "sync/atomic" "time" + "github.com/seaweedfs/seaweedfs/weed/storage/types" + 
"github.com/seaweedfs/seaweedfs/weed/util/mem" + "github.com/seaweedfs/seaweedfs/weed/glog" "github.com/seaweedfs/seaweedfs/weed/images" "github.com/seaweedfs/seaweedfs/weed/operation" @@ -204,7 +205,9 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) } if n.IsCompressed() { - if _, _, _, shouldResize := shouldResizeImages(ext, r); shouldResize { + _, _, _, shouldResize := shouldResizeImages(ext, r) + _, _, _, _, shouldCrop := shouldCropImages(ext, r) + if shouldResize || shouldCrop { if n.Data, err = util.DecompressData(n.Data); err != nil { glog.V(0).Infoln("ungzip error:", err, r.URL.Path) } @@ -220,7 +223,8 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) } if !readOption.IsMetaOnly { - rs := conditionallyResizeImages(bytes.NewReader(n.Data), ext, r) + rs := conditionallyCropImages(bytes.NewReader(n.Data), ext, r) + rs = conditionallyResizeImages(rs, ext, r) if e := writeResponseContent(filename, mtype, rs, w, r); e != nil { glog.V(2).Infoln("response write error:", e) } @@ -240,7 +244,8 @@ func shouldAttemptStreamWrite(hasLocalVolume bool, ext string, r *http.Request) return true, true } _, _, _, shouldResize := shouldResizeImages(ext, r) - if shouldResize { + _, _, _, _, shouldCrop := shouldCropImages(ext, r) + if shouldResize || shouldCrop { return false, false } return true, false @@ -277,7 +282,8 @@ func (vs *VolumeServer) tryHandleChunkedFile(n *needle.Needle, fileName string, chunkedFileReader := operation.NewChunkedFileReader(chunkManifest.Chunks, vs.GetMaster(), vs.grpcDialOption) defer chunkedFileReader.Close() - rs := conditionallyResizeImages(chunkedFileReader, ext, r) + rs := conditionallyCropImages(chunkedFileReader, ext, r) + rs = conditionallyResizeImages(rs, ext, r) if e := writeResponseContent(fileName, mType, rs, w, r); e != nil { glog.V(2).Infoln("response write error:", e) @@ -311,6 +317,41 @@ func shouldResizeImages(ext string, r *http.Request) (width, height int, mode st return } +func conditionallyCropImages(originalDataReaderSeeker io.ReadSeeker, ext string, r *http.Request) io.ReadSeeker { + rs := originalDataReaderSeeker + if len(ext) > 0 { + ext = strings.ToLower(ext) + } + x1, y1, x2, y2, shouldCrop := shouldCropImages(ext, r) + if shouldCrop { + var err error + rs, err = images.Cropped(ext, rs, x1, y1, x2, y2) + if err != nil { + glog.Errorf("Cropping images error: %s", err) + } + } + return rs +} + +func shouldCropImages(ext string, r *http.Request) (x1, y1, x2, y2 int, shouldCrop bool) { + if ext == ".png" || ext == ".jpg" || ext == ".jpeg" || ext == ".gif" { + if r.FormValue("crop_x1") != "" { + x1, _ = strconv.Atoi(r.FormValue("crop_x1")) + } + if r.FormValue("crop_y1") != "" { + y1, _ = strconv.Atoi(r.FormValue("crop_y1")) + } + if r.FormValue("crop_x2") != "" { + x2, _ = strconv.Atoi(r.FormValue("crop_x2")) + } + if r.FormValue("crop_y2") != "" { + y2, _ = strconv.Atoi(r.FormValue("crop_y2")) + } + } + shouldCrop = x1 >= 0 && y1 >= 0 && x2 > x1 && y2 > y1 + return +} + func writeResponseContent(filename, mimeType string, rs io.ReadSeeker, w http.ResponseWriter, r *http.Request) error { totalSize, e := rs.Seek(0, 2) if mimeType == "" {