diff --git a/weed/admin/dash/ec_shard_management.go b/weed/admin/dash/ec_shard_management.go
index 34574ecdb..82aa4074d 100644
--- a/weed/admin/dash/ec_shard_management.go
+++ b/weed/admin/dash/ec_shard_management.go
@@ -68,7 +68,7 @@ func (s *AdminServer) GetClusterEcShards(page int, pageSize int, sortBy string,
 				// Create individual shard entries for each shard this server has
 				shardBits := ecShardInfo.EcIndexBits
-				for shardId := 0; shardId < erasure_coding.TotalShardsCount; shardId++ {
+				for shardId := 0; shardId < erasure_coding.MaxShardCount; shardId++ {
 					if (shardBits & (1 << uint(shardId))) != 0 {
 						// Mark this shard as present for this volume
 						volumeShardsMap[volumeId][shardId] = true
@@ -112,6 +112,7 @@ func (s *AdminServer) GetClusterEcShards(page int, pageSize int, sortBy string,
 		shardCount := len(shardsPresent)
 
 		// Find which shards are missing for this volume across ALL servers
+		// Uses default 10+4 (14 total shards)
 		for shardId := 0; shardId < erasure_coding.TotalShardsCount; shardId++ {
 			if !shardsPresent[shardId] {
 				missingShards = append(missingShards, shardId)
@@ -332,7 +333,7 @@ func (s *AdminServer) GetClusterEcVolumes(page int, pageSize int, sortBy string,
 			// Process each shard this server has for this volume
 			shardBits := ecShardInfo.EcIndexBits
-			for shardId := 0; shardId < erasure_coding.TotalShardsCount; shardId++ {
+			for shardId := 0; shardId < erasure_coding.MaxShardCount; shardId++ {
 				if (shardBits & (1 << uint(shardId))) != 0 {
 					// Record shard location
 					volume.ShardLocations[shardId] = node.Id
@@ -392,7 +393,7 @@ func (s *AdminServer) GetClusterEcVolumes(page int, pageSize int, sortBy string,
 	for _, volume := range volumeData {
 		volume.TotalShards = len(volume.ShardLocations)
 
-		// Find missing shards
+		// Find missing shards (default 10+4 = 14 total shards)
 		var missingShards []int
 		for shardId := 0; shardId < erasure_coding.TotalShardsCount; shardId++ {
 			if _, exists := volume.ShardLocations[shardId]; !exists {
@@ -523,7 +524,7 @@ func sortEcVolumes(volumes []EcVolumeWithShards, sortBy string, sortOrder string
 // getShardCount returns the number of shards represented by the bitmap
 func getShardCount(ecIndexBits uint32) int {
 	count := 0
-	for i := 0; i < erasure_coding.TotalShardsCount; i++ {
+	for i := 0; i < erasure_coding.MaxShardCount; i++ {
 		if (ecIndexBits & (1 << uint(i))) != 0 {
 			count++
 		}
 	}
@@ -532,6 +533,7 @@ func getShardCount(ecIndexBits uint32) int {
 }
 
 // getMissingShards returns a slice of missing shard IDs for a volume
+// Assumes default 10+4 EC configuration (14 total shards)
 func getMissingShards(ecIndexBits uint32) []int {
 	var missing []int
 	for i := 0; i < erasure_coding.TotalShardsCount; i++ {
@@ -614,7 +616,7 @@ func (s *AdminServer) GetEcVolumeDetails(volumeID uint32, sortBy string, sortOrd
 			// Create individual shard entries for each shard this server has
 			shardBits := ecShardInfo.EcIndexBits
-			for shardId := 0; shardId < erasure_coding.TotalShardsCount; shardId++ {
+			for shardId := 0; shardId < erasure_coding.MaxShardCount; shardId++ {
 				if (shardBits & (1 << uint(shardId))) != 0 {
 					ecShard := EcShardWithInfo{
 						VolumeID: ecShardInfo.Id,
@@ -698,6 +700,7 @@ func (s *AdminServer) GetEcVolumeDetails(volumeID uint32, sortBy string, sortOrd
 	}
 
 	totalUniqueShards := len(foundShards)
+	// Check completeness using default 10+4 (14 total shards)
 	isComplete := (totalUniqueShards == erasure_coding.TotalShardsCount)
 
 	// Calculate missing shards
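
Reviewer note on the loop-bound change above: scanning `EcIndexBits` up to `erasure_coding.MaxShardCount` separates "how many bits the bitmap can carry" from "how many shards a healthy volume needs", while completeness checks deliberately keep `erasure_coding.TotalShardsCount` (10 data + 4 parity = 14). A minimal, self-contained sketch of the two loops; the constants `maxShardCount`/`totalShardsCount` and the function names `countShards`/`missingShards` are stand-ins for the package's own, not taken from this diff:

```go
package main

import "fmt"

const (
	totalShardsCount = 14 // stand-in for erasure_coding.TotalShardsCount (default 10+4)
	maxShardCount    = 32 // stand-in for erasure_coding.MaxShardCount (full uint32 bitmap width)
)

// countShards scans the whole bitmap width, so shards beyond the default
// 14 (from a custom EC configuration) are still counted.
func countShards(ecIndexBits uint32) int {
	count := 0
	for i := 0; i < maxShardCount; i++ {
		if ecIndexBits&(1<<uint(i)) != 0 {
			count++
		}
	}
	return count
}

// missingShards judges completeness against the default 10+4 layout only,
// mirroring the "default 10+4" comments added in the diff.
func missingShards(ecIndexBits uint32) []int {
	var missing []int
	for i := 0; i < totalShardsCount; i++ {
		if ecIndexBits&(1<<uint(i)) == 0 {
			missing = append(missing, i)
		}
	}
	return missing
}

func main() {
	bits := uint32(1)<<totalShardsCount - 1             // shards 0-13 present
	fmt.Println(countShards(bits), missingShards(bits)) // 14 []
}
```

If a volume ever carries more than 14 shards, only the first loop sees them; that asymmetry is exactly why each remaining `TotalShardsCount` loop is annotated as assuming the default layout.
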
diff --git a/weed/pb/volume_server.proto b/weed/pb/volume_server.proto
index fcdad30ff..d0d664f74 100644
--- a/weed/pb/volume_server.proto
+++ b/weed/pb/volume_server.proto
@@ -525,6 +525,13 @@ message VolumeInfo {
     int64 dat_file_size = 5; // store the original dat file size
     uint64 expire_at_sec = 6; // expiration time of ec volume
     bool read_only = 7;
+    EcShardConfig ec_shard_config = 8; // EC shard configuration (optional, null = use default 10+4)
+}
+
+// EcShardConfig specifies erasure coding shard configuration
+message EcShardConfig {
+    uint32 data_shards = 1;   // Number of data shards (e.g., 10)
+    uint32 parity_shards = 2; // Number of parity shards (e.g., 4)
 }
 
 message OldVersionVolumeInfo {
     repeated RemoteFile files = 1;
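
Because field 8 is optional, readers of `VolumeInfo` have to treat a missing `EcShardConfig` as the legacy 10+4 layout. A hedged sketch of that defaulting, using the getters generated below; the helper `effectiveShardCounts` and the module import path are illustrative assumptions, not part of this change:

```go
package main

import (
	"fmt"

	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
)

// effectiveShardCounts returns the data/parity split recorded in VolumeInfo,
// falling back to the historical 10+4 default when ec_shard_config is unset.
func effectiveShardCounts(vi *volume_server_pb.VolumeInfo) (data, parity uint32) {
	if cfg := vi.GetEcShardConfig(); cfg != nil {
		return cfg.GetDataShards(), cfg.GetParityShards()
	}
	return 10, 4 // default layout assumed for volumes written by older servers
}

func main() {
	legacy := &volume_server_pb.VolumeInfo{} // no EcShardConfig: old writer
	custom := &volume_server_pb.VolumeInfo{
		EcShardConfig: &volume_server_pb.EcShardConfig{DataShards: 12, ParityShards: 6},
	}
	d, p := effectiveShardCounts(legacy)
	fmt.Println(d, p) // 10 4
	d, p = effectiveShardCounts(custom)
	fmt.Println(d, p) // 12 6
}
```

Keeping the default on the read side, rather than stamping 10+4 into every `VolumeInfo`, preserves compatibility with `.vif` files written before this field existed.
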
diff --git a/weed/pb/volume_server_pb/volume_server.pb.go b/weed/pb/volume_server_pb/volume_server.pb.go
index 503db63ef..27e791be5 100644
--- a/weed/pb/volume_server_pb/volume_server.pb.go
+++ b/weed/pb/volume_server_pb/volume_server.pb.go
@@ -4442,6 +4442,7 @@ type VolumeInfo struct {
 	DatFileSize   int64  `protobuf:"varint,5,opt,name=dat_file_size,json=datFileSize,proto3" json:"dat_file_size,omitempty"` // store the original dat file size
 	ExpireAtSec   uint64 `protobuf:"varint,6,opt,name=expire_at_sec,json=expireAtSec,proto3" json:"expire_at_sec,omitempty"` // expiration time of ec volume
 	ReadOnly      bool   `protobuf:"varint,7,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"`
+	EcShardConfig *EcShardConfig `protobuf:"bytes,8,opt,name=ec_shard_config,json=ecShardConfig,proto3" json:"ec_shard_config,omitempty"` // EC shard configuration (optional, null = use default 10+4)
 	unknownFields protoimpl.UnknownFields
 	sizeCache     protoimpl.SizeCache
 }
@@ -4525,6 +4526,66 @@ func (x *VolumeInfo) GetReadOnly() bool {
 	return false
 }
 
+func (x *VolumeInfo) GetEcShardConfig() *EcShardConfig {
+	if x != nil {
+		return x.EcShardConfig
+	}
+	return nil
+}
+
+// EcShardConfig specifies erasure coding shard configuration
+type EcShardConfig struct {
+	state         protoimpl.MessageState `protogen:"open.v1"`
+	DataShards    uint32                 `protobuf:"varint,1,opt,name=data_shards,json=dataShards,proto3" json:"data_shards,omitempty"`       // Number of data shards (e.g., 10)
+	ParityShards  uint32                 `protobuf:"varint,2,opt,name=parity_shards,json=parityShards,proto3" json:"parity_shards,omitempty"` // Number of parity shards (e.g., 4)
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *EcShardConfig) Reset() {
+	*x = EcShardConfig{}
+	mi := &file_volume_server_proto_msgTypes[80]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *EcShardConfig) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EcShardConfig) ProtoMessage() {}
+
+func (x *EcShardConfig) ProtoReflect() protoreflect.Message {
+	mi := &file_volume_server_proto_msgTypes[80]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use EcShardConfig.ProtoReflect.Descriptor instead.
+func (*EcShardConfig) Descriptor() ([]byte, []int) {
+	return file_volume_server_proto_rawDescGZIP(), []int{80}
+}
+
+func (x *EcShardConfig) GetDataShards() uint32 {
+	if x != nil {
+		return x.DataShards
+	}
+	return 0
+}
+
+func (x *EcShardConfig) GetParityShards() uint32 {
+	if x != nil {
+		return x.ParityShards
+	}
+	return 0
+}
+
 type OldVersionVolumeInfo struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
 	Files []*RemoteFile          `protobuf:"bytes,1,rep,name=files,proto3" json:"files,omitempty"`
@@ -4540,7 +4601,7 @@ type OldVersionVolumeInfo struct {
 
 func (x *OldVersionVolumeInfo) Reset() {
 	*x = OldVersionVolumeInfo{}
-	mi := &file_volume_server_proto_msgTypes[80]
+	mi := &file_volume_server_proto_msgTypes[81]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
 }
@@ -4552,7 +4613,7 @@ func (x *OldVersionVolumeInfo) String() string {
 
 func (*OldVersionVolumeInfo) ProtoMessage() {}
 
 func (x *OldVersionVolumeInfo) ProtoReflect() protoreflect.Message {
-	mi := &file_volume_server_proto_msgTypes[80]
+	mi := &file_volume_server_proto_msgTypes[81]
 	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -4565,7 +4626,7 @@ func (x *OldVersionVolumeInfo) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use OldVersionVolumeInfo.ProtoReflect.Descriptor instead.
 func (*OldVersionVolumeInfo) Descriptor() ([]byte, []int) {
-	return file_volume_server_proto_rawDescGZIP(), []int{80}
+	return file_volume_server_proto_rawDescGZIP(), []int{81}
 }
 
 func (x *OldVersionVolumeInfo) GetFiles() []*RemoteFile {
@@ -4630,7 +4691,7 @@ type VolumeTierMoveDatToRemoteRequest struct {
 
 func (x *VolumeTierMoveDatToRemoteRequest) Reset() {
 	*x = VolumeTierMoveDatToRemoteRequest{}
-	mi := &file_volume_server_proto_msgTypes[81]
+	mi := &file_volume_server_proto_msgTypes[82]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
 }
@@ -4642,7 +4703,7 @@ func (x *VolumeTierMoveDatToRemoteRequest) String() string {
 
 func (*VolumeTierMoveDatToRemoteRequest) ProtoMessage() {}
 
 func (x *VolumeTierMoveDatToRemoteRequest) ProtoReflect() protoreflect.Message {
-	mi := &file_volume_server_proto_msgTypes[81]
+	mi := &file_volume_server_proto_msgTypes[82]
 	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -4655,7 +4716,7 @@ func (x *VolumeTierMoveDatToRemoteRequest) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use VolumeTierMoveDatToRemoteRequest.ProtoReflect.Descriptor instead.
 func (*VolumeTierMoveDatToRemoteRequest) Descriptor() ([]byte, []int) {
-	return file_volume_server_proto_rawDescGZIP(), []int{81}
+	return file_volume_server_proto_rawDescGZIP(), []int{82}
 }
 
 func (x *VolumeTierMoveDatToRemoteRequest) GetVolumeId() uint32 {
@@ -4696,7 +4757,7 @@ type VolumeTierMoveDatToRemoteResponse struct {
 
 func (x *VolumeTierMoveDatToRemoteResponse) Reset() {
 	*x = VolumeTierMoveDatToRemoteResponse{}
-	mi := &file_volume_server_proto_msgTypes[82]
+	mi := &file_volume_server_proto_msgTypes[83]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
 }
@@ -4708,7 +4769,7 @@ func (x *VolumeTierMoveDatToRemoteResponse) String() string {
 
 func (*VolumeTierMoveDatToRemoteResponse) ProtoMessage() {}
 
 func (x *VolumeTierMoveDatToRemoteResponse) ProtoReflect() protoreflect.Message {
-	mi := &file_volume_server_proto_msgTypes[82]
+	mi := &file_volume_server_proto_msgTypes[83]
 	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -4721,7 +4782,7 @@ func (x *VolumeTierMoveDatToRemoteResponse) ProtoReflect() protoreflect.Message
 
 // Deprecated: Use VolumeTierMoveDatToRemoteResponse.ProtoReflect.Descriptor instead.
 func (*VolumeTierMoveDatToRemoteResponse) Descriptor() ([]byte, []int) {
-	return file_volume_server_proto_rawDescGZIP(), []int{82}
+	return file_volume_server_proto_rawDescGZIP(), []int{83}
 }
 
 func (x *VolumeTierMoveDatToRemoteResponse) GetProcessed() int64 {
@@ -4749,7 +4810,7 @@ type VolumeTierMoveDatFromRemoteRequest struct {
 
 func (x *VolumeTierMoveDatFromRemoteRequest) Reset() {
 	*x = VolumeTierMoveDatFromRemoteRequest{}
-	mi := &file_volume_server_proto_msgTypes[83]
+	mi := &file_volume_server_proto_msgTypes[84]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
 }
@@ -4761,7 +4822,7 @@ func (x *VolumeTierMoveDatFromRemoteRequest) String() string {
 
 func (*VolumeTierMoveDatFromRemoteRequest) ProtoMessage() {}
 
 func (x *VolumeTierMoveDatFromRemoteRequest) ProtoReflect() protoreflect.Message {
-	mi := &file_volume_server_proto_msgTypes[83]
+	mi := &file_volume_server_proto_msgTypes[84]
 	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -4774,7 +4835,7 @@ func (x *VolumeTierMoveDatFromRemoteRequest) ProtoReflect() protoreflect.Message
 
 // Deprecated: Use VolumeTierMoveDatFromRemoteRequest.ProtoReflect.Descriptor instead.
 func (*VolumeTierMoveDatFromRemoteRequest) Descriptor() ([]byte, []int) {
-	return file_volume_server_proto_rawDescGZIP(), []int{83}
+	return file_volume_server_proto_rawDescGZIP(), []int{84}
 }
 
 func (x *VolumeTierMoveDatFromRemoteRequest) GetVolumeId() uint32 {
@@ -4808,7 +4869,7 @@ type VolumeTierMoveDatFromRemoteResponse struct {
 
 func (x *VolumeTierMoveDatFromRemoteResponse) Reset() {
 	*x = VolumeTierMoveDatFromRemoteResponse{}
-	mi := &file_volume_server_proto_msgTypes[84]
+	mi := &file_volume_server_proto_msgTypes[85]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
 }
@@ -4820,7 +4881,7 @@ func (x *VolumeTierMoveDatFromRemoteResponse) String() string {
 
 func (*VolumeTierMoveDatFromRemoteResponse) ProtoMessage() {}
 
 func (x *VolumeTierMoveDatFromRemoteResponse) ProtoReflect() protoreflect.Message {
-	mi := &file_volume_server_proto_msgTypes[84]
+	mi := &file_volume_server_proto_msgTypes[85]
 	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -4833,7 +4894,7 @@ func (x *VolumeTierMoveDatFromRemoteResponse) ProtoReflect() protoreflect.Messag
 
 // Deprecated: Use VolumeTierMoveDatFromRemoteResponse.ProtoReflect.Descriptor instead.
 func (*VolumeTierMoveDatFromRemoteResponse) Descriptor() ([]byte, []int) {
-	return file_volume_server_proto_rawDescGZIP(), []int{84}
+	return file_volume_server_proto_rawDescGZIP(), []int{85}
 }
 
 func (x *VolumeTierMoveDatFromRemoteResponse) GetProcessed() int64 {
@@ -4858,7 +4919,7 @@ type VolumeServerStatusRequest struct {
 
 func (x *VolumeServerStatusRequest) Reset() {
 	*x = VolumeServerStatusRequest{}
-	mi := &file_volume_server_proto_msgTypes[85]
+	mi := &file_volume_server_proto_msgTypes[86]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
 }
@@ -4870,7 +4931,7 @@ func (x *VolumeServerStatusRequest) String() string {
 
 func (*VolumeServerStatusRequest) ProtoMessage() {}
 
 func (x *VolumeServerStatusRequest) ProtoReflect() protoreflect.Message {
-	mi := &file_volume_server_proto_msgTypes[85]
+	mi := &file_volume_server_proto_msgTypes[86]
 	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -4883,7 +4944,7 @@ func (x *VolumeServerStatusRequest) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use VolumeServerStatusRequest.ProtoReflect.Descriptor instead.
 func (*VolumeServerStatusRequest) Descriptor() ([]byte, []int) {
-	return file_volume_server_proto_rawDescGZIP(), []int{85}
+	return file_volume_server_proto_rawDescGZIP(), []int{86}
 }
 
 type VolumeServerStatusResponse struct {
@@ -4899,7 +4960,7 @@ type VolumeServerStatusResponse struct {
 
 func (x *VolumeServerStatusResponse) Reset() {
 	*x = VolumeServerStatusResponse{}
-	mi := &file_volume_server_proto_msgTypes[86]
+	mi := &file_volume_server_proto_msgTypes[87]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
 }
@@ -4911,7 +4972,7 @@ func (x *VolumeServerStatusResponse) String() string {
 
 func (*VolumeServerStatusResponse) ProtoMessage() {}
 
 func (x *VolumeServerStatusResponse) ProtoReflect() protoreflect.Message {
-	mi := &file_volume_server_proto_msgTypes[86]
+	mi := &file_volume_server_proto_msgTypes[87]
 	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -4924,7 +4985,7 @@ func (x *VolumeServerStatusResponse) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use VolumeServerStatusResponse.ProtoReflect.Descriptor instead.
 func (*VolumeServerStatusResponse) Descriptor() ([]byte, []int) {
-	return file_volume_server_proto_rawDescGZIP(), []int{86}
+	return file_volume_server_proto_rawDescGZIP(), []int{87}
 }
 
 func (x *VolumeServerStatusResponse) GetDiskStatuses() []*DiskStatus {
@@ -4970,7 +5031,7 @@ type VolumeServerLeaveRequest struct {
 
 func (x *VolumeServerLeaveRequest) Reset() {
 	*x = VolumeServerLeaveRequest{}
-	mi := &file_volume_server_proto_msgTypes[87]
+	mi := &file_volume_server_proto_msgTypes[88]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
 }
@@ -4982,7 +5043,7 @@ func (x *VolumeServerLeaveRequest) String() string {
 
 func (*VolumeServerLeaveRequest) ProtoMessage() {}
 
 func (x *VolumeServerLeaveRequest) ProtoReflect() protoreflect.Message {
-	mi := &file_volume_server_proto_msgTypes[87]
+	mi := &file_volume_server_proto_msgTypes[88]
 	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -4995,7 +5056,7 @@ func (x *VolumeServerLeaveRequest) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use VolumeServerLeaveRequest.ProtoReflect.Descriptor instead.
 func (*VolumeServerLeaveRequest) Descriptor() ([]byte, []int) {
-	return file_volume_server_proto_rawDescGZIP(), []int{87}
+	return file_volume_server_proto_rawDescGZIP(), []int{88}
 }
 
 type VolumeServerLeaveResponse struct {
@@ -5006,7 +5067,7 @@ type VolumeServerLeaveResponse struct {
 
 func (x *VolumeServerLeaveResponse) Reset() {
 	*x = VolumeServerLeaveResponse{}
-	mi := &file_volume_server_proto_msgTypes[88]
+	mi := &file_volume_server_proto_msgTypes[89]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
 }
@@ -5018,7 +5079,7 @@ func (x *VolumeServerLeaveResponse) String() string {
 
 func (*VolumeServerLeaveResponse) ProtoMessage() {}
 
 func (x *VolumeServerLeaveResponse) ProtoReflect() protoreflect.Message {
-	mi := &file_volume_server_proto_msgTypes[88]
+	mi := &file_volume_server_proto_msgTypes[89]
 	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -5031,7 +5092,7 @@ func (x *VolumeServerLeaveResponse) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use VolumeServerLeaveResponse.ProtoReflect.Descriptor instead.
 func (*VolumeServerLeaveResponse) Descriptor() ([]byte, []int) {
-	return file_volume_server_proto_rawDescGZIP(), []int{88}
+	return file_volume_server_proto_rawDescGZIP(), []int{89}
 }
 
 // remote storage
@@ -5053,7 +5114,7 @@ type FetchAndWriteNeedleRequest struct {
 
 func (x *FetchAndWriteNeedleRequest) Reset() {
 	*x = FetchAndWriteNeedleRequest{}
-	mi := &file_volume_server_proto_msgTypes[89]
+	mi := &file_volume_server_proto_msgTypes[90]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
 }
@@ -5065,7 +5126,7 @@ func (x *FetchAndWriteNeedleRequest) String() string {
 
 func (*FetchAndWriteNeedleRequest) ProtoMessage() {}
 
 func (x *FetchAndWriteNeedleRequest) ProtoReflect() protoreflect.Message {
-	mi := &file_volume_server_proto_msgTypes[89]
+	mi := &file_volume_server_proto_msgTypes[90]
 	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -5078,7 +5139,7 @@ func (x *FetchAndWriteNeedleRequest) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use FetchAndWriteNeedleRequest.ProtoReflect.Descriptor instead.
 func (*FetchAndWriteNeedleRequest) Descriptor() ([]byte, []int) {
-	return file_volume_server_proto_rawDescGZIP(), []int{89}
+	return file_volume_server_proto_rawDescGZIP(), []int{90}
 }
 
 func (x *FetchAndWriteNeedleRequest) GetVolumeId() uint32 {
@@ -5153,7 +5214,7 @@ type FetchAndWriteNeedleResponse struct {
 
 func (x *FetchAndWriteNeedleResponse) Reset() {
 	*x = FetchAndWriteNeedleResponse{}
-	mi := &file_volume_server_proto_msgTypes[90]
+	mi := &file_volume_server_proto_msgTypes[91]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
 }
@@ -5165,7 +5226,7 @@ func (x *FetchAndWriteNeedleResponse) String() string {
 
 func (*FetchAndWriteNeedleResponse) ProtoMessage() {}
 
 func (x *FetchAndWriteNeedleResponse) ProtoReflect() protoreflect.Message {
-	mi := &file_volume_server_proto_msgTypes[90]
+	mi := &file_volume_server_proto_msgTypes[91]
 	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -5178,7 +5239,7 @@ func (x *FetchAndWriteNeedleResponse) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use FetchAndWriteNeedleResponse.ProtoReflect.Descriptor instead.
 func (*FetchAndWriteNeedleResponse) Descriptor() ([]byte, []int) {
-	return file_volume_server_proto_rawDescGZIP(), []int{90}
+	return file_volume_server_proto_rawDescGZIP(), []int{91}
 }
 
 func (x *FetchAndWriteNeedleResponse) GetETag() string {
@@ -5202,7 +5263,7 @@ type QueryRequest struct {
 
 func (x *QueryRequest) Reset() {
 	*x = QueryRequest{}
-	mi := &file_volume_server_proto_msgTypes[91]
+	mi := &file_volume_server_proto_msgTypes[92]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
 }
@@ -5214,7 +5275,7 @@ func (x *QueryRequest) String() string {
 
 func (*QueryRequest) ProtoMessage() {}
 
 func (x *QueryRequest) ProtoReflect() protoreflect.Message {
-	mi := &file_volume_server_proto_msgTypes[91]
+	mi := &file_volume_server_proto_msgTypes[92]
 	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -5227,7 +5288,7 @@ func (x *QueryRequest) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use QueryRequest.ProtoReflect.Descriptor instead.
 func (*QueryRequest) Descriptor() ([]byte, []int) {
-	return file_volume_server_proto_rawDescGZIP(), []int{91}
+	return file_volume_server_proto_rawDescGZIP(), []int{92}
 }
 
 func (x *QueryRequest) GetSelections() []string {
@@ -5274,7 +5335,7 @@ type QueriedStripe struct {
 
 func (x *QueriedStripe) Reset() {
 	*x = QueriedStripe{}
-	mi := &file_volume_server_proto_msgTypes[92]
+	mi := &file_volume_server_proto_msgTypes[93]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
 }
@@ -5286,7 +5347,7 @@ func (x *QueriedStripe) String() string {
 
 func (*QueriedStripe) ProtoMessage() {}
 
 func (x *QueriedStripe) ProtoReflect() protoreflect.Message {
-	mi := &file_volume_server_proto_msgTypes[92]
+	mi := &file_volume_server_proto_msgTypes[93]
 	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -5299,7 +5360,7 @@ func (x *QueriedStripe) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use QueriedStripe.ProtoReflect.Descriptor instead.
 func (*QueriedStripe) Descriptor() ([]byte, []int) {
-	return file_volume_server_proto_rawDescGZIP(), []int{92}
+	return file_volume_server_proto_rawDescGZIP(), []int{93}
 }
 
 func (x *QueriedStripe) GetRecords() []byte {
@@ -5319,7 +5380,7 @@ type VolumeNeedleStatusRequest struct {
 
 func (x *VolumeNeedleStatusRequest) Reset() {
 	*x = VolumeNeedleStatusRequest{}
-	mi := &file_volume_server_proto_msgTypes[93]
+	mi := &file_volume_server_proto_msgTypes[94]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
 }
@@ -5331,7 +5392,7 @@ func (x *VolumeNeedleStatusRequest) String() string {
 
 func (*VolumeNeedleStatusRequest) ProtoMessage() {}
 
 func (x *VolumeNeedleStatusRequest) ProtoReflect() protoreflect.Message {
-	mi := &file_volume_server_proto_msgTypes[93]
+	mi := &file_volume_server_proto_msgTypes[94]
 	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -5344,7 +5405,7 @@ func (x *VolumeNeedleStatusRequest) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use VolumeNeedleStatusRequest.ProtoReflect.Descriptor instead.
 func (*VolumeNeedleStatusRequest) Descriptor() ([]byte, []int) {
-	return file_volume_server_proto_rawDescGZIP(), []int{93}
+	return file_volume_server_proto_rawDescGZIP(), []int{94}
 }
 
 func (x *VolumeNeedleStatusRequest) GetVolumeId() uint32 {
@@ -5375,7 +5436,7 @@ type VolumeNeedleStatusResponse struct {
 
 func (x *VolumeNeedleStatusResponse) Reset() {
 	*x = VolumeNeedleStatusResponse{}
-	mi := &file_volume_server_proto_msgTypes[94]
+	mi := &file_volume_server_proto_msgTypes[95]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
 }
@@ -5387,7 +5448,7 @@ func (x *VolumeNeedleStatusResponse) String() string {
 
 func (*VolumeNeedleStatusResponse) ProtoMessage() {}
 
 func (x *VolumeNeedleStatusResponse) ProtoReflect() protoreflect.Message {
-	mi := &file_volume_server_proto_msgTypes[94]
+	mi := &file_volume_server_proto_msgTypes[95]
 	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -5400,7 +5461,7 @@ func (x *VolumeNeedleStatusResponse) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use VolumeNeedleStatusResponse.ProtoReflect.Descriptor instead.
 func (*VolumeNeedleStatusResponse) Descriptor() ([]byte, []int) {
-	return file_volume_server_proto_rawDescGZIP(), []int{94}
+	return file_volume_server_proto_rawDescGZIP(), []int{95}
 }
 
 func (x *VolumeNeedleStatusResponse) GetNeedleId() uint64 {
@@ -5455,7 +5516,7 @@ type PingRequest struct {
 
 func (x *PingRequest) Reset() {
 	*x = PingRequest{}
-	mi := &file_volume_server_proto_msgTypes[95]
+	mi := &file_volume_server_proto_msgTypes[96]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
 }
@@ -5467,7 +5528,7 @@ func (x *PingRequest) String() string {
 
 func (*PingRequest) ProtoMessage() {}
 
 func (x *PingRequest) ProtoReflect() protoreflect.Message {
-	mi := &file_volume_server_proto_msgTypes[95]
+	mi := &file_volume_server_proto_msgTypes[96]
 	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -5480,7 +5541,7 @@ func (x *PingRequest) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use PingRequest.ProtoReflect.Descriptor instead.
 func (*PingRequest) Descriptor() ([]byte, []int) {
-	return file_volume_server_proto_rawDescGZIP(), []int{95}
+	return file_volume_server_proto_rawDescGZIP(), []int{96}
 }
 
 func (x *PingRequest) GetTarget() string {
@@ -5508,7 +5569,7 @@ type PingResponse struct {
 
 func (x *PingResponse) Reset() {
 	*x = PingResponse{}
-	mi := &file_volume_server_proto_msgTypes[96]
+	mi := &file_volume_server_proto_msgTypes[97]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
 }
@@ -5520,7 +5581,7 @@ func (x *PingResponse) String() string {
 
 func (*PingResponse) ProtoMessage() {}
 
 func (x *PingResponse) ProtoReflect() protoreflect.Message {
-	mi := &file_volume_server_proto_msgTypes[96]
+	mi := &file_volume_server_proto_msgTypes[97]
 	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -5533,7 +5594,7 @@ func (x *PingResponse) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use PingResponse.ProtoReflect.Descriptor instead.
 func (*PingResponse) Descriptor() ([]byte, []int) {
-	return file_volume_server_proto_rawDescGZIP(), []int{96}
+	return file_volume_server_proto_rawDescGZIP(), []int{97}
 }
 
 func (x *PingResponse) GetStartTimeNs() int64 {
@@ -5568,7 +5629,7 @@ type FetchAndWriteNeedleRequest_Replica struct {
 
 func (x *FetchAndWriteNeedleRequest_Replica) Reset() {
 	*x = FetchAndWriteNeedleRequest_Replica{}
-	mi := &file_volume_server_proto_msgTypes[97]
+	mi := &file_volume_server_proto_msgTypes[98]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
 }
@@ -5580,7 +5641,7 @@ func (x *FetchAndWriteNeedleRequest_Replica) String() string {
 
 func (*FetchAndWriteNeedleRequest_Replica) ProtoMessage() {}
 
 func (x *FetchAndWriteNeedleRequest_Replica) ProtoReflect() protoreflect.Message {
-	mi := &file_volume_server_proto_msgTypes[97]
+	mi := &file_volume_server_proto_msgTypes[98]
 	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -5593,7 +5654,7 @@ func (x *FetchAndWriteNeedleRequest_Replica) ProtoReflect() protoreflect.Message
 
 // Deprecated: Use FetchAndWriteNeedleRequest_Replica.ProtoReflect.Descriptor instead.
 func (*FetchAndWriteNeedleRequest_Replica) Descriptor() ([]byte, []int) {
-	return file_volume_server_proto_rawDescGZIP(), []int{89, 0}
+	return file_volume_server_proto_rawDescGZIP(), []int{90, 0}
 }
 
 func (x *FetchAndWriteNeedleRequest_Replica) GetUrl() string {
@@ -5628,7 +5689,7 @@ type QueryRequest_Filter struct {
 
 func (x *QueryRequest_Filter) Reset() {
 	*x = QueryRequest_Filter{}
-	mi := &file_volume_server_proto_msgTypes[98]
+	mi := &file_volume_server_proto_msgTypes[99]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
 }
@@ -5640,7 +5701,7 @@ func (x *QueryRequest_Filter) String() string {
 
 func (*QueryRequest_Filter) ProtoMessage() {}
 
 func (x *QueryRequest_Filter) ProtoReflect() protoreflect.Message {
-	mi := &file_volume_server_proto_msgTypes[98]
+	mi := &file_volume_server_proto_msgTypes[99]
 	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -5653,7 +5714,7 @@ func (x *QueryRequest_Filter) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use QueryRequest_Filter.ProtoReflect.Descriptor instead.
 func (*QueryRequest_Filter) Descriptor() ([]byte, []int) {
-	return file_volume_server_proto_rawDescGZIP(), []int{91, 0}
+	return file_volume_server_proto_rawDescGZIP(), []int{92, 0}
 }
 
 func (x *QueryRequest_Filter) GetField() string {
@@ -5690,7 +5751,7 @@ type QueryRequest_InputSerialization struct {
 
 func (x *QueryRequest_InputSerialization) Reset() {
 	*x = QueryRequest_InputSerialization{}
-	mi := &file_volume_server_proto_msgTypes[99]
+	mi := &file_volume_server_proto_msgTypes[100]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
 }
@@ -5702,7 +5763,7 @@ func (x *QueryRequest_InputSerialization) String() string {
 
 func (*QueryRequest_InputSerialization) ProtoMessage() {}
 
 func (x *QueryRequest_InputSerialization) ProtoReflect() protoreflect.Message {
-	mi := &file_volume_server_proto_msgTypes[99]
+	mi := &file_volume_server_proto_msgTypes[100]
 	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -5715,7 +5776,7 @@ func (x *QueryRequest_InputSerialization) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use QueryRequest_InputSerialization.ProtoReflect.Descriptor instead.
 func (*QueryRequest_InputSerialization) Descriptor() ([]byte, []int) {
-	return file_volume_server_proto_rawDescGZIP(), []int{91, 1}
+	return file_volume_server_proto_rawDescGZIP(), []int{92, 1}
 }
 
 func (x *QueryRequest_InputSerialization) GetCompressionType() string {
@@ -5756,7 +5817,7 @@ type QueryRequest_OutputSerialization struct {
 
 func (x *QueryRequest_OutputSerialization) Reset() {
 	*x = QueryRequest_OutputSerialization{}
-	mi := &file_volume_server_proto_msgTypes[100]
+	mi := &file_volume_server_proto_msgTypes[101]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
 }
@@ -5768,7 +5829,7 @@ func (x *QueryRequest_OutputSerialization) String() string {
 
 func (*QueryRequest_OutputSerialization) ProtoMessage() {}
 
 func (x *QueryRequest_OutputSerialization) ProtoReflect() protoreflect.Message {
-	mi := &file_volume_server_proto_msgTypes[100]
+	mi := &file_volume_server_proto_msgTypes[101]
 	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -5781,7 +5842,7 @@ func (x *QueryRequest_OutputSerialization) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use QueryRequest_OutputSerialization.ProtoReflect.Descriptor instead.
 func (*QueryRequest_OutputSerialization) Descriptor() ([]byte, []int) {
-	return file_volume_server_proto_rawDescGZIP(), []int{91, 2}
+	return file_volume_server_proto_rawDescGZIP(), []int{92, 2}
 }
 
 func (x *QueryRequest_OutputSerialization) GetCsvOutput() *QueryRequest_OutputSerialization_CSVOutput {
@@ -5814,7 +5875,7 @@ type QueryRequest_InputSerialization_CSVInput struct {
 
 func (x *QueryRequest_InputSerialization_CSVInput) Reset() {
 	*x = QueryRequest_InputSerialization_CSVInput{}
-	mi := &file_volume_server_proto_msgTypes[101]
+	mi := &file_volume_server_proto_msgTypes[102]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
 }
@@ -5826,7 +5887,7 @@ func (x *QueryRequest_InputSerialization_CSVInput) String() string {
 
 func (*QueryRequest_InputSerialization_CSVInput) ProtoMessage() {}
 
 func (x *QueryRequest_InputSerialization_CSVInput) ProtoReflect() protoreflect.Message {
-	mi := &file_volume_server_proto_msgTypes[101]
+	mi := &file_volume_server_proto_msgTypes[102]
 	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -5839,7 +5900,7 @@ func (x *QueryRequest_InputSerialization_CSVInput) ProtoReflect() protoreflect.M
 
 // Deprecated: Use QueryRequest_InputSerialization_CSVInput.ProtoReflect.Descriptor instead.
 func (*QueryRequest_InputSerialization_CSVInput) Descriptor() ([]byte, []int) {
-	return file_volume_server_proto_rawDescGZIP(), []int{91, 1, 0}
+	return file_volume_server_proto_rawDescGZIP(), []int{92, 1, 0}
 }
 
 func (x *QueryRequest_InputSerialization_CSVInput) GetFileHeaderInfo() string {
@@ -5900,7 +5961,7 @@ type QueryRequest_InputSerialization_JSONInput struct {
 
 func (x *QueryRequest_InputSerialization_JSONInput) Reset() {
 	*x = QueryRequest_InputSerialization_JSONInput{}
-	mi := &file_volume_server_proto_msgTypes[102]
+	mi := &file_volume_server_proto_msgTypes[103]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
 }
@@ -5912,7 +5973,7 @@ func (x *QueryRequest_InputSerialization_JSONInput) String() string {
 
 func (*QueryRequest_InputSerialization_JSONInput) ProtoMessage() {}
 
 func (x *QueryRequest_InputSerialization_JSONInput) ProtoReflect() protoreflect.Message {
-	mi := &file_volume_server_proto_msgTypes[102]
+	mi := &file_volume_server_proto_msgTypes[103]
 	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -5925,7 +5986,7 @@ func (x *QueryRequest_InputSerialization_JSONInput) ProtoReflect() protoreflect.
 
 // Deprecated: Use QueryRequest_InputSerialization_JSONInput.ProtoReflect.Descriptor instead.
 func (*QueryRequest_InputSerialization_JSONInput) Descriptor() ([]byte, []int) {
-	return file_volume_server_proto_rawDescGZIP(), []int{91, 1, 1}
+	return file_volume_server_proto_rawDescGZIP(), []int{92, 1, 1}
 }
 
 func (x *QueryRequest_InputSerialization_JSONInput) GetType() string {
@@ -5943,7 +6004,7 @@ type QueryRequest_InputSerialization_ParquetInput struct {
 
 func (x *QueryRequest_InputSerialization_ParquetInput) Reset() {
 	*x = QueryRequest_InputSerialization_ParquetInput{}
-	mi := &file_volume_server_proto_msgTypes[103]
+	mi := &file_volume_server_proto_msgTypes[104]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
 }
@@ -5955,7 +6016,7 @@ func (x *QueryRequest_InputSerialization_ParquetInput) String() string {
 
 func (*QueryRequest_InputSerialization_ParquetInput) ProtoMessage() {}
 
 func (x *QueryRequest_InputSerialization_ParquetInput) ProtoReflect() protoreflect.Message {
-	mi := &file_volume_server_proto_msgTypes[103]
+	mi := &file_volume_server_proto_msgTypes[104]
 	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -5968,7 +6029,7 @@ func (x *QueryRequest_InputSerialization_ParquetInput) ProtoReflect() protorefle
 
 // Deprecated: Use QueryRequest_InputSerialization_ParquetInput.ProtoReflect.Descriptor instead.
 func (*QueryRequest_InputSerialization_ParquetInput) Descriptor() ([]byte, []int) {
-	return file_volume_server_proto_rawDescGZIP(), []int{91, 1, 2}
+	return file_volume_server_proto_rawDescGZIP(), []int{92, 1, 2}
 }
 
 type QueryRequest_OutputSerialization_CSVOutput struct {
@@ -5984,7 +6045,7 @@ type QueryRequest_OutputSerialization_CSVOutput struct {
 
 func (x *QueryRequest_OutputSerialization_CSVOutput) Reset() {
 	*x = QueryRequest_OutputSerialization_CSVOutput{}
-	mi := &file_volume_server_proto_msgTypes[104]
+	mi := &file_volume_server_proto_msgTypes[105]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
 }
@@ -5996,7 +6057,7 @@ func (x *QueryRequest_OutputSerialization_CSVOutput) String() string {
 
 func (*QueryRequest_OutputSerialization_CSVOutput) ProtoMessage() {}
 
 func (x *QueryRequest_OutputSerialization_CSVOutput) ProtoReflect() protoreflect.Message {
-	mi := &file_volume_server_proto_msgTypes[104]
+	mi := &file_volume_server_proto_msgTypes[105]
 	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -6009,7 +6070,7 @@ func (x *QueryRequest_OutputSerialization_CSVOutput) ProtoReflect() protoreflect
 
 // Deprecated: Use QueryRequest_OutputSerialization_CSVOutput.ProtoReflect.Descriptor instead.
 func (*QueryRequest_OutputSerialization_CSVOutput) Descriptor() ([]byte, []int) {
-	return file_volume_server_proto_rawDescGZIP(), []int{91, 2, 0}
+	return file_volume_server_proto_rawDescGZIP(), []int{92, 2, 0}
 }
 
 func (x *QueryRequest_OutputSerialization_CSVOutput) GetQuoteFields() string {
@@ -6056,7 +6117,7 @@ type QueryRequest_OutputSerialization_JSONOutput struct {
 
 func (x *QueryRequest_OutputSerialization_JSONOutput) Reset() {
 	*x = QueryRequest_OutputSerialization_JSONOutput{}
-	mi := &file_volume_server_proto_msgTypes[105]
+	mi := &file_volume_server_proto_msgTypes[106]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
 }
@@ -6068,7 +6129,7 @@ func (x *QueryRequest_OutputSerialization_JSONOutput) String() string {
 
 func (*QueryRequest_OutputSerialization_JSONOutput) ProtoMessage() {}
 
 func (x *QueryRequest_OutputSerialization_JSONOutput) ProtoReflect() protoreflect.Message {
-	mi := &file_volume_server_proto_msgTypes[105]
+	mi := &file_volume_server_proto_msgTypes[106]
 	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -6081,7 +6142,7 @@ func (x *QueryRequest_OutputSerialization_JSONOutput) ProtoReflect() protoreflec
 
 // Deprecated: Use QueryRequest_OutputSerialization_JSONOutput.ProtoReflect.Descriptor instead.
 func (*QueryRequest_OutputSerialization_JSONOutput) Descriptor() ([]byte, []int) {
-	return file_volume_server_proto_rawDescGZIP(), []int{91, 2, 1}
+	return file_volume_server_proto_rawDescGZIP(), []int{92, 2, 1}
 }
 
 func (x *QueryRequest_OutputSerialization_JSONOutput) GetRecordDelimiter() string {
@@ -6423,7 +6484,7 @@ const file_volume_server_proto_rawDesc = "" +
 	"\x06offset\x18\x04 \x01(\x04R\x06offset\x12\x1b\n" +
 	"\tfile_size\x18\x05 \x01(\x04R\bfileSize\x12#\n" +
 	"\rmodified_time\x18\x06 \x01(\x04R\fmodifiedTime\x12\x1c\n" +
-	"\textension\x18\a \x01(\tR\textension\"\x84\x02\n" +
+	"\textension\x18\a \x01(\tR\textension\"\xcd\x02\n" +
 	"\n" +
 	"VolumeInfo\x122\n" +
 	"\x05files\x18\x01 \x03(\v2\x1c.volume_server_pb.RemoteFileR\x05files\x12\x18\n" +
@@ -6432,7 +6493,12 @@ const file_volume_server_proto_rawDesc = "" +
 	"\fbytes_offset\x18\x04 \x01(\rR\vbytesOffset\x12\"\n" +
 	"\rdat_file_size\x18\x05 \x01(\x03R\vdatFileSize\x12\"\n" +
 	"\rexpire_at_sec\x18\x06 \x01(\x04R\vexpireAtSec\x12\x1b\n" +
-	"\tread_only\x18\a \x01(\bR\breadOnly\"\x8b\x02\n" +
+	"\tread_only\x18\a \x01(\bR\breadOnly\x12G\n" +
+	"\x0fec_shard_config\x18\b \x01(\v2\x1f.volume_server_pb.EcShardConfigR\recShardConfig\"U\n" +
+	"\rEcShardConfig\x12\x1f\n" +
+	"\vdata_shards\x18\x01 \x01(\rR\n" +
+	"dataShards\x12#\n" +
+	"\rparity_shards\x18\x02 \x01(\rR\fparityShards\"\x8b\x02\n" +
 	"\x14OldVersionVolumeInfo\x122\n" +
 	"\x05files\x18\x01 \x03(\v2\x1c.volume_server_pb.RemoteFileR\x05files\x12\x18\n" +
 	"\aversion\x18\x02 \x01(\rR\aversion\x12 \n" +
@@ -6611,7 +6677,7 @@ func file_volume_server_proto_rawDescGZIP() []byte {
 	return file_volume_server_proto_rawDescData
 }
 
-var file_volume_server_proto_msgTypes = make([]protoimpl.MessageInfo, 106)
+var file_volume_server_proto_msgTypes = make([]protoimpl.MessageInfo, 107)
 var file_volume_server_proto_goTypes = []any{
 	(*BatchDeleteRequest)(nil), // 0: volume_server_pb.BatchDeleteRequest
 	(*BatchDeleteResponse)(nil), // 1: volume_server_pb.BatchDeleteResponse
@@ -6693,34 +6759,35 @@ var file_volume_server_proto_goTypes = []any{
 	(*MemStatus)(nil), // 77: volume_server_pb.MemStatus
 	(*RemoteFile)(nil), // 78: volume_server_pb.RemoteFile
 	(*VolumeInfo)(nil), // 79: volume_server_pb.VolumeInfo
-	(*OldVersionVolumeInfo)(nil), // 80: volume_server_pb.OldVersionVolumeInfo
-	(*VolumeTierMoveDatToRemoteRequest)(nil), // 81: volume_server_pb.VolumeTierMoveDatToRemoteRequest
-	(*VolumeTierMoveDatToRemoteResponse)(nil), // 82: volume_server_pb.VolumeTierMoveDatToRemoteResponse
-	(*VolumeTierMoveDatFromRemoteRequest)(nil), // 83: volume_server_pb.VolumeTierMoveDatFromRemoteRequest
-	(*VolumeTierMoveDatFromRemoteResponse)(nil), // 84: volume_server_pb.VolumeTierMoveDatFromRemoteResponse
-	(*VolumeServerStatusRequest)(nil), // 85: volume_server_pb.VolumeServerStatusRequest
-	(*VolumeServerStatusResponse)(nil), // 86: volume_server_pb.VolumeServerStatusResponse
-	(*VolumeServerLeaveRequest)(nil), // 87: volume_server_pb.VolumeServerLeaveRequest
-	(*VolumeServerLeaveResponse)(nil), // 88: volume_server_pb.VolumeServerLeaveResponse
-	(*FetchAndWriteNeedleRequest)(nil), // 89: volume_server_pb.FetchAndWriteNeedleRequest
-	(*FetchAndWriteNeedleResponse)(nil), // 90: volume_server_pb.FetchAndWriteNeedleResponse
-	(*QueryRequest)(nil), // 91: volume_server_pb.QueryRequest
-	(*QueriedStripe)(nil), // 92: volume_server_pb.QueriedStripe
-	(*VolumeNeedleStatusRequest)(nil), // 93: volume_server_pb.VolumeNeedleStatusRequest
-	(*VolumeNeedleStatusResponse)(nil), // 94: volume_server_pb.VolumeNeedleStatusResponse
-	(*PingRequest)(nil), // 95: volume_server_pb.PingRequest
-	(*PingResponse)(nil), // 96: volume_server_pb.PingResponse
-	(*FetchAndWriteNeedleRequest_Replica)(nil), // 97: volume_server_pb.FetchAndWriteNeedleRequest.Replica
-	(*QueryRequest_Filter)(nil), // 98: volume_server_pb.QueryRequest.Filter
-	(*QueryRequest_InputSerialization)(nil), // 99: volume_server_pb.QueryRequest.InputSerialization
-	(*QueryRequest_OutputSerialization)(nil), // 100: volume_server_pb.QueryRequest.OutputSerialization
-	(*QueryRequest_InputSerialization_CSVInput)(nil), // 101: volume_server_pb.QueryRequest.InputSerialization.CSVInput
-	(*QueryRequest_InputSerialization_JSONInput)(nil), // 102: volume_server_pb.QueryRequest.InputSerialization.JSONInput
-	(*QueryRequest_InputSerialization_ParquetInput)(nil), // 103: volume_server_pb.QueryRequest.InputSerialization.ParquetInput
-	(*QueryRequest_OutputSerialization_CSVOutput)(nil), // 104: volume_server_pb.QueryRequest.OutputSerialization.CSVOutput
-	(*QueryRequest_OutputSerialization_JSONOutput)(nil), // 105: volume_server_pb.QueryRequest.OutputSerialization.JSONOutput
-	(*remote_pb.RemoteConf)(nil), // 106: remote_pb.RemoteConf
-	(*remote_pb.RemoteStorageLocation)(nil), // 107: remote_pb.RemoteStorageLocation
+	(*EcShardConfig)(nil), // 80: volume_server_pb.EcShardConfig
+	(*OldVersionVolumeInfo)(nil), // 81: volume_server_pb.OldVersionVolumeInfo
+	(*VolumeTierMoveDatToRemoteRequest)(nil), // 82: volume_server_pb.VolumeTierMoveDatToRemoteRequest
+	(*VolumeTierMoveDatToRemoteResponse)(nil), // 83: volume_server_pb.VolumeTierMoveDatToRemoteResponse
+	(*VolumeTierMoveDatFromRemoteRequest)(nil), // 84: volume_server_pb.VolumeTierMoveDatFromRemoteRequest
+	(*VolumeTierMoveDatFromRemoteResponse)(nil), // 85: volume_server_pb.VolumeTierMoveDatFromRemoteResponse
+	(*VolumeServerStatusRequest)(nil), // 86: volume_server_pb.VolumeServerStatusRequest
+	(*VolumeServerStatusResponse)(nil), // 87: volume_server_pb.VolumeServerStatusResponse
+	(*VolumeServerLeaveRequest)(nil), // 88: volume_server_pb.VolumeServerLeaveRequest
+	(*VolumeServerLeaveResponse)(nil), // 89: volume_server_pb.VolumeServerLeaveResponse
+	(*FetchAndWriteNeedleRequest)(nil), // 90: volume_server_pb.FetchAndWriteNeedleRequest
+	(*FetchAndWriteNeedleResponse)(nil), // 91: volume_server_pb.FetchAndWriteNeedleResponse
+	(*QueryRequest)(nil), // 92: volume_server_pb.QueryRequest
+	(*QueriedStripe)(nil), // 93: volume_server_pb.QueriedStripe
+	(*VolumeNeedleStatusRequest)(nil), // 94: volume_server_pb.VolumeNeedleStatusRequest
+	(*VolumeNeedleStatusResponse)(nil), // 95: volume_server_pb.VolumeNeedleStatusResponse
+	(*PingRequest)(nil), // 96: volume_server_pb.PingRequest
+	(*PingResponse)(nil), // 97: volume_server_pb.PingResponse
+	(*FetchAndWriteNeedleRequest_Replica)(nil), // 98: volume_server_pb.FetchAndWriteNeedleRequest.Replica
+	(*QueryRequest_Filter)(nil), // 99: volume_server_pb.QueryRequest.Filter
+	(*QueryRequest_InputSerialization)(nil), // 100: volume_server_pb.QueryRequest.InputSerialization
+	(*QueryRequest_OutputSerialization)(nil), // 101: volume_server_pb.QueryRequest.OutputSerialization
+	(*QueryRequest_InputSerialization_CSVInput)(nil), // 102: volume_server_pb.QueryRequest.InputSerialization.CSVInput
+	(*QueryRequest_InputSerialization_JSONInput)(nil), // 103: volume_server_pb.QueryRequest.InputSerialization.JSONInput
+	(*QueryRequest_InputSerialization_ParquetInput)(nil), // 104: volume_server_pb.QueryRequest.InputSerialization.ParquetInput
+	(*QueryRequest_OutputSerialization_CSVOutput)(nil), // 105: volume_server_pb.QueryRequest.OutputSerialization.CSVOutput
+	(*QueryRequest_OutputSerialization_JSONOutput)(nil), // 106: volume_server_pb.QueryRequest.OutputSerialization.JSONOutput
+	(*remote_pb.RemoteConf)(nil), // 107: remote_pb.RemoteConf
+	(*remote_pb.RemoteStorageLocation)(nil), // 108: remote_pb.RemoteStorageLocation
 }
 var file_volume_server_proto_depIdxs = []int32{
 	2, // 0: volume_server_pb.BatchDeleteResponse.results:type_name -> volume_server_pb.DeleteResult
@@ -6728,113 +6795,114 @@ var file_volume_server_proto_depIdxs = []int32{
 	73, // 2: volume_server_pb.VolumeEcShardsInfoResponse.ec_shard_infos:type_name -> volume_server_pb.EcShardInfo
 	79, // 3: volume_server_pb.ReadVolumeFileStatusResponse.volume_info:type_name -> volume_server_pb.VolumeInfo
 	78, // 4: volume_server_pb.VolumeInfo.files:type_name -> volume_server_pb.RemoteFile
-	78, // 5: volume_server_pb.OldVersionVolumeInfo.files:type_name -> volume_server_pb.RemoteFile
-	76, // 6: volume_server_pb.VolumeServerStatusResponse.disk_statuses:type_name -> volume_server_pb.DiskStatus
-	77, // 7: volume_server_pb.VolumeServerStatusResponse.memory_status:type_name -> volume_server_pb.MemStatus
-	97, // 8: volume_server_pb.FetchAndWriteNeedleRequest.replicas:type_name -> volume_server_pb.FetchAndWriteNeedleRequest.Replica
-	106, // 9: volume_server_pb.FetchAndWriteNeedleRequest.remote_conf:type_name -> remote_pb.RemoteConf
-	107, // 10: volume_server_pb.FetchAndWriteNeedleRequest.remote_location:type_name -> remote_pb.RemoteStorageLocation
-	98, // 11: volume_server_pb.QueryRequest.filter:type_name -> volume_server_pb.QueryRequest.Filter
-	99, // 12: volume_server_pb.QueryRequest.input_serialization:type_name -> volume_server_pb.QueryRequest.InputSerialization
-	100, // 13: volume_server_pb.QueryRequest.output_serialization:type_name -> volume_server_pb.QueryRequest.OutputSerialization
-	101, // 14: volume_server_pb.QueryRequest.InputSerialization.csv_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.CSVInput
-	102, // 15: volume_server_pb.QueryRequest.InputSerialization.json_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.JSONInput
-	103, // 16: volume_server_pb.QueryRequest.InputSerialization.parquet_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.ParquetInput
-	104, // 17: volume_server_pb.QueryRequest.OutputSerialization.csv_output:type_name -> volume_server_pb.QueryRequest.OutputSerialization.CSVOutput
-	105, // 18: volume_server_pb.QueryRequest.OutputSerialization.json_output:type_name -> volume_server_pb.QueryRequest.OutputSerialization.JSONOutput
-	0, // 19: volume_server_pb.VolumeServer.BatchDelete:input_type -> volume_server_pb.BatchDeleteRequest
-	4, // 20: volume_server_pb.VolumeServer.VacuumVolumeCheck:input_type -> volume_server_pb.VacuumVolumeCheckRequest
-	6, // 21: volume_server_pb.VolumeServer.VacuumVolumeCompact:input_type -> volume_server_pb.VacuumVolumeCompactRequest
-	8, // 22: volume_server_pb.VolumeServer.VacuumVolumeCommit:input_type -> volume_server_pb.VacuumVolumeCommitRequest
-	10, // 23: volume_server_pb.VolumeServer.VacuumVolumeCleanup:input_type -> volume_server_pb.VacuumVolumeCleanupRequest
-	12, // 24: volume_server_pb.VolumeServer.DeleteCollection:input_type -> volume_server_pb.DeleteCollectionRequest
-	14, // 25: volume_server_pb.VolumeServer.AllocateVolume:input_type -> volume_server_pb.AllocateVolumeRequest
-	16, // 26: volume_server_pb.VolumeServer.VolumeSyncStatus:input_type -> volume_server_pb.VolumeSyncStatusRequest
-	18, // 27: volume_server_pb.VolumeServer.VolumeIncrementalCopy:input_type -> volume_server_pb.VolumeIncrementalCopyRequest
-	20, // 28: volume_server_pb.VolumeServer.VolumeMount:input_type -> volume_server_pb.VolumeMountRequest
-	22, // 29: volume_server_pb.VolumeServer.VolumeUnmount:input_type -> volume_server_pb.VolumeUnmountRequest
-	24, // 30: volume_server_pb.VolumeServer.VolumeDelete:input_type -> volume_server_pb.VolumeDeleteRequest
-	26, // 31: volume_server_pb.VolumeServer.VolumeMarkReadonly:input_type -> volume_server_pb.VolumeMarkReadonlyRequest
-	28, // 32: volume_server_pb.VolumeServer.VolumeMarkWritable:input_type -> volume_server_pb.VolumeMarkWritableRequest
-	30, // 33: volume_server_pb.VolumeServer.VolumeConfigure:input_type -> volume_server_pb.VolumeConfigureRequest
-	32, // 34: volume_server_pb.VolumeServer.VolumeStatus:input_type -> volume_server_pb.VolumeStatusRequest
-	34, // 35: volume_server_pb.VolumeServer.VolumeCopy:input_type -> volume_server_pb.VolumeCopyRequest
-	74, // 36: volume_server_pb.VolumeServer.ReadVolumeFileStatus:input_type -> volume_server_pb.ReadVolumeFileStatusRequest
-	36, // 37: volume_server_pb.VolumeServer.CopyFile:input_type -> volume_server_pb.CopyFileRequest
-	38, // 38: volume_server_pb.VolumeServer.ReceiveFile:input_type -> volume_server_pb.ReceiveFileRequest
-	41, // 39: volume_server_pb.VolumeServer.ReadNeedleBlob:input_type -> volume_server_pb.ReadNeedleBlobRequest
-	43, // 40: volume_server_pb.VolumeServer.ReadNeedleMeta:input_type -> volume_server_pb.ReadNeedleMetaRequest
-	45, // 41: volume_server_pb.VolumeServer.WriteNeedleBlob:input_type -> volume_server_pb.WriteNeedleBlobRequest
-	47, // 42: volume_server_pb.VolumeServer.ReadAllNeedles:input_type -> volume_server_pb.ReadAllNeedlesRequest
-	49, // 43: volume_server_pb.VolumeServer.VolumeTailSender:input_type -> volume_server_pb.VolumeTailSenderRequest
-	51, // 44: volume_server_pb.VolumeServer.VolumeTailReceiver:input_type -> volume_server_pb.VolumeTailReceiverRequest
-	53, // 45: volume_server_pb.VolumeServer.VolumeEcShardsGenerate:input_type -> volume_server_pb.VolumeEcShardsGenerateRequest
-	55, // 46: volume_server_pb.VolumeServer.VolumeEcShardsRebuild:input_type -> volume_server_pb.VolumeEcShardsRebuildRequest
-	57, // 47: volume_server_pb.VolumeServer.VolumeEcShardsCopy:input_type -> volume_server_pb.VolumeEcShardsCopyRequest
-	59, // 48: volume_server_pb.VolumeServer.VolumeEcShardsDelete:input_type -> volume_server_pb.VolumeEcShardsDeleteRequest
-	61, // 49: volume_server_pb.VolumeServer.VolumeEcShardsMount:input_type -> volume_server_pb.VolumeEcShardsMountRequest
-	63, // 50: volume_server_pb.VolumeServer.VolumeEcShardsUnmount:input_type -> volume_server_pb.VolumeEcShardsUnmountRequest
-	65, // 51: volume_server_pb.VolumeServer.VolumeEcShardRead:input_type -> volume_server_pb.VolumeEcShardReadRequest
-	67, // 52: volume_server_pb.VolumeServer.VolumeEcBlobDelete:input_type -> volume_server_pb.VolumeEcBlobDeleteRequest
-	69, // 53: volume_server_pb.VolumeServer.VolumeEcShardsToVolume:input_type -> volume_server_pb.VolumeEcShardsToVolumeRequest
-	71, // 54: volume_server_pb.VolumeServer.VolumeEcShardsInfo:input_type -> volume_server_pb.VolumeEcShardsInfoRequest
-	81, // 55: volume_server_pb.VolumeServer.VolumeTierMoveDatToRemote:input_type -> volume_server_pb.VolumeTierMoveDatToRemoteRequest
-	83, // 56: volume_server_pb.VolumeServer.VolumeTierMoveDatFromRemote:input_type -> volume_server_pb.VolumeTierMoveDatFromRemoteRequest
-	85, // 57: volume_server_pb.VolumeServer.VolumeServerStatus:input_type -> volume_server_pb.VolumeServerStatusRequest
-	87, // 58: volume_server_pb.VolumeServer.VolumeServerLeave:input_type -> volume_server_pb.VolumeServerLeaveRequest
-	89, // 59: volume_server_pb.VolumeServer.FetchAndWriteNeedle:input_type -> volume_server_pb.FetchAndWriteNeedleRequest
-	91, // 60: volume_server_pb.VolumeServer.Query:input_type -> volume_server_pb.QueryRequest
-	93, // 61: volume_server_pb.VolumeServer.VolumeNeedleStatus:input_type -> volume_server_pb.VolumeNeedleStatusRequest
-	95, // 62: volume_server_pb.VolumeServer.Ping:input_type -> volume_server_pb.PingRequest
-	1, // 63: volume_server_pb.VolumeServer.BatchDelete:output_type -> volume_server_pb.BatchDeleteResponse
-	5, // 64: volume_server_pb.VolumeServer.VacuumVolumeCheck:output_type -> volume_server_pb.VacuumVolumeCheckResponse
-	7, // 65: volume_server_pb.VolumeServer.VacuumVolumeCompact:output_type -> volume_server_pb.VacuumVolumeCompactResponse
-	9, // 66: volume_server_pb.VolumeServer.VacuumVolumeCommit:output_type -> volume_server_pb.VacuumVolumeCommitResponse
-	11, // 67: volume_server_pb.VolumeServer.VacuumVolumeCleanup:output_type -> volume_server_pb.VacuumVolumeCleanupResponse
-	13, // 68: volume_server_pb.VolumeServer.DeleteCollection:output_type -> volume_server_pb.DeleteCollectionResponse
-	15, // 69: volume_server_pb.VolumeServer.AllocateVolume:output_type -> volume_server_pb.AllocateVolumeResponse
-	17, // 70: volume_server_pb.VolumeServer.VolumeSyncStatus:output_type -> volume_server_pb.VolumeSyncStatusResponse
-	19, // 71: volume_server_pb.VolumeServer.VolumeIncrementalCopy:output_type -> volume_server_pb.VolumeIncrementalCopyResponse
-	21, // 72: volume_server_pb.VolumeServer.VolumeMount:output_type -> volume_server_pb.VolumeMountResponse
-	23, // 73: volume_server_pb.VolumeServer.VolumeUnmount:output_type -> volume_server_pb.VolumeUnmountResponse
-	25, // 74: volume_server_pb.VolumeServer.VolumeDelete:output_type -> volume_server_pb.VolumeDeleteResponse
-	27, // 75: volume_server_pb.VolumeServer.VolumeMarkReadonly:output_type -> volume_server_pb.VolumeMarkReadonlyResponse
-	29, // 76: volume_server_pb.VolumeServer.VolumeMarkWritable:output_type -> volume_server_pb.VolumeMarkWritableResponse
-	31, // 77: volume_server_pb.VolumeServer.VolumeConfigure:output_type -> volume_server_pb.VolumeConfigureResponse
-	33, // 78: volume_server_pb.VolumeServer.VolumeStatus:output_type -> volume_server_pb.VolumeStatusResponse
-	35, // 79: volume_server_pb.VolumeServer.VolumeCopy:output_type -> volume_server_pb.VolumeCopyResponse
-	75, // 80: volume_server_pb.VolumeServer.ReadVolumeFileStatus:output_type -> volume_server_pb.ReadVolumeFileStatusResponse
-	37, // 81: volume_server_pb.VolumeServer.CopyFile:output_type -> volume_server_pb.CopyFileResponse
-	40, // 82: volume_server_pb.VolumeServer.ReceiveFile:output_type -> volume_server_pb.ReceiveFileResponse
-	42, // 83: volume_server_pb.VolumeServer.ReadNeedleBlob:output_type -> volume_server_pb.ReadNeedleBlobResponse
-	44, // 84: volume_server_pb.VolumeServer.ReadNeedleMeta:output_type -> volume_server_pb.ReadNeedleMetaResponse
-	46, // 85: volume_server_pb.VolumeServer.WriteNeedleBlob:output_type -> volume_server_pb.WriteNeedleBlobResponse
-	48, // 86: volume_server_pb.VolumeServer.ReadAllNeedles:output_type -> volume_server_pb.ReadAllNeedlesResponse
-	50, // 87: volume_server_pb.VolumeServer.VolumeTailSender:output_type -> volume_server_pb.VolumeTailSenderResponse
-	52, // 88: volume_server_pb.VolumeServer.VolumeTailReceiver:output_type -> volume_server_pb.VolumeTailReceiverResponse
-	54, // 89: volume_server_pb.VolumeServer.VolumeEcShardsGenerate:output_type -> volume_server_pb.VolumeEcShardsGenerateResponse
-	56, // 90: volume_server_pb.VolumeServer.VolumeEcShardsRebuild:output_type -> volume_server_pb.VolumeEcShardsRebuildResponse
-	58, // 91: volume_server_pb.VolumeServer.VolumeEcShardsCopy:output_type -> volume_server_pb.VolumeEcShardsCopyResponse
-	60, // 92: volume_server_pb.VolumeServer.VolumeEcShardsDelete:output_type -> volume_server_pb.VolumeEcShardsDeleteResponse
-	62, // 93: volume_server_pb.VolumeServer.VolumeEcShardsMount:output_type -> volume_server_pb.VolumeEcShardsMountResponse
-	64, // 94: volume_server_pb.VolumeServer.VolumeEcShardsUnmount:output_type -> volume_server_pb.VolumeEcShardsUnmountResponse
-	66, // 95: volume_server_pb.VolumeServer.VolumeEcShardRead:output_type -> volume_server_pb.VolumeEcShardReadResponse
-	68, // 96: volume_server_pb.VolumeServer.VolumeEcBlobDelete:output_type -> volume_server_pb.VolumeEcBlobDeleteResponse
-	70, // 97: volume_server_pb.VolumeServer.VolumeEcShardsToVolume:output_type -> volume_server_pb.VolumeEcShardsToVolumeResponse
-	72, // 98: volume_server_pb.VolumeServer.VolumeEcShardsInfo:output_type -> volume_server_pb.VolumeEcShardsInfoResponse
-	82, // 99: volume_server_pb.VolumeServer.VolumeTierMoveDatToRemote:output_type -> volume_server_pb.VolumeTierMoveDatToRemoteResponse
-	84, // 100: volume_server_pb.VolumeServer.VolumeTierMoveDatFromRemote:output_type -> volume_server_pb.VolumeTierMoveDatFromRemoteResponse
-	86, // 101: volume_server_pb.VolumeServer.VolumeServerStatus:output_type -> volume_server_pb.VolumeServerStatusResponse
-	88, // 102: volume_server_pb.VolumeServer.VolumeServerLeave:output_type -> volume_server_pb.VolumeServerLeaveResponse
-	90, // 103: volume_server_pb.VolumeServer.FetchAndWriteNeedle:output_type -> volume_server_pb.FetchAndWriteNeedleResponse
-	92, // 104: volume_server_pb.VolumeServer.Query:output_type -> volume_server_pb.QueriedStripe
-	94, // 105: volume_server_pb.VolumeServer.VolumeNeedleStatus:output_type -> volume_server_pb.VolumeNeedleStatusResponse
-	96, // 106: volume_server_pb.VolumeServer.Ping:output_type -> volume_server_pb.PingResponse
-	63, // [63:107] is the sub-list for method output_type
-	19, // [19:63] is the sub-list for method input_type
-	19, // [19:19] is the sub-list for extension type_name
-	19, // [19:19] is the sub-list for extension extendee
-	0, // [0:19] is the sub-list for field type_name
+	80, // 5: volume_server_pb.VolumeInfo.ec_shard_config:type_name -> volume_server_pb.EcShardConfig
+	78, // 6: volume_server_pb.OldVersionVolumeInfo.files:type_name -> volume_server_pb.RemoteFile
+	76, // 7: volume_server_pb.VolumeServerStatusResponse.disk_statuses:type_name -> volume_server_pb.DiskStatus
+	77, // 8: volume_server_pb.VolumeServerStatusResponse.memory_status:type_name -> volume_server_pb.MemStatus
+	98, // 9: volume_server_pb.FetchAndWriteNeedleRequest.replicas:type_name -> volume_server_pb.FetchAndWriteNeedleRequest.Replica
+	107, // 10: volume_server_pb.FetchAndWriteNeedleRequest.remote_conf:type_name -> remote_pb.RemoteConf
+	108, // 11: volume_server_pb.FetchAndWriteNeedleRequest.remote_location:type_name -> remote_pb.RemoteStorageLocation
+	99, // 12: volume_server_pb.QueryRequest.filter:type_name -> volume_server_pb.QueryRequest.Filter
+	100, // 13: volume_server_pb.QueryRequest.input_serialization:type_name -> volume_server_pb.QueryRequest.InputSerialization
+	101, // 14: volume_server_pb.QueryRequest.output_serialization:type_name -> volume_server_pb.QueryRequest.OutputSerialization
+	102, // 15: volume_server_pb.QueryRequest.InputSerialization.csv_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.CSVInput
+	103, // 16: volume_server_pb.QueryRequest.InputSerialization.json_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.JSONInput
+	104, // 17: volume_server_pb.QueryRequest.InputSerialization.parquet_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.ParquetInput
+	105, // 18: volume_server_pb.QueryRequest.OutputSerialization.csv_output:type_name -> volume_server_pb.QueryRequest.OutputSerialization.CSVOutput
+	106, // 19: volume_server_pb.QueryRequest.OutputSerialization.json_output:type_name -> volume_server_pb.QueryRequest.OutputSerialization.JSONOutput
+	0, // 20: volume_server_pb.VolumeServer.BatchDelete:input_type -> volume_server_pb.BatchDeleteRequest
+	4, // 21: volume_server_pb.VolumeServer.VacuumVolumeCheck:input_type -> volume_server_pb.VacuumVolumeCheckRequest
+	6, // 22: volume_server_pb.VolumeServer.VacuumVolumeCompact:input_type -> volume_server_pb.VacuumVolumeCompactRequest
+	8, // 23: volume_server_pb.VolumeServer.VacuumVolumeCommit:input_type -> volume_server_pb.VacuumVolumeCommitRequest
+	10, // 24: volume_server_pb.VolumeServer.VacuumVolumeCleanup:input_type -> volume_server_pb.VacuumVolumeCleanupRequest
+	12, // 25: volume_server_pb.VolumeServer.DeleteCollection:input_type -> volume_server_pb.DeleteCollectionRequest
+	14, // 26: volume_server_pb.VolumeServer.AllocateVolume:input_type -> volume_server_pb.AllocateVolumeRequest
+	16, // 27: volume_server_pb.VolumeServer.VolumeSyncStatus:input_type -> volume_server_pb.VolumeSyncStatusRequest
+	18, // 28: volume_server_pb.VolumeServer.VolumeIncrementalCopy:input_type -> volume_server_pb.VolumeIncrementalCopyRequest
+	20, // 29: volume_server_pb.VolumeServer.VolumeMount:input_type -> volume_server_pb.VolumeMountRequest
+	22, // 30: volume_server_pb.VolumeServer.VolumeUnmount:input_type -> volume_server_pb.VolumeUnmountRequest
+	24, // 31: volume_server_pb.VolumeServer.VolumeDelete:input_type -> volume_server_pb.VolumeDeleteRequest
+	26, // 32: volume_server_pb.VolumeServer.VolumeMarkReadonly:input_type -> volume_server_pb.VolumeMarkReadonlyRequest
+	28, // 33: volume_server_pb.VolumeServer.VolumeMarkWritable:input_type -> volume_server_pb.VolumeMarkWritableRequest
+	30, // 34: volume_server_pb.VolumeServer.VolumeConfigure:input_type -> volume_server_pb.VolumeConfigureRequest
+	32, // 35: volume_server_pb.VolumeServer.VolumeStatus:input_type -> volume_server_pb.VolumeStatusRequest
+	34, // 36: volume_server_pb.VolumeServer.VolumeCopy:input_type -> volume_server_pb.VolumeCopyRequest
+	74, // 37: volume_server_pb.VolumeServer.ReadVolumeFileStatus:input_type -> volume_server_pb.ReadVolumeFileStatusRequest
+	36, // 38: volume_server_pb.VolumeServer.CopyFile:input_type -> volume_server_pb.CopyFileRequest
+	38, // 39: volume_server_pb.VolumeServer.ReceiveFile:input_type -> volume_server_pb.ReceiveFileRequest
+	41, // 40: volume_server_pb.VolumeServer.ReadNeedleBlob:input_type -> volume_server_pb.ReadNeedleBlobRequest
+	43, // 41: volume_server_pb.VolumeServer.ReadNeedleMeta:input_type -> volume_server_pb.ReadNeedleMetaRequest
+	45, // 42: volume_server_pb.VolumeServer.WriteNeedleBlob:input_type -> volume_server_pb.WriteNeedleBlobRequest
+	47, // 43: volume_server_pb.VolumeServer.ReadAllNeedles:input_type -> volume_server_pb.ReadAllNeedlesRequest
+	49, // 44: volume_server_pb.VolumeServer.VolumeTailSender:input_type -> volume_server_pb.VolumeTailSenderRequest
+	51, // 45: volume_server_pb.VolumeServer.VolumeTailReceiver:input_type -> volume_server_pb.VolumeTailReceiverRequest
+	53, // 46: volume_server_pb.VolumeServer.VolumeEcShardsGenerate:input_type -> volume_server_pb.VolumeEcShardsGenerateRequest
+	55, // 47: volume_server_pb.VolumeServer.VolumeEcShardsRebuild:input_type -> volume_server_pb.VolumeEcShardsRebuildRequest
+	57, // 48: volume_server_pb.VolumeServer.VolumeEcShardsCopy:input_type -> volume_server_pb.VolumeEcShardsCopyRequest
+	59, // 49: volume_server_pb.VolumeServer.VolumeEcShardsDelete:input_type -> volume_server_pb.VolumeEcShardsDeleteRequest
+	61, // 50: volume_server_pb.VolumeServer.VolumeEcShardsMount:input_type -> volume_server_pb.VolumeEcShardsMountRequest
+	63, // 51: volume_server_pb.VolumeServer.VolumeEcShardsUnmount:input_type -> volume_server_pb.VolumeEcShardsUnmountRequest
+	65, // 52: volume_server_pb.VolumeServer.VolumeEcShardRead:input_type -> volume_server_pb.VolumeEcShardReadRequest
+	67, // 53: volume_server_pb.VolumeServer.VolumeEcBlobDelete:input_type -> volume_server_pb.VolumeEcBlobDeleteRequest
+	69, // 54: volume_server_pb.VolumeServer.VolumeEcShardsToVolume:input_type -> volume_server_pb.VolumeEcShardsToVolumeRequest
+	71, // 55: volume_server_pb.VolumeServer.VolumeEcShardsInfo:input_type -> volume_server_pb.VolumeEcShardsInfoRequest
+	82, // 56: volume_server_pb.VolumeServer.VolumeTierMoveDatToRemote:input_type -> volume_server_pb.VolumeTierMoveDatToRemoteRequest
+	84, // 57: volume_server_pb.VolumeServer.VolumeTierMoveDatFromRemote:input_type -> volume_server_pb.VolumeTierMoveDatFromRemoteRequest
+	86, // 58: volume_server_pb.VolumeServer.VolumeServerStatus:input_type -> volume_server_pb.VolumeServerStatusRequest
+	88, // 59: volume_server_pb.VolumeServer.VolumeServerLeave:input_type -> volume_server_pb.VolumeServerLeaveRequest
+	90, // 60: volume_server_pb.VolumeServer.FetchAndWriteNeedle:input_type -> volume_server_pb.FetchAndWriteNeedleRequest
+	92, // 61:
volume_server_pb.VolumeServer.Query:input_type -> volume_server_pb.QueryRequest + 94, // 62: volume_server_pb.VolumeServer.VolumeNeedleStatus:input_type -> volume_server_pb.VolumeNeedleStatusRequest + 96, // 63: volume_server_pb.VolumeServer.Ping:input_type -> volume_server_pb.PingRequest + 1, // 64: volume_server_pb.VolumeServer.BatchDelete:output_type -> volume_server_pb.BatchDeleteResponse + 5, // 65: volume_server_pb.VolumeServer.VacuumVolumeCheck:output_type -> volume_server_pb.VacuumVolumeCheckResponse + 7, // 66: volume_server_pb.VolumeServer.VacuumVolumeCompact:output_type -> volume_server_pb.VacuumVolumeCompactResponse + 9, // 67: volume_server_pb.VolumeServer.VacuumVolumeCommit:output_type -> volume_server_pb.VacuumVolumeCommitResponse + 11, // 68: volume_server_pb.VolumeServer.VacuumVolumeCleanup:output_type -> volume_server_pb.VacuumVolumeCleanupResponse + 13, // 69: volume_server_pb.VolumeServer.DeleteCollection:output_type -> volume_server_pb.DeleteCollectionResponse + 15, // 70: volume_server_pb.VolumeServer.AllocateVolume:output_type -> volume_server_pb.AllocateVolumeResponse + 17, // 71: volume_server_pb.VolumeServer.VolumeSyncStatus:output_type -> volume_server_pb.VolumeSyncStatusResponse + 19, // 72: volume_server_pb.VolumeServer.VolumeIncrementalCopy:output_type -> volume_server_pb.VolumeIncrementalCopyResponse + 21, // 73: volume_server_pb.VolumeServer.VolumeMount:output_type -> volume_server_pb.VolumeMountResponse + 23, // 74: volume_server_pb.VolumeServer.VolumeUnmount:output_type -> volume_server_pb.VolumeUnmountResponse + 25, // 75: volume_server_pb.VolumeServer.VolumeDelete:output_type -> volume_server_pb.VolumeDeleteResponse + 27, // 76: volume_server_pb.VolumeServer.VolumeMarkReadonly:output_type -> volume_server_pb.VolumeMarkReadonlyResponse + 29, // 77: volume_server_pb.VolumeServer.VolumeMarkWritable:output_type -> volume_server_pb.VolumeMarkWritableResponse + 31, // 78: volume_server_pb.VolumeServer.VolumeConfigure:output_type -> volume_server_pb.VolumeConfigureResponse + 33, // 79: volume_server_pb.VolumeServer.VolumeStatus:output_type -> volume_server_pb.VolumeStatusResponse + 35, // 80: volume_server_pb.VolumeServer.VolumeCopy:output_type -> volume_server_pb.VolumeCopyResponse + 75, // 81: volume_server_pb.VolumeServer.ReadVolumeFileStatus:output_type -> volume_server_pb.ReadVolumeFileStatusResponse + 37, // 82: volume_server_pb.VolumeServer.CopyFile:output_type -> volume_server_pb.CopyFileResponse + 40, // 83: volume_server_pb.VolumeServer.ReceiveFile:output_type -> volume_server_pb.ReceiveFileResponse + 42, // 84: volume_server_pb.VolumeServer.ReadNeedleBlob:output_type -> volume_server_pb.ReadNeedleBlobResponse + 44, // 85: volume_server_pb.VolumeServer.ReadNeedleMeta:output_type -> volume_server_pb.ReadNeedleMetaResponse + 46, // 86: volume_server_pb.VolumeServer.WriteNeedleBlob:output_type -> volume_server_pb.WriteNeedleBlobResponse + 48, // 87: volume_server_pb.VolumeServer.ReadAllNeedles:output_type -> volume_server_pb.ReadAllNeedlesResponse + 50, // 88: volume_server_pb.VolumeServer.VolumeTailSender:output_type -> volume_server_pb.VolumeTailSenderResponse + 52, // 89: volume_server_pb.VolumeServer.VolumeTailReceiver:output_type -> volume_server_pb.VolumeTailReceiverResponse + 54, // 90: volume_server_pb.VolumeServer.VolumeEcShardsGenerate:output_type -> volume_server_pb.VolumeEcShardsGenerateResponse + 56, // 91: volume_server_pb.VolumeServer.VolumeEcShardsRebuild:output_type -> volume_server_pb.VolumeEcShardsRebuildResponse + 58, // 92: 
+ 58, // 92: volume_server_pb.VolumeServer.VolumeEcShardsCopy:output_type -> volume_server_pb.VolumeEcShardsCopyResponse
+ 60, // 93: volume_server_pb.VolumeServer.VolumeEcShardsDelete:output_type -> volume_server_pb.VolumeEcShardsDeleteResponse
+ 62, // 94: volume_server_pb.VolumeServer.VolumeEcShardsMount:output_type -> volume_server_pb.VolumeEcShardsMountResponse
+ 64, // 95: volume_server_pb.VolumeServer.VolumeEcShardsUnmount:output_type -> volume_server_pb.VolumeEcShardsUnmountResponse
+ 66, // 96: volume_server_pb.VolumeServer.VolumeEcShardRead:output_type -> volume_server_pb.VolumeEcShardReadResponse
+ 68, // 97: volume_server_pb.VolumeServer.VolumeEcBlobDelete:output_type -> volume_server_pb.VolumeEcBlobDeleteResponse
+ 70, // 98: volume_server_pb.VolumeServer.VolumeEcShardsToVolume:output_type -> volume_server_pb.VolumeEcShardsToVolumeResponse
+ 72, // 99: volume_server_pb.VolumeServer.VolumeEcShardsInfo:output_type -> volume_server_pb.VolumeEcShardsInfoResponse
+ 83, // 100: volume_server_pb.VolumeServer.VolumeTierMoveDatToRemote:output_type -> volume_server_pb.VolumeTierMoveDatToRemoteResponse
+ 85, // 101: volume_server_pb.VolumeServer.VolumeTierMoveDatFromRemote:output_type -> volume_server_pb.VolumeTierMoveDatFromRemoteResponse
+ 87, // 102: volume_server_pb.VolumeServer.VolumeServerStatus:output_type -> volume_server_pb.VolumeServerStatusResponse
+ 89, // 103: volume_server_pb.VolumeServer.VolumeServerLeave:output_type -> volume_server_pb.VolumeServerLeaveResponse
+ 91, // 104: volume_server_pb.VolumeServer.FetchAndWriteNeedle:output_type -> volume_server_pb.FetchAndWriteNeedleResponse
+ 93, // 105: volume_server_pb.VolumeServer.Query:output_type -> volume_server_pb.QueriedStripe
+ 95, // 106: volume_server_pb.VolumeServer.VolumeNeedleStatus:output_type -> volume_server_pb.VolumeNeedleStatusResponse
+ 97, // 107: volume_server_pb.VolumeServer.Ping:output_type -> volume_server_pb.PingResponse
+ 64, // [64:108] is the sub-list for method output_type
+ 20, // [20:64] is the sub-list for method input_type
+ 20, // [20:20] is the sub-list for extension type_name
+ 20, // [20:20] is the sub-list for extension extendee
+ 0, // [0:20] is the sub-list for field type_name
}

func init() { file_volume_server_proto_init() }
@@ -6852,7 +6920,7 @@ func file_volume_server_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_volume_server_proto_rawDesc), len(file_volume_server_proto_rawDesc)),
NumEnums: 0,
- NumMessages: 106,
+ NumMessages: 107,
NumExtensions: 0,
NumServices: 1,
},
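With the regenerated bindings above, Go callers can populate the new field directly. A minimal sketch of a custom configuration on the wire, assuming illustrative 6+3 values (not part of this patch):

// Sketch: a VolumeInfo carrying a custom 6+3 EC ratio (values illustrative).
// A nil EcShardConfig means readers fall back to the default 10+4.
volumeInfo := &volume_server_pb.VolumeInfo{
	EcShardConfig: &volume_server_pb.EcShardConfig{
		DataShards:   6, // any d > 0
		ParityShards: 3, // any p > 0, provided d+p <= 32 (MaxShardCount)
	},
}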
diff --git a/weed/server/volume_grpc_erasure_coding.go b/weed/server/volume_grpc_erasure_coding.go
index 88e94115d..5d100bdda 100644
--- a/weed/server/volume_grpc_erasure_coding.go
+++ b/weed/server/volume_grpc_erasure_coding.go
@@ -50,20 +50,38 @@ func (vs *VolumeServer) VolumeEcShardsGenerate(ctx context.Context, req *volume_
return nil, fmt.Errorf("existing collection:%v unexpected input: %v", v.Collection, req.Collection)
}
+ // Create EC context - prefer existing .vif config if present (for regeneration scenarios)
+ ecCtx := erasure_coding.NewDefaultECContext(req.Collection, needle.VolumeId(req.VolumeId))
+ if volumeInfo, _, found, _ := volume_info.MaybeLoadVolumeInfo(baseFileName + ".vif"); found && volumeInfo.EcShardConfig != nil {
+ ds := int(volumeInfo.EcShardConfig.DataShards)
+ ps := int(volumeInfo.EcShardConfig.ParityShards)
+
+ // Validate and use existing EC config
+ if ds > 0 && ps > 0 && ds+ps <= erasure_coding.MaxShardCount {
+ ecCtx.DataShards = ds
+ ecCtx.ParityShards = ps
+ glog.V(0).Infof("Using existing EC config for volume %d: %s", req.VolumeId, ecCtx.String())
+ } else {
+ glog.Warningf("Invalid EC config in .vif for volume %d (data=%d, parity=%d), using defaults", req.VolumeId, ds, ps)
+ }
+ } else {
+ glog.V(0).Infof("Using default EC config for volume %d: %s", req.VolumeId, ecCtx.String())
+ }
+
shouldCleanup := true
defer func() {
if !shouldCleanup {
return
}
- for i := 0; i < erasure_coding.TotalShardsCount; i++ {
- os.Remove(fmt.Sprintf("%s.ec%2d", baseFileName, i))
+ for i := 0; i < ecCtx.Total(); i++ {
+ os.Remove(baseFileName + ecCtx.ToExt(i))
}
os.Remove(v.IndexFileName() + ".ecx")
}()
- // write .ec00 ~ .ec13 files
- if err := erasure_coding.WriteEcFiles(baseFileName); err != nil {
- return nil, fmt.Errorf("WriteEcFiles %s: %v", baseFileName, err)
+ // write .ec00 ~ .ec[TotalShards-1] files using context
+ if err := erasure_coding.WriteEcFilesWithContext(baseFileName, ecCtx); err != nil {
+ return nil, fmt.Errorf("WriteEcFilesWithContext %s: %v", baseFileName, err)
}
// write .ecx file
@@ -84,6 +102,21 @@ func (vs *VolumeServer) VolumeEcShardsGenerate(ctx context.Context, req *volume_
datSize, _, _ := v.FileStat()
volumeInfo.DatFileSize = int64(datSize)
+
+ // Validate EC configuration before saving to .vif
+ if ecCtx.DataShards <= 0 || ecCtx.ParityShards <= 0 || ecCtx.Total() > erasure_coding.MaxShardCount {
+ return nil, fmt.Errorf("invalid EC config before saving: data=%d, parity=%d, total=%d (max=%d)",
+ ecCtx.DataShards, ecCtx.ParityShards, ecCtx.Total(), erasure_coding.MaxShardCount)
+ }
+
+ // Save EC configuration to VolumeInfo
+ volumeInfo.EcShardConfig = &volume_server_pb.EcShardConfig{
+ DataShards: uint32(ecCtx.DataShards),
+ ParityShards: uint32(ecCtx.ParityShards),
+ }
+ glog.V(1).Infof("Saving EC config to .vif for volume %d: %d+%d (total: %d)",
+ req.VolumeId, ecCtx.DataShards, ecCtx.ParityShards, ecCtx.Total())
+
if err := volume_info.SaveVolumeInfo(baseFileName+".vif", volumeInfo); err != nil {
return nil, fmt.Errorf("SaveVolumeInfo %s: %v", baseFileName, err)
}
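The ds/ps bounds check above recurs at every site in this patch that reads a .vif config. A hypothetical helper capturing the invariant, for illustration only (the patch deliberately inlines the condition at each call site):

// validEcConfig reports whether a data/parity split fits in the uint32 ShardBits.
// Hypothetical helper, not part of the patch.
func validEcConfig(ds, ps int) bool {
	return ds > 0 && ps > 0 && ds+ps <= erasure_coding.MaxShardCount // max 32
}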
@@ -442,9 +475,10 @@ func (vs *VolumeServer) VolumeEcShardsToVolume(ctx context.Context, req *volume_
glog.V(0).Infof("VolumeEcShardsToVolume: %v", req)
- // collect .ec00 ~ .ec09 files
- shardFileNames := make([]string, erasure_coding.DataShardsCount)
- v, found := vs.store.CollectEcShards(needle.VolumeId(req.VolumeId), shardFileNames)
+ // Collect all EC shards (NewEcVolume will load EC config from .vif into v.ECContext)
+ // Use MaxShardCount (32) to support custom EC ratios up to 32 total shards
+ tempShards := make([]string, erasure_coding.MaxShardCount)
+ v, found := vs.store.CollectEcShards(needle.VolumeId(req.VolumeId), tempShards)
if !found {
return nil, fmt.Errorf("ec volume %d not found", req.VolumeId)
}
@@ -453,7 +487,19 @@ func (vs *VolumeServer) VolumeEcShardsToVolume(ctx context.Context, req *volume_
return nil, fmt.Errorf("existing collection:%v unexpected input: %v", v.Collection, req.Collection)
}
- for shardId := 0; shardId < erasure_coding.DataShardsCount; shardId++ {
+ // Use EC context (already loaded from .vif) to determine data shard count
+ dataShards := v.ECContext.DataShards
+
+ // Defensive validation to prevent panics from corrupted ECContext
+ if dataShards <= 0 || dataShards > erasure_coding.MaxShardCount {
+ return nil, fmt.Errorf("invalid data shard count %d for volume %d (must be 1..%d)", dataShards, req.VolumeId, erasure_coding.MaxShardCount)
+ }
+
+ shardFileNames := tempShards[:dataShards]
+ glog.V(1).Infof("Using EC config from volume %d: %d data shards", req.VolumeId, dataShards)
+
+ // Verify all data shards are present
+ for shardId := 0; shardId < dataShards; shardId++ {
if shardFileNames[shardId] == "" {
return nil, fmt.Errorf("ec volume %d missing shard %d", req.VolumeId, shardId)
}
diff --git a/weed/shell/command_ec_common.go b/weed/shell/command_ec_common.go
index 665daa1b8..f059b4e74 100644
--- a/weed/shell/command_ec_common.go
+++ b/weed/shell/command_ec_common.go
@@ -622,7 +622,8 @@ func (ecb *ecBalancer) deleteDuplicatedEcShards(collection string) error {
func (ecb *ecBalancer) doDeduplicateEcShards(collection string, vid needle.VolumeId, locations []*EcNode) error {
// check whether this volume has ecNodes that are over average
- shardToLocations := make([][]*EcNode, erasure_coding.TotalShardsCount)
+ // Use MaxShardCount (32) to support custom EC ratios
+ shardToLocations := make([][]*EcNode, erasure_coding.MaxShardCount)
for _, ecNode := range locations {
shardBits := findEcVolumeShards(ecNode, vid)
for _, shardId := range shardBits.ShardIds() {
@@ -677,11 +678,16 @@ func countShardsByRack(vid needle.VolumeId, locations []*EcNode) map[string]int
func (ecb *ecBalancer) doBalanceEcShardsAcrossRacks(collection string, vid needle.VolumeId, locations []*EcNode) error {
racks := ecb.racks()
- // calculate average number of shards an ec rack should have for one volume
- averageShardsPerEcRack := ceilDivide(erasure_coding.TotalShardsCount, len(racks))
- // see the volume's shards are in how many racks, and how many in each rack
rackToShardCount := countShardsByRack(vid, locations)
+
+ // Calculate actual total shards for this volume (not hardcoded default)
+ var totalShardsForVolume int
+ for _, count := range rackToShardCount {
+ totalShardsForVolume += count
+ }
+ // calculate average number of shards an ec rack should have for one volume
+ averageShardsPerEcRack := ceilDivide(totalShardsForVolume, len(racks))
rackEcNodesWithVid := groupBy(locations, func(ecNode *EcNode) string {
return string(ecNode.rack)
})
diff --git a/weed/shell/command_ec_rebuild.go b/weed/shell/command_ec_rebuild.go
index 8cae77434..cceaa1899 100644
--- a/weed/shell/command_ec_rebuild.go
+++ b/weed/shell/command_ec_rebuild.go
@@ -264,7 +264,8 @@ func (ecShardMap EcShardMap) registerEcNode(ecNode *EcNode, collection string) {
if shardInfo.Collection == collection {
existing, found := ecShardMap[needle.VolumeId(shardInfo.Id)]
if !found {
- existing = make([][]*EcNode, erasure_coding.TotalShardsCount)
+ // Use MaxShardCount (32) to support custom EC ratios
+ existing = make([][]*EcNode, erasure_coding.MaxShardCount)
ecShardMap[needle.VolumeId(shardInfo.Id)] = existing
}
for _, shardId := range erasure_coding.ShardBits(shardInfo.EcIndexBits).ShardIds() {
diff --git a/weed/storage/disk_location_ec.go b/weed/storage/disk_location_ec.go
index 128bfd26f..b370555da 100644
--- a/weed/storage/disk_location_ec.go
+++ b/weed/storage/disk_location_ec.go
@@ -16,7 +16,9 @@ import (
)
var (
- re = regexp.MustCompile(`\.ec[0-9][0-9]`)
+ // Match .ec00 through .ec999 (currently only .ec00-.ec31 are used)
+ // Using \d{2,3} for future-proofing if MaxShardCount is ever increased beyond 99
+ re = regexp.MustCompile(`\.ec\d{2,3}`)
)
func (l *DiskLocation) FindEcVolume(vid needle.VolumeId) (*erasure_coding.EcVolume, bool) {
@@ -398,8 +400,8 @@ func (l *DiskLocation) validateEcVolume(collection string, vid needle.VolumeId)
var actualShardSize int64 = -1
// Count shards and validate they all have the same size (required for Reed-Solomon EC)
- // Shard files (.ec00 - .ec13) are always in l.Directory, not l.IdxDirectory
- for i := 0; i < erasure_coding.TotalShardsCount; i++ {
+ // Check up to MaxShardCount (32) to support custom EC ratios
+ for i := 0; i < erasure_coding.MaxShardCount; i++ {
shardFileName := baseFileName + erasure_coding.ToExt(i)
fi, err := os.Stat(shardFileName)
@@ -472,8 +474,9 @@ func (l *DiskLocation) removeEcVolumeFiles(collection string, vid needle.VolumeI
removeFile(indexBaseFileName+".ecx", "EC index file")
removeFile(indexBaseFileName+".ecj", "EC journal file")
- // Remove all EC shard files (.ec00 ~ .ec13) from data directory
- for i := 0; i < erasure_coding.TotalShardsCount; i++ {
+ // Remove all EC shard files (.ec00 ~ .ec31) from data directory
+ // Use MaxShardCount (32) to support custom EC ratios
+ for i := 0; i < erasure_coding.MaxShardCount; i++ {
removeFile(baseFileName+erasure_coding.ToExt(i), "EC shard file")
}
}
diff --git a/weed/storage/erasure_coding/ec_context.go b/weed/storage/erasure_coding/ec_context.go
new file mode 100644
index 000000000..770fe41af
--- /dev/null
+++ b/weed/storage/erasure_coding/ec_context.go
@@ -0,0 +1,46 @@
+package erasure_coding
+
+import (
+ "fmt"
+
+ "github.com/klauspost/reedsolomon"
+ "github.com/seaweedfs/seaweedfs/weed/storage/needle"
+)
+
+// ECContext encapsulates erasure coding parameters for encoding/decoding operations
+type ECContext struct {
+ DataShards int
+ ParityShards int
+ Collection string
+ VolumeId needle.VolumeId
+}
+
+// Total returns the total number of shards (data + parity)
+func (ctx *ECContext) Total() int {
+ return ctx.DataShards + ctx.ParityShards
+}
+
+// NewDefaultECContext creates a context with default 10+4 shard configuration
+func NewDefaultECContext(collection string, volumeId needle.VolumeId) *ECContext {
+ return &ECContext{
+ DataShards: DataShardsCount,
+ ParityShards: ParityShardsCount,
+ Collection: collection,
+ VolumeId: volumeId,
+ }
+}
+
+// CreateEncoder creates a Reed-Solomon encoder for this context
+func (ctx *ECContext) CreateEncoder() (reedsolomon.Encoder, error) {
+ return reedsolomon.New(ctx.DataShards, ctx.ParityShards)
+}
+
+// ToExt returns the file extension for a given shard index
+func (ctx *ECContext) ToExt(shardIndex int) string {
+ return fmt.Sprintf(".ec%02d", shardIndex)
+}
+
+// String returns a human-readable representation of the EC configuration
+func (ctx *ECContext) String() string {
+ return fmt.Sprintf("%d+%d (total: %d)", ctx.DataShards, ctx.ParityShards, ctx.Total())
+}
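End to end, the new ECContext drives the klauspost/reedsolomon encoder it wraps. A self-contained sketch of the intended flow, assuming the library's Split/Encode/Verify API; the collection name, volume id, and payload are illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
)

func main() {
	ctx := erasure_coding.NewDefaultECContext("pics", 7) // default 10+4
	enc, err := ctx.CreateEncoder()                      // reedsolomon.New(10, 4)
	if err != nil {
		log.Fatal(err)
	}
	shards, err := enc.Split([]byte("example .dat payload")) // 14 slices, parity still zero
	if err != nil {
		log.Fatal(err)
	}
	if err := enc.Encode(shards); err != nil { // fills the 4 parity shards
		log.Fatal(err)
	}
	ok, _ := enc.Verify(shards)
	fmt.Println(ctx.String(), ok, ctx.ToExt(13)) // "10+4 (total: 14)" true ".ec13"
}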
diff --git a/weed/storage/erasure_coding/ec_encoder.go b/weed/storage/erasure_coding/ec_encoder.go
index eeeb156e6..81ebffdcb 100644
--- a/weed/storage/erasure_coding/ec_encoder.go
+++ b/weed/storage/erasure_coding/ec_encoder.go
@@ -11,6 +11,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/storage/idx"
"github.com/seaweedfs/seaweedfs/weed/storage/needle_map"
"github.com/seaweedfs/seaweedfs/weed/storage/types"
+ "github.com/seaweedfs/seaweedfs/weed/storage/volume_info"
"github.com/seaweedfs/seaweedfs/weed/util"
)
@@ -18,6 +19,7 @@ const (
DataShardsCount = 10
ParityShardsCount = 4
TotalShardsCount = DataShardsCount + ParityShardsCount
+ MaxShardCount = 32 // Maximum number of shards since ShardBits is uint32 (bits 0-31)
MinTotalDisks = TotalShardsCount/ParityShardsCount + 1
ErasureCodingLargeBlockSize = 1024 * 1024 * 1024 // 1GB
ErasureCodingSmallBlockSize = 1024 * 1024 // 1MB
@@ -54,20 +56,53 @@ func WriteSortedFileFromIdx(baseFileName string, ext string) (e error) {
return nil
}
-// WriteEcFiles generates .ec00 ~ .ec13 files
+// WriteEcFiles generates .ec00 ~ .ec13 files using default EC context
func WriteEcFiles(baseFileName string) error {
- return generateEcFiles(baseFileName, 256*1024, ErasureCodingLargeBlockSize, ErasureCodingSmallBlockSize)
+ ctx := NewDefaultECContext("", 0)
+ return WriteEcFilesWithContext(baseFileName, ctx)
+}
+
+// WriteEcFilesWithContext generates EC files using the provided context
+func WriteEcFilesWithContext(baseFileName string, ctx *ECContext) error {
+ return generateEcFiles(baseFileName, 256*1024, ErasureCodingLargeBlockSize, ErasureCodingSmallBlockSize, ctx)
}
func RebuildEcFiles(baseFileName string) ([]uint32, error) {
- return generateMissingEcFiles(baseFileName, 256*1024, ErasureCodingLargeBlockSize, ErasureCodingSmallBlockSize)
+ // Attempt to load EC config from .vif file to preserve original configuration
+ var ctx *ECContext
+ if volumeInfo, _, found, _ := volume_info.MaybeLoadVolumeInfo(baseFileName + ".vif"); found && volumeInfo.EcShardConfig != nil {
+ ds := int(volumeInfo.EcShardConfig.DataShards)
+ ps := int(volumeInfo.EcShardConfig.ParityShards)
+
+ // Validate EC config before using it
+ if ds > 0 && ps > 0 && ds+ps <= MaxShardCount {
+ ctx = &ECContext{
+ DataShards: ds,
+ ParityShards: ps,
+ }
+ glog.V(0).Infof("Rebuilding EC files for %s with config from .vif: %s", baseFileName, ctx.String())
+ } else {
+ glog.Warningf("Invalid EC config in .vif for %s (data=%d, parity=%d), using default", baseFileName, ds, ps)
+ ctx = NewDefaultECContext("", 0)
+ }
+ } else {
+ glog.V(0).Infof("Rebuilding EC files for %s with default config", baseFileName)
+ ctx = NewDefaultECContext("", 0)
+ }
+
+ return RebuildEcFilesWithContext(baseFileName, ctx)
+}
+
+// RebuildEcFilesWithContext rebuilds missing EC files using the provided context
+func RebuildEcFilesWithContext(baseFileName string, ctx *ECContext) ([]uint32, error) {
+ return generateMissingEcFiles(baseFileName, 256*1024, ErasureCodingLargeBlockSize, ErasureCodingSmallBlockSize, ctx)
}
func ToExt(ecIndex int) string {
return fmt.Sprintf(".ec%02d", ecIndex)
}
-func generateEcFiles(baseFileName string, bufferSize int, largeBlockSize int64, smallBlockSize int64) error {
+func generateEcFiles(baseFileName string, bufferSize int, largeBlockSize int64, smallBlockSize int64, ctx *ECContext) error {
file, err := os.OpenFile(baseFileName+".dat", os.O_RDONLY, 0)
if err != nil {
return fmt.Errorf("failed to open dat file: %w", err)
@@ -79,21 +114,21 @@ func generateEcFiles(baseFileName string, bufferSize int, largeBlockSize int64,
return fmt.Errorf("failed to stat dat file: %w", err)
}
- glog.V(0).Infof("encodeDatFile %s.dat size:%d", baseFileName, fi.Size())
- err = encodeDatFile(fi.Size(), baseFileName, bufferSize, largeBlockSize, file, smallBlockSize)
+ glog.V(0).Infof("encodeDatFile %s.dat size:%d with EC context %s", baseFileName, fi.Size(), ctx.String())
+ err = encodeDatFile(fi.Size(), baseFileName, bufferSize, largeBlockSize, file, smallBlockSize, ctx)
if err != nil {
return fmt.Errorf("encodeDatFile: %w", err)
}
return nil
}
-func generateMissingEcFiles(baseFileName string, bufferSize int, largeBlockSize int64, smallBlockSize int64) (generatedShardIds []uint32, err error) {
+func generateMissingEcFiles(baseFileName string, bufferSize int, largeBlockSize int64, smallBlockSize int64, ctx *ECContext) (generatedShardIds []uint32, err error) {
- shardHasData := make([]bool, TotalShardsCount)
- inputFiles := make([]*os.File, TotalShardsCount)
- outputFiles := make([]*os.File, TotalShardsCount)
- for shardId := 0; shardId < TotalShardsCount; shardId++ {
- shardFileName := baseFileName + ToExt(shardId)
+ shardHasData := make([]bool, ctx.Total())
+ inputFiles := make([]*os.File, ctx.Total())
+ outputFiles := make([]*os.File, ctx.Total())
+ for shardId := 0; shardId < ctx.Total(); shardId++ {
+ shardFileName := baseFileName + ctx.ToExt(shardId)
if util.FileExists(shardFileName) {
shardHasData[shardId] = true
inputFiles[shardId], err = os.OpenFile(shardFileName, os.O_RDONLY, 0)
@@ -111,14 +146,14 @@ func generateMissingEcFiles(baseFileName string, bufferSize int, largeBlockSize
}
}
- err = rebuildEcFiles(shardHasData, inputFiles, outputFiles)
+ err = rebuildEcFiles(shardHasData, inputFiles, outputFiles, ctx)
if err != nil {
return nil, fmt.Errorf("rebuildEcFiles: %w", err)
}
return
}
-func encodeData(file *os.File, enc reedsolomon.Encoder, startOffset, blockSize int64, buffers [][]byte, outputs []*os.File) error {
+func encodeData(file *os.File, enc reedsolomon.Encoder, startOffset, blockSize int64, buffers [][]byte, outputs []*os.File, ctx *ECContext) error {
bufferSize := int64(len(buffers[0]))
if bufferSize == 0 {
@@ -131,7 +166,7 @@ func encodeData(file *os.File, enc reedsolomon.Encoder, startOffset, blockSize i
}
for b := int64(0); b < batchCount; b++ {
- err := encodeDataOneBatch(file, enc, startOffset+b*bufferSize, blockSize, buffers, outputs)
+ err := encodeDataOneBatch(file, enc, startOffset+b*bufferSize, blockSize, buffers, outputs, ctx)
if err != nil {
return err
}
@@ -140,9 +175,9 @@ func encodeData(file *os.File, enc reedsolomon.Encoder, startOffset, blockSize i
return nil
}
-func openEcFiles(baseFileName string, forRead bool) (files []*os.File, err error) {
- for i := 0; i < TotalShardsCount; i++ {
- fname := baseFileName + ToExt(i)
+func openEcFiles(baseFileName string, forRead bool, ctx *ECContext) (files []*os.File, err error) {
+ for i := 0; i < ctx.Total(); i++ {
+ fname := baseFileName + ctx.ToExt(i)
openOption := os.O_TRUNC | os.O_CREATE | os.O_WRONLY
if forRead {
openOption = os.O_RDONLY
@@ -164,10 +199,10 @@ func closeEcFiles(files []*os.File) {
}
}
-func encodeDataOneBatch(file *os.File, enc reedsolomon.Encoder, startOffset, blockSize int64, buffers [][]byte, outputs []*os.File) error {
+func encodeDataOneBatch(file *os.File, enc reedsolomon.Encoder, startOffset, blockSize int64, buffers [][]byte, outputs []*os.File, ctx *ECContext) error {
// read data into buffers
- for i := 0; i < DataShardsCount; i++ {
+ for i := 0; i < ctx.DataShards; i++ {
n, err := file.ReadAt(buffers[i], startOffset+blockSize*int64(i))
if err != nil {
if err != io.EOF {
@@ -186,7 +221,7 @@ func encodeDataOneBatch(file *os.File, enc reedsolomon.Encoder, startOffset, blo
return err
}
- for i := 0; i < TotalShardsCount; i++ {
+ for i := 0; i < ctx.Total(); i++ {
_, err := outputs[i].Write(buffers[i])
if err != nil {
return err
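The encodeDatFile hunk that follows pre-computes the row sizes this pipeline consumes. The arithmetic generalizes to any data-shard count; a sketch with hypothetical dataShards and datSize variables (sizes illustrative):

// One "row" stripes dataShards blocks of the .dat file across the data shards.
// Default 10+4: large row = 10 GiB, small row = 10 MiB per encodeData pass.
// Hypothetical 6+3: large row = 6 GiB, small row = 6 MiB.
largeRowSize := int64(erasure_coding.ErasureCodingLargeBlockSize) * int64(dataShards)
smallRowSize := int64(erasure_coding.ErasureCodingSmallBlockSize) * int64(dataShards)
fullRows := datSize / largeRowSize                                    // 1 GiB-block passes
tailRows := (datSize%largeRowSize + smallRowSize - 1) / smallRowSize  // 1 MiB-block passes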
@@ -196,53 +231,57 @@ func encodeDataOneBatch(file *os.File, enc reedsolomon.Encoder, startOffset, blo
return nil
}
-func encodeDatFile(remainingSize int64, baseFileName string, bufferSize int, largeBlockSize int64, file *os.File, smallBlockSize int64) error {
+func encodeDatFile(remainingSize int64, baseFileName string, bufferSize int, largeBlockSize int64, file *os.File, smallBlockSize int64, ctx *ECContext) error {
var processedSize int64
- enc, err := reedsolomon.New(DataShardsCount, ParityShardsCount)
+ enc, err := ctx.CreateEncoder()
if err != nil {
return fmt.Errorf("failed to create encoder: %w", err)
}
- buffers := make([][]byte, TotalShardsCount)
+ buffers := make([][]byte, ctx.Total())
for i := range buffers {
buffers[i] = make([]byte, bufferSize)
}
- outputs, err := openEcFiles(baseFileName, false)
+ outputs, err := openEcFiles(baseFileName, false, ctx)
defer closeEcFiles(outputs)
if err != nil {
return fmt.Errorf("failed to open ec files %s: %v", baseFileName, err)
}
- for remainingSize > largeBlockSize*DataShardsCount {
- err = encodeData(file, enc, processedSize, largeBlockSize, buffers, outputs)
+ // Pre-calculate row sizes to avoid redundant calculations in loops
+ largeRowSize := largeBlockSize * int64(ctx.DataShards)
+ smallRowSize := smallBlockSize * int64(ctx.DataShards)
+
+ for remainingSize >= largeRowSize {
+ err = encodeData(file, enc, processedSize, largeBlockSize, buffers, outputs, ctx)
if err != nil {
return fmt.Errorf("failed to encode large chunk data: %w", err)
}
- remainingSize -= largeBlockSize * DataShardsCount
- processedSize += largeBlockSize * DataShardsCount
+ remainingSize -= largeRowSize
+ processedSize += largeRowSize
}
for remainingSize > 0 {
- err = encodeData(file, enc, processedSize, smallBlockSize, buffers, outputs)
+ err = encodeData(file, enc, processedSize, smallBlockSize, buffers, outputs, ctx)
if err != nil {
return fmt.Errorf("failed to encode small chunk data: %w", err)
}
- remainingSize -= smallBlockSize * DataShardsCount
- processedSize += smallBlockSize * DataShardsCount
+ remainingSize -= smallRowSize
+ processedSize += smallRowSize
}
return nil
}
-func rebuildEcFiles(shardHasData []bool, inputFiles []*os.File, outputFiles []*os.File) error {
+func rebuildEcFiles(shardHasData []bool, inputFiles []*os.File, outputFiles []*os.File, ctx *ECContext) error {
- enc, err := reedsolomon.New(DataShardsCount, ParityShardsCount)
+ enc, err := ctx.CreateEncoder()
if err != nil {
return fmt.Errorf("failed to create encoder: %w", err)
}
- buffers := make([][]byte, TotalShardsCount)
+ buffers := make([][]byte, ctx.Total())
for i := range buffers {
if shardHasData[i] {
buffers[i] = make([]byte, ErasureCodingSmallBlockSize)
@@ -254,7 +293,7 @@ func rebuildEcFiles(shardHasData []bool, inputFiles []*os.File, outputFiles []*o
for {
// read the input data from files
- for i := 0; i < TotalShardsCount; i++ {
+ for i := 0; i < ctx.Total(); i++ {
if shardHasData[i] {
n, _ := inputFiles[i].ReadAt(buffers[i], startOffset)
if n == 0 {
@@ -278,7 +317,7 @@ func rebuildEcFiles(shardHasData []bool, inputFiles []*os.File, outputFiles []*o
}
// write the data to output files
- for i := 0; i < TotalShardsCount; i++ {
+ for i := 0; i < ctx.Total(); i++ {
if !shardHasData[i] {
n, _ := outputFiles[i].WriteAt(buffers[i][:inputBufferDataSize], startOffset)
if inputBufferDataSize != n {
diff --git a/weed/storage/erasure_coding/ec_test.go b/weed/storage/erasure_coding/ec_test.go
index b1cc9c441..cbb20832c 100644
--- a/weed/storage/erasure_coding/ec_test.go
+++ b/weed/storage/erasure_coding/ec_test.go
@@ -23,7 +23,10 @@ func TestEncodingDecoding(t *testing.T) {
bufferSize := 50
baseFileName := "1"
- err := generateEcFiles(baseFileName, bufferSize, largeBlockSize, smallBlockSize)
+ // Create default EC context for testing
+ ctx := NewDefaultECContext("", 0)
+
+ err := generateEcFiles(baseFileName, bufferSize, largeBlockSize, smallBlockSize, ctx)
if err != nil {
t.Logf("generateEcFiles: %v", err)
}
@@ -33,16 +36,16 @@ func TestEncodingDecoding(t *testing.T) {
t.Logf("WriteSortedFileFromIdx: %v", err)
}
- err = validateFiles(baseFileName)
+ err = validateFiles(baseFileName, ctx)
if err != nil {
t.Logf("WriteSortedFileFromIdx: %v", err)
}
- removeGeneratedFiles(baseFileName)
+ removeGeneratedFiles(baseFileName, ctx)
}
-func validateFiles(baseFileName string) error {
+func validateFiles(baseFileName string, ctx *ECContext) error {
nm, err := readNeedleMap(baseFileName)
if err != nil {
return fmt.Errorf("readNeedleMap: %v", err)
}
@@ -60,7 +63,7 @@ func validateFiles(baseFileName string) error {
return fmt.Errorf("failed to stat dat file: %v", err)
}
- ecFiles, err := openEcFiles(baseFileName, true)
+ ecFiles, err := openEcFiles(baseFileName, true, ctx)
if err != nil {
return fmt.Errorf("error opening ec files: %w", err)
}
@@ -184,9 +187,9 @@ func readFromFile(file *os.File, data []byte, ecFileOffset int64) (err error) {
return
}
-func removeGeneratedFiles(baseFileName string) {
- for i := 0; i < DataShardsCount+ParityShardsCount; i++ {
- fname := fmt.Sprintf("%s.ec%02d", baseFileName, i)
+func removeGeneratedFiles(baseFileName string, ctx *ECContext) {
+ for i := 0; i < ctx.Total(); i++ {
+ fname := baseFileName + ctx.ToExt(i)
os.Remove(fname)
}
os.Remove(baseFileName + ".ecx")
diff --git a/weed/storage/erasure_coding/ec_volume.go b/weed/storage/erasure_coding/ec_volume.go
index 839428e7b..3e323163e 100644
--- a/weed/storage/erasure_coding/ec_volume.go
+++ b/weed/storage/erasure_coding/ec_volume.go
@@ -41,7 +41,8 @@ type EcVolume struct {
ecjFileAccessLock sync.Mutex
diskType types.DiskType
datFileSize int64
- ExpireAtSec uint64 //ec volume destroy time, calculated from the ec volume was created
+ ExpireAtSec uint64 //ec volume destroy time, calculated from the ec volume was created
+ ECContext *ECContext // EC encoding parameters
}
func NewEcVolume(diskType types.DiskType, dir string, dirIdx string, collection string, vid needle.VolumeId) (ev *EcVolume, err error) {
@@ -73,9 +74,32 @@ func NewEcVolume(diskType types.DiskType, dir string, dirIdx string, collection
ev.Version = needle.Version(volumeInfo.Version)
ev.datFileSize = volumeInfo.DatFileSize
ev.ExpireAtSec = volumeInfo.ExpireAtSec
+
+ // Initialize EC context from .vif if present; fallback to defaults
+ if volumeInfo.EcShardConfig != nil {
+ ds := int(volumeInfo.EcShardConfig.DataShards)
+ ps := int(volumeInfo.EcShardConfig.ParityShards)
+
+ // Validate shard counts to prevent zero or invalid values
+ if ds <= 0 || ps <= 0 || ds+ps > MaxShardCount {
+ glog.Warningf("Invalid EC config in VolumeInfo for volume %d (data=%d, parity=%d), using defaults", vid, ds, ps)
+ ev.ECContext = NewDefaultECContext(collection, vid)
+ } else {
+ ev.ECContext = &ECContext{
+ Collection: collection,
+ VolumeId: vid,
+ DataShards: ds,
+ ParityShards: ps,
+ }
+ glog.V(1).Infof("Loaded EC config from VolumeInfo for volume %d: %s", vid, ev.ECContext.String())
+ }
+ } else {
+ ev.ECContext = NewDefaultECContext(collection, vid)
+ }
} else {
glog.Warningf("vif file not found,volumeId:%d, filename:%s", vid, dataBaseFileName)
volume_info.SaveVolumeInfo(dataBaseFileName+".vif", &volume_server_pb.VolumeInfo{Version: uint32(ev.Version)})
+ ev.ECContext = NewDefaultECContext(collection, vid)
}
ev.ShardLocations = make(map[ShardId][]pb.ServerAddress)
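The next hunk derives shard size from the loaded data-shard count instead of the constant. The division itself is simple; a sketch with illustrative sizes:

// Each shard holds datFileSize/DataShards bytes of the original volume.
// 20 GiB .dat, default 10+4: 2 GiB shards, 14 files, 28 GiB on disk (+40%).
// 20 GiB .dat, custom 5+2:   4 GiB shards,  7 files, 28 GiB on disk (+40%).
shardSize := ev.datFileSize / int64(ev.ECContext.DataShards)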
@@ -260,7 +284,7 @@ func (ev *EcVolume) LocateEcShardNeedleInterval(version needle.Version, offset i
if ev.datFileSize > 0 {
// To get the correct LargeBlockRowsCount
// use datFileSize to calculate the shardSize to match the EC encoding logic.
- shardSize = ev.datFileSize / DataShardsCount
+ shardSize = ev.datFileSize / int64(ev.ECContext.DataShards)
}
// calculate the locations in the ec shards
intervals = LocateData(ErasureCodingLargeBlockSize, ErasureCodingSmallBlockSize, shardSize, offset, types.Size(needle.GetActualSize(size, version)))
diff --git a/weed/storage/erasure_coding/ec_volume_info.go b/weed/storage/erasure_coding/ec_volume_info.go
index 53b352168..4d34ccbde 100644
--- a/weed/storage/erasure_coding/ec_volume_info.go
+++ b/weed/storage/erasure_coding/ec_volume_info.go
@@ -87,7 +87,7 @@ func (ecInfo *EcVolumeInfo) Minus(other *EcVolumeInfo) *EcVolumeInfo {
// Copy shard sizes for remaining shards
retIndex := 0
- for shardId := ShardId(0); shardId < TotalShardsCount && retIndex < len(ret.ShardSizes); shardId++ {
+ for shardId := ShardId(0); shardId < ShardId(MaxShardCount) && retIndex < len(ret.ShardSizes); shardId++ {
if ret.ShardBits.HasShardId(shardId) {
if size, exists := ecInfo.GetShardSize(shardId); exists {
ret.ShardSizes[retIndex] = size
@@ -119,19 +119,28 @@ func (ecInfo *EcVolumeInfo) ToVolumeEcShardInformationMessage() (ret *master_pb.
type ShardBits uint32 // use bits to indicate the shard id, use 32 bits just for possible future extension
func (b ShardBits) AddShardId(id ShardId) ShardBits {
+ if id >= MaxShardCount {
+ return b // Reject out-of-range shard IDs
+ }
return b | (1 << id)
}
func (b ShardBits) RemoveShardId(id ShardId) ShardBits {
+ if id >= MaxShardCount {
+ return b // Reject out-of-range shard IDs
+ }
return b &^ (1 << id)
}
func (b ShardBits) HasShardId(id ShardId) bool {
+ if id >= MaxShardCount {
+ return false // Out-of-range shard IDs are never present
+ }
return b&(1<<id) > 0
}
func (b ShardBits) ShardIds() (ret []ShardId) {
- for i := ShardId(0); i < TotalShardsCount; i++ {
+ for i := ShardId(0); i < ShardId(MaxShardCount); i++ {
if b.HasShardId(i) {
ret = append(ret, i)
}
@@ -140,7 +149,7 @@ func (b ShardBits) ShardIds() (ret []ShardId) {
}
func (b ShardBits) ToUint32Slice() (ret []uint32) {
- for i := uint32(0); i < TotalShardsCount; i++ {
+ for i := uint32(0); i < uint32(MaxShardCount); i++ {
if b.HasShardId(ShardId(i)) {
ret = append(ret, i)
}
@@ -164,6 +173,8 @@ func (b ShardBits) Plus(other ShardBits) ShardBits {
}
func (b ShardBits) MinusParityShards() ShardBits {
+ // Removes parity shards from the bit mask
+ // Assumes default 10+4 EC layout where parity shards are IDs 10-13
for i := DataShardsCount; i < TotalShardsCount; i++ {
b = b.RemoveShardId(ShardId(i))
}
@@ -205,7 +216,7 @@ func (b ShardBits) IndexToShardId(index int) (shardId ShardId, found bool) {
}
currentIndex := 0
- for i := ShardId(0); i < TotalShardsCount; i++ {
+ for i := ShardId(0); i < ShardId(MaxShardCount); i++ {
if b.HasShardId(i) {
if currentIndex == index {
return i, true
@@ -234,7 +245,7 @@ func (ecInfo *EcVolumeInfo) resizeShardSizes(prevShardBits ShardBits) {
// Copy existing sizes to new positions based on current ShardBits
if len(ecInfo.ShardSizes) > 0 {
newIndex := 0
- for shardId := ShardId(0); shardId < TotalShardsCount && newIndex < expectedLength; shardId++ {
+ for shardId := ShardId(0); shardId < ShardId(MaxShardCount) && newIndex < expectedLength; shardId++ {
if ecInfo.ShardBits.HasShardId(shardId) {
// Try to find the size for this shard in the old array using previous ShardBits
if oldIndex, found := prevShardBits.ShardIdToIndex(shardId); found && oldIndex < len(ecInfo.ShardSizes) {
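The guarded ShardBits methods compose as before but now explicitly ignore IDs at or above 32. A short sketch (shard IDs illustrative):

var b erasure_coding.ShardBits
b = b.AddShardId(0).AddShardId(9).AddShardId(13)
fmt.Println(b.ShardIds(), b.HasShardId(13)) // [0 9 13] true
b = b.AddShardId(32)                        // >= MaxShardCount: returned unchanged
fmt.Println(b.ShardIds())                   // still [0 9 13]
fmt.Println(b.MinusParityShards().ShardIds()) // [0 9] (assumes default 10+4 layout)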
diff --git a/weed/storage/store_ec.go b/weed/storage/store_ec.go
index 0126ad9d4..6a26b4ae0 100644
--- a/weed/storage/store_ec.go
+++ b/weed/storage/store_ec.go
@@ -350,7 +350,8 @@ func (s *Store) recoverOneRemoteEcShardInterval(needleId types.NeedleId, ecVolum
return 0, false, fmt.Errorf("failed to create encoder: %w", err)
}
- bufs := make([][]byte, erasure_coding.TotalShardsCount)
+ // Use MaxShardCount to support custom EC ratios up to 32 shards
+ bufs := make([][]byte, erasure_coding.MaxShardCount)
var wg sync.WaitGroup
ecVolume.ShardLocationsLock.RLock()
diff --git a/weed/topology/topology_ec.go b/weed/topology/topology_ec.go
index 844e92f55..c8b511338 100644
--- a/weed/topology/topology_ec.go
+++ b/weed/topology/topology_ec.go
@@ -10,7 +10,8 @@ import (
)
type EcShardLocations struct {
Collection string
- Locations [erasure_coding.TotalShardsCount][]*DataNode
+ // Use MaxShardCount (32) to support custom EC ratios
+ Locations [erasure_coding.MaxShardCount][]*DataNode
}
func (t *Topology) SyncDataNodeEcShards(shardInfos []*master_pb.VolumeEcShardInformationMessage, dn *DataNode) (newShards, deletedShards []*erasure_coding.EcVolumeInfo) {
@@ -90,6 +91,10 @@ func NewEcShardLocations(collection string) *EcShardLocations {
}
func (loc *EcShardLocations) AddShard(shardId erasure_coding.ShardId, dn *DataNode) (added bool) {
+ // Defensive bounds check to prevent panic with out-of-range shard IDs
+ if int(shardId) >= erasure_coding.MaxShardCount {
+ return false
+ }
dataNodes := loc.Locations[shardId]
for _, n := range dataNodes {
if n.Id() == dn.Id() {
@@ -101,6 +106,10 @@ func (loc *EcShardLocations) AddShard(shardId erasure_coding.ShardId, dn *DataNo
}
func (loc *EcShardLocations) DeleteShard(shardId erasure_coding.ShardId, dn *DataNode) (deleted bool) {
+ // Defensive bounds check to prevent panic with out-of-range shard IDs
+ if int(shardId) >= erasure_coding.MaxShardCount {
+ return false
+ }
dataNodes := loc.Locations[shardId]
foundIndex := -1
for index, n := range dataNodes {
diff --git a/weed/worker/tasks/erasure_coding/ec_task.go b/weed/worker/tasks/erasure_coding/ec_task.go
index 18f192bc9..df7fc94f9 100644
--- a/weed/worker/tasks/erasure_coding/ec_task.go
+++ b/weed/worker/tasks/erasure_coding/ec_task.go
@@ -374,7 +374,8 @@ func (t *ErasureCodingTask) generateEcShardsLocally(localFiles map[string]string
var generatedShards []string
var totalShardSize int64
- for i := 0; i < erasure_coding.TotalShardsCount; i++ {
+ // Check up to MaxShardCount (32) to support custom EC ratios
+ for i := 0; i < erasure_coding.MaxShardCount; i++ {
shardFile := fmt.Sprintf("%s.ec%02d", baseName, i)
if info, err := os.Stat(shardFile); err == nil {
shardKey := fmt.Sprintf("ec%02d", i)
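In the same spirit as the loop above, any code that discovers shard files on disk now probes the full 32-slot range rather than stopping at 14. A minimal sketch of that scan, with a hypothetical baseName:

// Collect whichever of .ec00 .. .ec31 exist; valid for any d+p <= 32 layout.
var present []int
for i := 0; i < erasure_coding.MaxShardCount; i++ {
	if _, err := os.Stat(baseName + erasure_coding.ToExt(i)); err == nil {
		present = append(present, i)
	}
}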