diff --git a/k8s/charts/seaweedfs/values.yaml b/k8s/charts/seaweedfs/values.yaml index cf16623c3..7961d9be4 100644 --- a/k8s/charts/seaweedfs/values.yaml +++ b/k8s/charts/seaweedfs/values.yaml @@ -873,7 +873,7 @@ filer: # anonymousRead: false s3: - enabled: true + enabled: false imageOverride: null restartPolicy: null replicas: 1 diff --git a/weed/admin/dash/ec_shard_management.go b/weed/admin/dash/ec_shard_management.go index 34574ecdb..82aa4074d 100644 --- a/weed/admin/dash/ec_shard_management.go +++ b/weed/admin/dash/ec_shard_management.go @@ -68,7 +68,7 @@ func (s *AdminServer) GetClusterEcShards(page int, pageSize int, sortBy string, // Create individual shard entries for each shard this server has shardBits := ecShardInfo.EcIndexBits - for shardId := 0; shardId < erasure_coding.TotalShardsCount; shardId++ { + for shardId := 0; shardId < erasure_coding.MaxShardCount; shardId++ { if (shardBits & (1 << uint(shardId))) != 0 { // Mark this shard as present for this volume volumeShardsMap[volumeId][shardId] = true @@ -112,6 +112,7 @@ func (s *AdminServer) GetClusterEcShards(page int, pageSize int, sortBy string, shardCount := len(shardsPresent) // Find which shards are missing for this volume across ALL servers + // Uses default 10+4 (14 total shards) for shardId := 0; shardId < erasure_coding.TotalShardsCount; shardId++ { if !shardsPresent[shardId] { missingShards = append(missingShards, shardId) @@ -332,7 +333,7 @@ func (s *AdminServer) GetClusterEcVolumes(page int, pageSize int, sortBy string, // Process each shard this server has for this volume shardBits := ecShardInfo.EcIndexBits - for shardId := 0; shardId < erasure_coding.TotalShardsCount; shardId++ { + for shardId := 0; shardId < erasure_coding.MaxShardCount; shardId++ { if (shardBits & (1 << uint(shardId))) != 0 { // Record shard location volume.ShardLocations[shardId] = node.Id @@ -392,7 +393,7 @@ func (s *AdminServer) GetClusterEcVolumes(page int, pageSize int, sortBy string, for _, volume := range volumeData { volume.TotalShards = len(volume.ShardLocations) - // Find missing shards + // Find missing shards (default 10+4 = 14 total shards) var missingShards []int for shardId := 0; shardId < erasure_coding.TotalShardsCount; shardId++ { if _, exists := volume.ShardLocations[shardId]; !exists { @@ -523,7 +524,7 @@ func sortEcVolumes(volumes []EcVolumeWithShards, sortBy string, sortOrder string // getShardCount returns the number of shards represented by the bitmap func getShardCount(ecIndexBits uint32) int { count := 0 - for i := 0; i < erasure_coding.TotalShardsCount; i++ { + for i := 0; i < erasure_coding.MaxShardCount; i++ { if (ecIndexBits & (1 << uint(i))) != 0 { count++ } @@ -532,6 +533,7 @@ func getShardCount(ecIndexBits uint32) int { } // getMissingShards returns a slice of missing shard IDs for a volume +// Assumes default 10+4 EC configuration (14 total shards) func getMissingShards(ecIndexBits uint32) []int { var missing []int for i := 0; i < erasure_coding.TotalShardsCount; i++ { @@ -614,7 +616,7 @@ func (s *AdminServer) GetEcVolumeDetails(volumeID uint32, sortBy string, sortOrd // Create individual shard entries for each shard this server has shardBits := ecShardInfo.EcIndexBits - for shardId := 0; shardId < erasure_coding.TotalShardsCount; shardId++ { + for shardId := 0; shardId < erasure_coding.MaxShardCount; shardId++ { if (shardBits & (1 << uint(shardId))) != 0 { ecShard := EcShardWithInfo{ VolumeID: ecShardInfo.Id, @@ -698,6 +700,7 @@ func (s *AdminServer) GetEcVolumeDetails(volumeID uint32, sortBy string, 
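Note on the shard-loop change above: iteration over `EcIndexBits` now uses `erasure_coding.MaxShardCount` as the bound, while missing-shard and completeness checks keep `TotalShardsCount` (the default 10+4). A minimal standalone sketch of that distinction — the constant values here (in particular `MaxShardCount = 32`) are assumptions mirroring the comments in the diff, not the actual `erasure_coding` package:

```go
package main

import "fmt"

// Assumed values mirroring the defaults referenced in the diff:
// 10 data + 4 parity shards by default, with a bitmap bound wide enough
// for larger custom EC configurations (the 32 here is an assumption).
const (
	DataShardsCount   = 10
	ParityShardsCount = 4
	TotalShardsCount  = DataShardsCount + ParityShardsCount // default 14
	MaxShardCount     = 32                                  // upper bound for EcIndexBits iteration
)

// shardCount counts every shard present in the bitmap, regardless of whether
// the volume uses the default or a custom EC configuration.
func shardCount(ecIndexBits uint32) int {
	count := 0
	for i := 0; i < MaxShardCount; i++ {
		if ecIndexBits&(1<<uint(i)) != 0 {
			count++
		}
	}
	return count
}

// missingShards reports absent shard IDs assuming the default 10+4 layout,
// matching the "default 10+4 (14 total shards)" comments added in the diff.
func missingShards(ecIndexBits uint32) []int {
	var missing []int
	for i := 0; i < TotalShardsCount; i++ {
		if ecIndexBits&(1<<uint(i)) == 0 {
			missing = append(missing, i)
		}
	}
	return missing
}

func main() {
	bits := uint32(0b00_1111_1111_11) // shards 0-9 present
	fmt.Println(shardCount(bits), missingShards(bits)) // 10 [10 11 12 13]
}
```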
sortOrd } totalUniqueShards := len(foundShards) + // Check completeness using default 10+4 (14 total shards) isComplete := (totalUniqueShards == erasure_coding.TotalShardsCount) // Calculate missing shards diff --git a/weed/admin/handlers/maintenance_handlers.go b/weed/admin/handlers/maintenance_handlers.go index 6335c4174..3c1b5e410 100644 --- a/weed/admin/handlers/maintenance_handlers.go +++ b/weed/admin/handlers/maintenance_handlers.go @@ -46,7 +46,6 @@ func (h *MaintenanceHandlers) ShowTaskDetail(c *gin.Context) { return } - c.Header("Content-Type", "text/html") taskDetailComponent := app.TaskDetail(taskDetail) layoutComponent := layout.Layout(c, taskDetailComponent) diff --git a/weed/admin/maintenance/maintenance_integration.go b/weed/admin/maintenance/maintenance_integration.go index 20f1ea97d..6ac28685e 100644 --- a/weed/admin/maintenance/maintenance_integration.go +++ b/weed/admin/maintenance/maintenance_integration.go @@ -306,25 +306,21 @@ func (s *MaintenanceIntegration) CanScheduleWithTaskSchedulers(task *Maintenance return false // Fallback to existing logic for unknown types } - // Convert task objects taskObject := s.convertTaskToTaskSystem(task) if taskObject == nil { return false } - runningTaskObjects := s.convertTasksToTaskSystem(runningTasks) workerObjects := s.convertWorkersToTaskSystem(availableWorkers) - // Get the appropriate scheduler scheduler := s.taskRegistry.GetScheduler(taskType) if scheduler == nil { return false } - canSchedule := scheduler.CanScheduleNow(taskObject, runningTaskObjects, workerObjects) return canSchedule diff --git a/weed/cluster/master_client.go b/weed/cluster/master_client.go index bab2360fe..69c53c1de 100644 --- a/weed/cluster/master_client.go +++ b/weed/cluster/master_client.go @@ -16,6 +16,9 @@ func ListExistingPeerUpdates(master pb.ServerAddress, grpcDialOption grpc.DialOp ClientType: clientType, FilerGroup: filerGroup, }) + if err != nil { + return err + } glog.V(0).Infof("the cluster has %d %s\n", len(resp.ClusterNodes), clientType) for _, node := range resp.ClusterNodes { @@ -26,7 +29,7 @@ func ListExistingPeerUpdates(master pb.ServerAddress, grpcDialOption grpc.DialOp CreatedAtNs: node.CreatedAtNs, }) } - return err + return nil }); grpcErr != nil { glog.V(0).Infof("connect to %s: %v", master, grpcErr) } diff --git a/weed/command/autocomplete.go b/weed/command/autocomplete.go index d9e4c6b04..6a74311dc 100644 --- a/weed/command/autocomplete.go +++ b/weed/command/autocomplete.go @@ -2,11 +2,11 @@ package command import ( "fmt" - "os" - "path/filepath" "github.com/posener/complete" completeinstall "github.com/posener/complete/cmd/install" flag "github.com/seaweedfs/seaweedfs/weed/util/fla9" + "os" + "path/filepath" "runtime" ) @@ -53,14 +53,14 @@ func printAutocompleteScript(shell string) bool { return false } - switch shell { - case "bash": - fmt.Printf("complete -C %q weed\n", binPath) - case "zsh": - fmt.Printf("autoload -U +X bashcompinit && bashcompinit\n") - fmt.Printf("complete -o nospace -C %q weed\n", binPath) - case "fish": - fmt.Printf(`function __complete_weed + switch shell { + case "bash": + fmt.Printf("complete -C %q weed\n", binPath) + case "zsh": + fmt.Printf("autoload -U +X bashcompinit && bashcompinit\n") + fmt.Printf("complete -o nospace -C %q weed\n", binPath) + case "fish": + fmt.Printf(`function __complete_weed set -lx COMP_LINE (commandline -cp) test -z (commandline -ct) and set COMP_LINE "$COMP_LINE " @@ -68,10 +68,10 @@ func printAutocompleteScript(shell string) bool { end complete -f -c weed -a 
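The master_client.go hunk above moves the error check ahead of the response dereference and returns an explicit `nil` on success. A hedged sketch of the same pattern with illustrative stand-in types (the real client and response types live in the seaweedfs pb packages):

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// Hypothetical stand-ins for the gRPC client and response used in
// ListExistingPeerUpdates; names here are illustrative only.
type clusterNode struct{ Address string }
type listResponse struct{ ClusterNodes []clusterNode }
type clusterClient interface {
	ListClusterNodes(ctx context.Context) (*listResponse, error)
}

// listPeers shows the fix: return immediately when the RPC fails, only then
// iterate the response, and finish with an explicit nil rather than
// re-returning the stale err variable.
func listPeers(ctx context.Context, client clusterClient, visit func(addr string)) error {
	resp, err := client.ListClusterNodes(ctx)
	if err != nil {
		return err // never dereference resp on failure
	}
	for _, node := range resp.ClusterNodes {
		visit(node.Address)
	}
	return nil
}

var errUnavailable = errors.New("master unavailable")

type failingClient struct{}

func (failingClient) ListClusterNodes(ctx context.Context) (*listResponse, error) {
	return nil, errUnavailable
}

func main() {
	err := listPeers(context.Background(), failingClient{}, func(string) {})
	fmt.Println(errors.Is(err, errUnavailable)) // true
}
```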
"(__complete_weed)" `, binPath) - default: - fmt.Fprintf(os.Stderr, "unsupported shell: %s. Supported shells: bash, zsh, fish\n", shell) - return false - } + default: + fmt.Fprintf(os.Stderr, "unsupported shell: %s. Supported shells: bash, zsh, fish\n", shell) + return false + } return true } diff --git a/weed/mq/broker/broker_grpc_sub.go b/weed/mq/broker/broker_grpc_sub.go index f20e1a065..51a74c6a9 100644 --- a/weed/mq/broker/broker_grpc_sub.go +++ b/weed/mq/broker/broker_grpc_sub.go @@ -272,7 +272,6 @@ subscribeLoop: TsNs: logEntry.TsNs, } - if err := stream.Send(&mq_pb.SubscribeMessageResponse{Message: &mq_pb.SubscribeMessageResponse_Data{ Data: dataMsg, }}); err != nil { diff --git a/weed/mq/kafka/consumer/incremental_rebalancing_test.go b/weed/mq/kafka/consumer/incremental_rebalancing_test.go index 1352b2da0..64f0ba085 100644 --- a/weed/mq/kafka/consumer/incremental_rebalancing_test.go +++ b/weed/mq/kafka/consumer/incremental_rebalancing_test.go @@ -103,15 +103,15 @@ func TestIncrementalCooperativeAssignmentStrategy_RebalanceWithRevocation(t *tes t.Errorf("Expected member-2 to have 0 partitions during revocation, got %d", len(member2Assignments)) } - t.Logf("Revocation phase - Member-1: %d partitions, Member-2: %d partitions", + t.Logf("Revocation phase - Member-1: %d partitions, Member-2: %d partitions", len(member1Assignments), len(member2Assignments)) // Simulate time passing and second call (should move to assignment phase) time.Sleep(10 * time.Millisecond) - + // Force move to assignment phase by setting timeout to 0 state.RevocationTimeout = 0 - + assignments2 := strategy.Assign(members, topicPartitions) // Should complete rebalance @@ -136,7 +136,7 @@ func TestIncrementalCooperativeAssignmentStrategy_RebalanceWithRevocation(t *tes t.Errorf("Expected 4 total partitions after rebalance, got %d", totalFinalPartitions) } - t.Logf("Final assignment - Member-1: %d partitions, Member-2: %d partitions", + t.Logf("Final assignment - Member-1: %d partitions, Member-2: %d partitions", len(member1FinalAssignments), len(member2FinalAssignments)) } @@ -239,7 +239,7 @@ func TestIncrementalCooperativeAssignmentStrategy_MultipleTopics(t *testing.T) { t.Errorf("Expected partition %s to be assigned", expected) } } - + // Debug: Print all assigned partitions t.Logf("All assigned partitions: %v", allAssignedPartitions) } @@ -390,7 +390,7 @@ func TestIncrementalCooperativeAssignmentStrategy_StateTransitions(t *testing.T) // Force timeout to move to assignment phase state.RevocationTimeout = 0 strategy.Assign(members, topicPartitions) - + // Should complete and return to None state = strategy.GetRebalanceState() if state.Phase != RebalancePhaseNone { diff --git a/weed/mq/kafka/consumer/rebalance_timeout.go b/weed/mq/kafka/consumer/rebalance_timeout.go index 9844723c0..f4f65f37b 100644 --- a/weed/mq/kafka/consumer/rebalance_timeout.go +++ b/weed/mq/kafka/consumer/rebalance_timeout.go @@ -24,12 +24,12 @@ func (rtm *RebalanceTimeoutManager) CheckRebalanceTimeouts() { for _, group := range rtm.coordinator.groups { group.Mu.Lock() - + // Only check timeouts for groups in rebalancing states if group.State == GroupStatePreparingRebalance || group.State == GroupStateCompletingRebalance { rtm.checkGroupRebalanceTimeout(group, now) } - + group.Mu.Unlock() } } @@ -37,7 +37,7 @@ func (rtm *RebalanceTimeoutManager) CheckRebalanceTimeouts() { // checkGroupRebalanceTimeout checks and handles rebalance timeout for a specific group func (rtm *RebalanceTimeoutManager) checkGroupRebalanceTimeout(group *ConsumerGroup, 
now time.Time) { expiredMembers := make([]string, 0) - + for memberID, member := range group.Members { // Check if member has exceeded its rebalance timeout rebalanceTimeout := time.Duration(member.RebalanceTimeout) * time.Millisecond @@ -45,21 +45,21 @@ func (rtm *RebalanceTimeoutManager) checkGroupRebalanceTimeout(group *ConsumerGr // Use default rebalance timeout if not specified rebalanceTimeout = time.Duration(rtm.coordinator.rebalanceTimeoutMs) * time.Millisecond } - + // For members in pending state during rebalance, check against join time if member.State == MemberStatePending { if now.Sub(member.JoinedAt) > rebalanceTimeout { expiredMembers = append(expiredMembers, memberID) } } - + // Also check session timeout as a fallback sessionTimeout := time.Duration(member.SessionTimeout) * time.Millisecond if now.Sub(member.LastHeartbeat) > sessionTimeout { expiredMembers = append(expiredMembers, memberID) } } - + // Remove expired members and trigger rebalance if necessary if len(expiredMembers) > 0 { rtm.evictExpiredMembers(group, expiredMembers) @@ -70,13 +70,13 @@ func (rtm *RebalanceTimeoutManager) checkGroupRebalanceTimeout(group *ConsumerGr func (rtm *RebalanceTimeoutManager) evictExpiredMembers(group *ConsumerGroup, expiredMembers []string) { for _, memberID := range expiredMembers { delete(group.Members, memberID) - + // If the leader was evicted, clear leader if group.Leader == memberID { group.Leader = "" } } - + // Update group state based on remaining members if len(group.Members) == 0 { group.State = GroupStateEmpty @@ -92,18 +92,18 @@ func (rtm *RebalanceTimeoutManager) evictExpiredMembers(group *ConsumerGroup, ex break } } - + // Reset to preparing rebalance to restart the process group.State = GroupStatePreparingRebalance group.Generation++ - + // Mark remaining members as pending for _, member := range group.Members { member.State = MemberStatePending } } } - + group.LastActivity = time.Now() } @@ -112,7 +112,7 @@ func (rtm *RebalanceTimeoutManager) IsRebalanceStuck(group *ConsumerGroup, maxRe if group.State != GroupStatePreparingRebalance && group.State != GroupStateCompletingRebalance { return false } - + return time.Since(group.LastActivity) > maxRebalanceDuration } @@ -120,14 +120,14 @@ func (rtm *RebalanceTimeoutManager) IsRebalanceStuck(group *ConsumerGroup, maxRe func (rtm *RebalanceTimeoutManager) ForceCompleteRebalance(group *ConsumerGroup) { group.Mu.Lock() defer group.Mu.Unlock() - + // If stuck in preparing rebalance, move to completing if group.State == GroupStatePreparingRebalance { group.State = GroupStateCompletingRebalance group.LastActivity = time.Now() return } - + // If stuck in completing rebalance, force to stable if group.State == GroupStateCompletingRebalance { group.State = GroupStateStable @@ -145,21 +145,21 @@ func (rtm *RebalanceTimeoutManager) GetRebalanceStatus(groupID string) *Rebalanc if group == nil { return nil } - + group.Mu.RLock() defer group.Mu.RUnlock() - + status := &RebalanceStatus{ - GroupID: groupID, - State: group.State, - Generation: group.Generation, - MemberCount: len(group.Members), - Leader: group.Leader, - LastActivity: group.LastActivity, - IsRebalancing: group.State == GroupStatePreparingRebalance || group.State == GroupStateCompletingRebalance, + GroupID: groupID, + State: group.State, + Generation: group.Generation, + MemberCount: len(group.Members), + Leader: group.Leader, + LastActivity: group.LastActivity, + IsRebalancing: group.State == GroupStatePreparingRebalance || group.State == GroupStateCompletingRebalance, 
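GetRebalanceStatus (continued below) computes how much session and rebalance time each member has left and clamps the result at zero. A minimal sketch of that calculation with simplified member fields — the real GroupMember carries more state:

```go
package main

import (
	"fmt"
	"time"
)

// Simplified member fields for illustration only.
type member struct {
	LastHeartbeat    time.Time
	JoinedAt         time.Time
	SessionTimeout   time.Duration
	RebalanceTimeout time.Duration
}

// remaining clamps "timeout minus elapsed" at zero, mirroring the
// SessionTimeRemaining / RebalanceTimeRemaining calculations in
// GetRebalanceStatus.
func remaining(timeout time.Duration, since, now time.Time) time.Duration {
	r := timeout - now.Sub(since)
	if r < 0 {
		r = 0
	}
	return r
}

func main() {
	now := time.Now()
	m := member{
		LastHeartbeat:    now.Add(-4 * time.Second),
		JoinedAt:         now.Add(-7 * time.Minute),
		SessionTimeout:   10 * time.Second,
		RebalanceTimeout: 5 * time.Minute,
	}
	fmt.Println(remaining(m.SessionTimeout, m.LastHeartbeat, now))  // 6s left
	fmt.Println(remaining(m.RebalanceTimeout, m.JoinedAt, now))     // 0s (expired)
}
```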
RebalanceDuration: time.Since(group.LastActivity), } - + // Calculate member timeout status now := time.Now() for memberID, member := range group.Members { @@ -171,48 +171,48 @@ func (rtm *RebalanceTimeoutManager) GetRebalanceStatus(groupID string) *Rebalanc SessionTimeout: time.Duration(member.SessionTimeout) * time.Millisecond, RebalanceTimeout: time.Duration(member.RebalanceTimeout) * time.Millisecond, } - + // Calculate time until session timeout sessionTimeRemaining := memberStatus.SessionTimeout - now.Sub(member.LastHeartbeat) if sessionTimeRemaining < 0 { sessionTimeRemaining = 0 } memberStatus.SessionTimeRemaining = sessionTimeRemaining - + // Calculate time until rebalance timeout rebalanceTimeRemaining := memberStatus.RebalanceTimeout - now.Sub(member.JoinedAt) if rebalanceTimeRemaining < 0 { rebalanceTimeRemaining = 0 } memberStatus.RebalanceTimeRemaining = rebalanceTimeRemaining - + status.Members = append(status.Members, memberStatus) } - + return status } // RebalanceStatus represents the current status of a group's rebalance type RebalanceStatus struct { - GroupID string `json:"group_id"` - State GroupState `json:"state"` - Generation int32 `json:"generation"` - MemberCount int `json:"member_count"` - Leader string `json:"leader"` - LastActivity time.Time `json:"last_activity"` - IsRebalancing bool `json:"is_rebalancing"` - RebalanceDuration time.Duration `json:"rebalance_duration"` - Members []MemberTimeoutStatus `json:"members"` + GroupID string `json:"group_id"` + State GroupState `json:"state"` + Generation int32 `json:"generation"` + MemberCount int `json:"member_count"` + Leader string `json:"leader"` + LastActivity time.Time `json:"last_activity"` + IsRebalancing bool `json:"is_rebalancing"` + RebalanceDuration time.Duration `json:"rebalance_duration"` + Members []MemberTimeoutStatus `json:"members"` } // MemberTimeoutStatus represents timeout status for a group member type MemberTimeoutStatus struct { - MemberID string `json:"member_id"` - State MemberState `json:"state"` - LastHeartbeat time.Time `json:"last_heartbeat"` - JoinedAt time.Time `json:"joined_at"` - SessionTimeout time.Duration `json:"session_timeout"` - RebalanceTimeout time.Duration `json:"rebalance_timeout"` - SessionTimeRemaining time.Duration `json:"session_time_remaining"` - RebalanceTimeRemaining time.Duration `json:"rebalance_time_remaining"` + MemberID string `json:"member_id"` + State MemberState `json:"state"` + LastHeartbeat time.Time `json:"last_heartbeat"` + JoinedAt time.Time `json:"joined_at"` + SessionTimeout time.Duration `json:"session_timeout"` + RebalanceTimeout time.Duration `json:"rebalance_timeout"` + SessionTimeRemaining time.Duration `json:"session_time_remaining"` + RebalanceTimeRemaining time.Duration `json:"rebalance_time_remaining"` } diff --git a/weed/mq/kafka/consumer/rebalance_timeout_test.go b/weed/mq/kafka/consumer/rebalance_timeout_test.go index ac5f90aee..61dbf3fc5 100644 --- a/weed/mq/kafka/consumer/rebalance_timeout_test.go +++ b/weed/mq/kafka/consumer/rebalance_timeout_test.go @@ -8,14 +8,14 @@ import ( func TestRebalanceTimeoutManager_CheckRebalanceTimeouts(t *testing.T) { coordinator := NewGroupCoordinator() defer coordinator.Close() - + rtm := coordinator.rebalanceTimeoutManager - + // Create a group with a member that has a short rebalance timeout group := coordinator.GetOrCreateGroup("test-group") group.Mu.Lock() group.State = GroupStatePreparingRebalance - + member := &GroupMember{ ID: "member1", ClientID: "client1", @@ -27,15 +27,15 @@ func 
TestRebalanceTimeoutManager_CheckRebalanceTimeouts(t *testing.T) { } group.Members["member1"] = member group.Mu.Unlock() - + // Check timeouts - member should be evicted rtm.CheckRebalanceTimeouts() - + group.Mu.RLock() if len(group.Members) != 0 { t.Errorf("Expected member to be evicted due to rebalance timeout, but %d members remain", len(group.Members)) } - + if group.State != GroupStateEmpty { t.Errorf("Expected group state to be Empty after member eviction, got %s", group.State.String()) } @@ -45,18 +45,18 @@ func TestRebalanceTimeoutManager_CheckRebalanceTimeouts(t *testing.T) { func TestRebalanceTimeoutManager_SessionTimeoutFallback(t *testing.T) { coordinator := NewGroupCoordinator() defer coordinator.Close() - + rtm := coordinator.rebalanceTimeoutManager - + // Create a group with a member that has exceeded session timeout group := coordinator.GetOrCreateGroup("test-group") group.Mu.Lock() group.State = GroupStatePreparingRebalance - + member := &GroupMember{ ID: "member1", ClientID: "client1", - SessionTimeout: 1000, // 1 second + SessionTimeout: 1000, // 1 second RebalanceTimeout: 30000, // 30 seconds State: MemberStatePending, LastHeartbeat: time.Now().Add(-2 * time.Second), // Last heartbeat 2 seconds ago @@ -64,10 +64,10 @@ func TestRebalanceTimeoutManager_SessionTimeoutFallback(t *testing.T) { } group.Members["member1"] = member group.Mu.Unlock() - + // Check timeouts - member should be evicted due to session timeout rtm.CheckRebalanceTimeouts() - + group.Mu.RLock() if len(group.Members) != 0 { t.Errorf("Expected member to be evicted due to session timeout, but %d members remain", len(group.Members)) @@ -78,15 +78,15 @@ func TestRebalanceTimeoutManager_SessionTimeoutFallback(t *testing.T) { func TestRebalanceTimeoutManager_LeaderEviction(t *testing.T) { coordinator := NewGroupCoordinator() defer coordinator.Close() - + rtm := coordinator.rebalanceTimeoutManager - + // Create a group with leader and another member group := coordinator.GetOrCreateGroup("test-group") group.Mu.Lock() group.State = GroupStatePreparingRebalance group.Leader = "member1" - + // Leader with expired rebalance timeout leader := &GroupMember{ ID: "member1", @@ -98,7 +98,7 @@ func TestRebalanceTimeoutManager_LeaderEviction(t *testing.T) { JoinedAt: time.Now().Add(-2 * time.Second), } group.Members["member1"] = leader - + // Another member that's still valid member2 := &GroupMember{ ID: "member2", @@ -111,19 +111,19 @@ func TestRebalanceTimeoutManager_LeaderEviction(t *testing.T) { } group.Members["member2"] = member2 group.Mu.Unlock() - + // Check timeouts - leader should be evicted, new leader selected rtm.CheckRebalanceTimeouts() - + group.Mu.RLock() if len(group.Members) != 1 { t.Errorf("Expected 1 member to remain after leader eviction, got %d", len(group.Members)) } - + if group.Leader != "member2" { t.Errorf("Expected member2 to become new leader, got %s", group.Leader) } - + if group.State != GroupStatePreparingRebalance { t.Errorf("Expected group to restart rebalancing after leader eviction, got %s", group.State.String()) } @@ -133,37 +133,37 @@ func TestRebalanceTimeoutManager_LeaderEviction(t *testing.T) { func TestRebalanceTimeoutManager_IsRebalanceStuck(t *testing.T) { coordinator := NewGroupCoordinator() defer coordinator.Close() - + rtm := coordinator.rebalanceTimeoutManager - + // Create a group that's been rebalancing for a while group := coordinator.GetOrCreateGroup("test-group") group.Mu.Lock() group.State = GroupStatePreparingRebalance group.LastActivity = time.Now().Add(-15 * 
time.Minute) // 15 minutes ago group.Mu.Unlock() - + // Check if rebalance is stuck (max 10 minutes) maxDuration := 10 * time.Minute if !rtm.IsRebalanceStuck(group, maxDuration) { t.Error("Expected rebalance to be detected as stuck") } - + // Test with a group that's not stuck group.Mu.Lock() group.LastActivity = time.Now().Add(-5 * time.Minute) // 5 minutes ago group.Mu.Unlock() - + if rtm.IsRebalanceStuck(group, maxDuration) { t.Error("Expected rebalance to not be detected as stuck") } - + // Test with stable group (should not be stuck) group.Mu.Lock() group.State = GroupStateStable group.LastActivity = time.Now().Add(-15 * time.Minute) group.Mu.Unlock() - + if rtm.IsRebalanceStuck(group, maxDuration) { t.Error("Stable group should not be detected as stuck") } @@ -172,37 +172,37 @@ func TestRebalanceTimeoutManager_IsRebalanceStuck(t *testing.T) { func TestRebalanceTimeoutManager_ForceCompleteRebalance(t *testing.T) { coordinator := NewGroupCoordinator() defer coordinator.Close() - + rtm := coordinator.rebalanceTimeoutManager - + // Test forcing completion from PreparingRebalance group := coordinator.GetOrCreateGroup("test-group") group.Mu.Lock() group.State = GroupStatePreparingRebalance - + member := &GroupMember{ ID: "member1", State: MemberStatePending, } group.Members["member1"] = member group.Mu.Unlock() - + rtm.ForceCompleteRebalance(group) - + group.Mu.RLock() if group.State != GroupStateCompletingRebalance { t.Errorf("Expected group state to be CompletingRebalance, got %s", group.State.String()) } group.Mu.RUnlock() - + // Test forcing completion from CompletingRebalance rtm.ForceCompleteRebalance(group) - + group.Mu.RLock() if group.State != GroupStateStable { t.Errorf("Expected group state to be Stable, got %s", group.State.String()) } - + if member.State != MemberStateStable { t.Errorf("Expected member state to be Stable, got %s", member.State.String()) } @@ -212,15 +212,15 @@ func TestRebalanceTimeoutManager_ForceCompleteRebalance(t *testing.T) { func TestRebalanceTimeoutManager_GetRebalanceStatus(t *testing.T) { coordinator := NewGroupCoordinator() defer coordinator.Close() - + rtm := coordinator.rebalanceTimeoutManager - + // Test with non-existent group status := rtm.GetRebalanceStatus("non-existent") if status != nil { t.Error("Expected nil status for non-existent group") } - + // Create a group with members group := coordinator.GetOrCreateGroup("test-group") group.Mu.Lock() @@ -228,7 +228,7 @@ func TestRebalanceTimeoutManager_GetRebalanceStatus(t *testing.T) { group.Generation = 5 group.Leader = "member1" group.LastActivity = time.Now().Add(-2 * time.Minute) - + member1 := &GroupMember{ ID: "member1", State: MemberStatePending, @@ -238,7 +238,7 @@ func TestRebalanceTimeoutManager_GetRebalanceStatus(t *testing.T) { RebalanceTimeout: 300000, // 5 minutes } group.Members["member1"] = member1 - + member2 := &GroupMember{ ID: "member2", State: MemberStatePending, @@ -249,48 +249,48 @@ func TestRebalanceTimeoutManager_GetRebalanceStatus(t *testing.T) { } group.Members["member2"] = member2 group.Mu.Unlock() - + // Get status status = rtm.GetRebalanceStatus("test-group") - + if status == nil { t.Fatal("Expected non-nil status") } - + if status.GroupID != "test-group" { t.Errorf("Expected group ID 'test-group', got %s", status.GroupID) } - + if status.State != GroupStatePreparingRebalance { t.Errorf("Expected state PreparingRebalance, got %s", status.State.String()) } - + if status.Generation != 5 { t.Errorf("Expected generation 5, got %d", status.Generation) } - + if 
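The tests here drive CheckRebalanceTimeouts by hand; a long-running coordinator would typically invoke it on a ticker. A rough in-package sketch under stated assumptions: the coordinator type is `*GroupCoordinator` (implied by `NewGroupCoordinator()` in the tests), the `rebalanceTimeoutManager` field is unexported so this only compiles inside the consumer package, and the ticker wiring itself is an assumption rather than existing code:

```go
package consumer

import "time"

// runTimeoutLoop periodically sweeps all groups for members that exceeded
// their rebalance or session timeouts — the same check the tests above
// invoke directly. Stop the loop by closing done.
func runTimeoutLoop(coordinator *GroupCoordinator, interval time.Duration, done <-chan struct{}) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			coordinator.rebalanceTimeoutManager.CheckRebalanceTimeouts()
		case <-done:
			return
		}
	}
}
```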
status.MemberCount != 2 { t.Errorf("Expected 2 members, got %d", status.MemberCount) } - + if status.Leader != "member1" { t.Errorf("Expected leader 'member1', got %s", status.Leader) } - + if !status.IsRebalancing { t.Error("Expected IsRebalancing to be true") } - + if len(status.Members) != 2 { t.Errorf("Expected 2 member statuses, got %d", len(status.Members)) } - + // Check member timeout calculations for _, memberStatus := range status.Members { if memberStatus.SessionTimeRemaining < 0 { t.Errorf("Session time remaining should not be negative for member %s", memberStatus.MemberID) } - + if memberStatus.RebalanceTimeRemaining < 0 { t.Errorf("Rebalance time remaining should not be negative for member %s", memberStatus.MemberID) } @@ -300,14 +300,14 @@ func TestRebalanceTimeoutManager_GetRebalanceStatus(t *testing.T) { func TestRebalanceTimeoutManager_DefaultRebalanceTimeout(t *testing.T) { coordinator := NewGroupCoordinator() defer coordinator.Close() - + rtm := coordinator.rebalanceTimeoutManager - + // Create a group with a member that has no rebalance timeout set (0) group := coordinator.GetOrCreateGroup("test-group") group.Mu.Lock() group.State = GroupStatePreparingRebalance - + member := &GroupMember{ ID: "member1", ClientID: "client1", @@ -319,10 +319,10 @@ func TestRebalanceTimeoutManager_DefaultRebalanceTimeout(t *testing.T) { } group.Members["member1"] = member group.Mu.Unlock() - + // Default rebalance timeout is 5 minutes (300000ms), so member should be evicted rtm.CheckRebalanceTimeouts() - + group.Mu.RLock() if len(group.Members) != 0 { t.Errorf("Expected member to be evicted using default rebalance timeout, but %d members remain", len(group.Members)) diff --git a/weed/mq/kafka/consumer_offset/memory_storage.go b/weed/mq/kafka/consumer_offset/memory_storage.go index 8814107bb..6e5c95782 100644 --- a/weed/mq/kafka/consumer_offset/memory_storage.go +++ b/weed/mq/kafka/consumer_offset/memory_storage.go @@ -142,4 +142,3 @@ func (m *MemoryStorage) Close() error { return nil } - diff --git a/weed/mq/kafka/consumer_offset/memory_storage_test.go b/weed/mq/kafka/consumer_offset/memory_storage_test.go index eaf849dc5..22720267b 100644 --- a/weed/mq/kafka/consumer_offset/memory_storage_test.go +++ b/weed/mq/kafka/consumer_offset/memory_storage_test.go @@ -206,4 +206,3 @@ func TestMemoryStorageOverwrite(t *testing.T) { assert.Equal(t, int64(20), offset) assert.Equal(t, "meta2", metadata) } - diff --git a/weed/mq/kafka/consumer_offset/storage.go b/weed/mq/kafka/consumer_offset/storage.go index d3f999faa..ad191b936 100644 --- a/weed/mq/kafka/consumer_offset/storage.go +++ b/weed/mq/kafka/consumer_offset/storage.go @@ -56,4 +56,3 @@ var ( ErrInvalidPartition = fmt.Errorf("invalid partition") ErrStorageClosed = fmt.Errorf("storage is closed") ) - diff --git a/weed/mq/kafka/gateway/test_mock_handler.go b/weed/mq/kafka/gateway/test_mock_handler.go index c01aac970..ef0a012ef 100644 --- a/weed/mq/kafka/gateway/test_mock_handler.go +++ b/weed/mq/kafka/gateway/test_mock_handler.go @@ -121,7 +121,6 @@ func (m *mockSeaweedMQHandler) ProduceRecord(ctx context.Context, topicName stri offset := m.offsets[topicName][partitionID] m.offsets[topicName][partitionID]++ - // Store record record := &mockRecord{ key: key, diff --git a/weed/mq/kafka/package.go b/weed/mq/kafka/package.go index 01743a12b..1cb5dc8ed 100644 --- a/weed/mq/kafka/package.go +++ b/weed/mq/kafka/package.go @@ -9,5 +9,3 @@ package kafka // - offset/: Offset management // - schema/: Schema registry integration // - consumer/: Consumer 
group coordination - - diff --git a/weed/mq/kafka/partition_mapping.go b/weed/mq/kafka/partition_mapping.go index 697e67386..a956c3cde 100644 --- a/weed/mq/kafka/partition_mapping.go +++ b/weed/mq/kafka/partition_mapping.go @@ -51,5 +51,3 @@ func GetRangeSize() int32 { func GetMaxKafkaPartitions() int32 { return int32(pub_balancer.MaxPartitionCount) / 35 // 72 partitions } - - diff --git a/weed/mq/kafka/protocol/describe_cluster.go b/weed/mq/kafka/protocol/describe_cluster.go index af622de3c..5d963e45b 100644 --- a/weed/mq/kafka/protocol/describe_cluster.go +++ b/weed/mq/kafka/protocol/describe_cluster.go @@ -37,7 +37,6 @@ func (h *Handler) handleDescribeCluster(correlationID uint32, apiVersion uint16, // Tagged fields at end of request // (We don't parse them, just skip) - // Build response response := make([]byte, 0, 256) @@ -109,6 +108,5 @@ func (h *Handler) handleDescribeCluster(correlationID uint32, apiVersion uint16, // Response-level tagged fields (flexible response) response = append(response, 0x00) // Empty tagged fields - return response, nil } diff --git a/weed/mq/kafka/protocol/flexible_versions.go b/weed/mq/kafka/protocol/flexible_versions.go index ddb55e74f..77d1510ae 100644 --- a/weed/mq/kafka/protocol/flexible_versions.go +++ b/weed/mq/kafka/protocol/flexible_versions.go @@ -268,7 +268,6 @@ func parseCompactString(data []byte) ([]byte, int) { return nil, 0 } - if actualLength == 0 { // Empty string (length was 1) return []byte{}, consumed diff --git a/weed/mq/kafka/protocol/group_introspection.go b/weed/mq/kafka/protocol/group_introspection.go index 0ff3ed4b5..959a015a1 100644 --- a/weed/mq/kafka/protocol/group_introspection.go +++ b/weed/mq/kafka/protocol/group_introspection.go @@ -107,13 +107,13 @@ func (h *Handler) describeGroup(groupID string) DescribeGroupsGroup { } return DescribeGroupsGroup{ - ErrorCode: 0, - GroupID: groupID, - State: stateStr, - ProtocolType: "consumer", // Default protocol type - Protocol: group.Protocol, - Members: members, - AuthorizedOps: []int32{}, // Empty for now + ErrorCode: 0, + GroupID: groupID, + State: stateStr, + ProtocolType: "consumer", // Default protocol type + Protocol: group.Protocol, + Members: members, + AuthorizedOps: []int32{}, // Empty for now } } @@ -175,8 +175,8 @@ func (h *Handler) listAllGroups(statesFilter []string) []ListGroupsGroup { // Request/Response structures type DescribeGroupsRequest struct { - GroupIDs []string - IncludeAuthorizedOps bool + GroupIDs []string + IncludeAuthorizedOps bool } type DescribeGroupsResponse struct { diff --git a/weed/mq/kafka/protocol/handler.go b/weed/mq/kafka/protocol/handler.go index 2768793d2..8dffd2313 100644 --- a/weed/mq/kafka/protocol/handler.go +++ b/weed/mq/kafka/protocol/handler.go @@ -661,7 +661,7 @@ func (h *Handler) HandleConn(ctx context.Context, conn net.Conn) error { return } // Removed V(4) logging from hot path - only log errors and important events - + // Wrap request processing with panic recovery to prevent deadlocks // If processRequestSync panics, we MUST still send a response to avoid blocking the response writer var response []byte @@ -881,7 +881,6 @@ func (h *Handler) HandleConn(ctx context.Context, conn net.Conn) error { return fmt.Errorf("read message: %w", err) } - // Parse at least the basic header to get API key and correlation ID if len(messageBuf) < 8 { return fmt.Errorf("message too short") @@ -890,7 +889,7 @@ func (h *Handler) HandleConn(ctx context.Context, conn net.Conn) error { apiKey := binary.BigEndian.Uint16(messageBuf[0:2]) apiVersion := 
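For context on the handler.go hunk in progress here: HandleConn reads the API key, API version, and correlation ID from the first 8 bytes of the request before validating the version. A standalone sketch of that header decode (frame-length handling omitted):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// parseHeader extracts the fixed-size prefix of a Kafka request:
// api_key (int16), api_version (int16), correlation_id (int32), all
// big-endian, exactly as HandleConn does before version validation.
func parseHeader(messageBuf []byte) (apiKey, apiVersion uint16, correlationID uint32, err error) {
	if len(messageBuf) < 8 {
		return 0, 0, 0, fmt.Errorf("message too short: %d bytes", len(messageBuf))
	}
	apiKey = binary.BigEndian.Uint16(messageBuf[0:2])
	apiVersion = binary.BigEndian.Uint16(messageBuf[2:4])
	correlationID = binary.BigEndian.Uint32(messageBuf[4:8])
	return apiKey, apiVersion, correlationID, nil
}

func main() {
	// ApiVersions (key 18), version 3, correlation id 42.
	buf := []byte{0x00, 0x12, 0x00, 0x03, 0x00, 0x00, 0x00, 0x2a}
	k, v, c, _ := parseHeader(buf)
	fmt.Println(k, v, c) // 18 3 42
}
```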
binary.BigEndian.Uint16(messageBuf[2:4]) correlationID := binary.BigEndian.Uint32(messageBuf[4:8]) - + // Validate API version against what we support if err := h.validateAPIVersion(apiKey, apiVersion); err != nil { glog.Errorf("API VERSION VALIDATION FAILED: Key=%d (%s), Version=%d, error=%v", apiKey, getAPIName(APIKey(apiKey)), apiVersion, err) @@ -1050,7 +1049,6 @@ func (h *Handler) processRequestSync(req *kafkaRequest) ([]byte, error) { requestStart := time.Now() apiName := getAPIName(APIKey(req.apiKey)) - // Only log high-volume requests at V(2), not V(4) if glog.V(2) { glog.V(2).Infof("[API] %s (key=%d, ver=%d, corr=%d)", @@ -1589,15 +1587,15 @@ func (h *Handler) HandleMetadataV2(correlationID uint32, requestBody []byte) ([] for partitionID := int32(0); partitionID < partitionCount; partitionID++ { binary.Write(&buf, binary.BigEndian, int16(0)) // ErrorCode binary.Write(&buf, binary.BigEndian, partitionID) // PartitionIndex - binary.Write(&buf, binary.BigEndian, nodeID) // LeaderID + binary.Write(&buf, binary.BigEndian, nodeID) // LeaderID // ReplicaNodes array (4 bytes length + nodes) binary.Write(&buf, binary.BigEndian, int32(1)) // 1 replica - binary.Write(&buf, binary.BigEndian, nodeID) // NodeID 1 + binary.Write(&buf, binary.BigEndian, nodeID) // NodeID 1 // IsrNodes array (4 bytes length + nodes) binary.Write(&buf, binary.BigEndian, int32(1)) // 1 ISR node - binary.Write(&buf, binary.BigEndian, nodeID) // NodeID 1 + binary.Write(&buf, binary.BigEndian, nodeID) // NodeID 1 } } @@ -1716,15 +1714,15 @@ func (h *Handler) HandleMetadataV3V4(correlationID uint32, requestBody []byte) ( for partitionID := int32(0); partitionID < partitionCount; partitionID++ { binary.Write(&buf, binary.BigEndian, int16(0)) // ErrorCode binary.Write(&buf, binary.BigEndian, partitionID) // PartitionIndex - binary.Write(&buf, binary.BigEndian, nodeID) // LeaderID + binary.Write(&buf, binary.BigEndian, nodeID) // LeaderID // ReplicaNodes array (4 bytes length + nodes) binary.Write(&buf, binary.BigEndian, int32(1)) // 1 replica - binary.Write(&buf, binary.BigEndian, nodeID) // NodeID 1 + binary.Write(&buf, binary.BigEndian, nodeID) // NodeID 1 // IsrNodes array (4 bytes length + nodes) binary.Write(&buf, binary.BigEndian, int32(1)) // 1 ISR node - binary.Write(&buf, binary.BigEndian, nodeID) // NodeID 1 + binary.Write(&buf, binary.BigEndian, nodeID) // NodeID 1 } } @@ -1737,7 +1735,7 @@ func (h *Handler) HandleMetadataV3V4(correlationID uint32, requestBody []byte) ( } if len(response) > 100 { } - + return response, nil } @@ -1828,7 +1826,6 @@ func (h *Handler) handleMetadataV5ToV8(correlationID uint32, requestBody []byte, // NOTE: Correlation ID is handled by writeResponseWithCorrelationID // Do NOT include it in the response body - // ThrottleTimeMs (4 bytes) - v3+ addition binary.Write(&buf, binary.BigEndian, int32(0)) // No throttling @@ -1896,7 +1893,7 @@ func (h *Handler) handleMetadataV5ToV8(correlationID uint32, requestBody []byte, for partitionID := int32(0); partitionID < partitionCount; partitionID++ { binary.Write(&buf, binary.BigEndian, int16(0)) // ErrorCode binary.Write(&buf, binary.BigEndian, partitionID) // PartitionIndex - binary.Write(&buf, binary.BigEndian, nodeID) // LeaderID + binary.Write(&buf, binary.BigEndian, nodeID) // LeaderID // LeaderEpoch (4 bytes) - v7+ addition if apiVersion >= 7 { @@ -1905,11 +1902,11 @@ func (h *Handler) handleMetadataV5ToV8(correlationID uint32, requestBody []byte, // ReplicaNodes array (4 bytes length + nodes) binary.Write(&buf, binary.BigEndian, 
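The Metadata handlers touched above encode each partition entry into a bytes.Buffer with binary.Write: error code, partition index, leader, then one-element replica and ISR arrays. A minimal sketch of one such entry; the field layout follows the non-flexible Metadata versions shown in this diff (v7+ additionally writes a leader epoch):

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// writePartition appends one Metadata partition entry for a single-broker
// setup: no error, the given partition index, and replica/ISR lists that both
// point at nodeID, matching the HandleMetadataV2/V3V4 layout in the diff.
func writePartition(buf *bytes.Buffer, partitionID, nodeID int32) {
	binary.Write(buf, binary.BigEndian, int16(0))    // ErrorCode
	binary.Write(buf, binary.BigEndian, partitionID) // PartitionIndex
	binary.Write(buf, binary.BigEndian, nodeID)      // LeaderID
	binary.Write(buf, binary.BigEndian, int32(1))    // ReplicaNodes length
	binary.Write(buf, binary.BigEndian, nodeID)      // replica node
	binary.Write(buf, binary.BigEndian, int32(1))    // IsrNodes length
	binary.Write(buf, binary.BigEndian, nodeID)      // ISR node
}

func main() {
	var buf bytes.Buffer
	for p := int32(0); p < 3; p++ {
		writePartition(&buf, p, 1)
	}
	fmt.Println(buf.Len(), "bytes") // 3 partitions * 26 bytes = 78 bytes
}
```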
int32(1)) // 1 replica - binary.Write(&buf, binary.BigEndian, nodeID) // NodeID 1 + binary.Write(&buf, binary.BigEndian, nodeID) // NodeID 1 // IsrNodes array (4 bytes length + nodes) binary.Write(&buf, binary.BigEndian, int32(1)) // 1 ISR node - binary.Write(&buf, binary.BigEndian, nodeID) // NodeID 1 + binary.Write(&buf, binary.BigEndian, nodeID) // NodeID 1 // OfflineReplicas array (4 bytes length + nodes) - v5+ addition binary.Write(&buf, binary.BigEndian, int32(0)) // No offline replicas @@ -1930,7 +1927,7 @@ func (h *Handler) handleMetadataV5ToV8(correlationID uint32, requestBody []byte, } if len(response) > 100 { } - + return response, nil } @@ -1994,12 +1991,11 @@ func (h *Handler) handleListOffsets(correlationID uint32, apiVersion uint16, req // Parse minimal request to understand what's being asked (header already stripped) offset := 0 - maxBytes := len(requestBody) if maxBytes > 64 { maxBytes = 64 } - + // v1+ has replica_id(4) if apiVersion >= 1 { if len(requestBody) < offset+4 { @@ -3930,12 +3926,11 @@ func (h *Handler) handleInitProducerId(correlationID uint32, apiVersion uint16, // v2+: transactional_id(NULLABLE_STRING) + transaction_timeout_ms(INT32) + producer_id(INT64) + producer_epoch(INT16) // v4+: Uses flexible format with tagged fields - maxBytes := len(requestBody) if maxBytes > 64 { maxBytes = 64 } - + offset := 0 // Parse transactional_id (NULLABLE_STRING or COMPACT_NULLABLE_STRING for flexible versions) diff --git a/weed/mq/kafka/protocol/offset_storage_adapter.go b/weed/mq/kafka/protocol/offset_storage_adapter.go index 079c5b621..0481b4c42 100644 --- a/weed/mq/kafka/protocol/offset_storage_adapter.go +++ b/weed/mq/kafka/protocol/offset_storage_adapter.go @@ -47,4 +47,3 @@ func (a *offsetStorageAdapter) DeleteGroup(group string) error { func (a *offsetStorageAdapter) Close() error { return a.storage.Close() } - diff --git a/weed/mq/kafka/protocol/response_validation_example_test.go b/weed/mq/kafka/protocol/response_validation_example_test.go index 9476bb791..a69c03f4f 100644 --- a/weed/mq/kafka/protocol/response_validation_example_test.go +++ b/weed/mq/kafka/protocol/response_validation_example_test.go @@ -140,4 +140,3 @@ func TestMetadataResponseHasBrokers(t *testing.T) { t.Logf("✓ Metadata response correctly has %d broker(s)", parsedCount) } - diff --git a/weed/mq/kafka/schema/envelope_test.go b/weed/mq/kafka/schema/envelope_test.go index 4a209779e..24f16ee44 100644 --- a/weed/mq/kafka/schema/envelope_test.go +++ b/weed/mq/kafka/schema/envelope_test.go @@ -7,46 +7,46 @@ import ( func TestParseConfluentEnvelope(t *testing.T) { tests := []struct { - name string - input []byte - expectOK bool - expectID uint32 + name string + input []byte + expectOK bool + expectID uint32 expectFormat Format }{ { - name: "valid Avro message", - input: []byte{0x00, 0x00, 0x00, 0x00, 0x01, 0x10, 0x48, 0x65, 0x6c, 0x6c, 0x6f}, // schema ID 1 + "Hello" - expectOK: true, - expectID: 1, + name: "valid Avro message", + input: []byte{0x00, 0x00, 0x00, 0x00, 0x01, 0x10, 0x48, 0x65, 0x6c, 0x6c, 0x6f}, // schema ID 1 + "Hello" + expectOK: true, + expectID: 1, expectFormat: FormatAvro, }, { - name: "valid message with larger schema ID", - input: []byte{0x00, 0x00, 0x00, 0x04, 0xd2, 0x02, 0x66, 0x6f, 0x6f}, // schema ID 1234 + "foo" - expectOK: true, - expectID: 1234, + name: "valid message with larger schema ID", + input: []byte{0x00, 0x00, 0x00, 0x04, 0xd2, 0x02, 0x66, 0x6f, 0x6f}, // schema ID 1234 + "foo" + expectOK: true, + expectID: 1234, expectFormat: FormatAvro, }, { - name: "too short 
message", - input: []byte{0x00, 0x00, 0x00}, - expectOK: false, + name: "too short message", + input: []byte{0x00, 0x00, 0x00}, + expectOK: false, }, { - name: "no magic byte", - input: []byte{0x01, 0x00, 0x00, 0x00, 0x01, 0x48, 0x65, 0x6c, 0x6c, 0x6f}, - expectOK: false, + name: "no magic byte", + input: []byte{0x01, 0x00, 0x00, 0x00, 0x01, 0x48, 0x65, 0x6c, 0x6c, 0x6f}, + expectOK: false, }, { - name: "empty message", - input: []byte{}, - expectOK: false, + name: "empty message", + input: []byte{}, + expectOK: false, }, { - name: "minimal valid message", - input: []byte{0x00, 0x00, 0x00, 0x00, 0x01}, // schema ID 1, empty payload - expectOK: true, - expectID: 1, + name: "minimal valid message", + input: []byte{0x00, 0x00, 0x00, 0x00, 0x01}, // schema ID 1, empty payload + expectOK: true, + expectID: 1, expectFormat: FormatAvro, }, } @@ -54,24 +54,24 @@ func TestParseConfluentEnvelope(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { envelope, ok := ParseConfluentEnvelope(tt.input) - + if ok != tt.expectOK { t.Errorf("ParseConfluentEnvelope() ok = %v, want %v", ok, tt.expectOK) return } - + if !tt.expectOK { return // No need to check further if we expected failure } - + if envelope.SchemaID != tt.expectID { t.Errorf("ParseConfluentEnvelope() schemaID = %v, want %v", envelope.SchemaID, tt.expectID) } - + if envelope.Format != tt.expectFormat { t.Errorf("ParseConfluentEnvelope() format = %v, want %v", envelope.Format, tt.expectFormat) } - + // Verify payload extraction expectedPayloadLen := len(tt.input) - 5 // 5 bytes for magic + schema ID if len(envelope.Payload) != expectedPayloadLen { @@ -150,11 +150,11 @@ func TestExtractSchemaID(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { id, ok := ExtractSchemaID(tt.input) - + if ok != tt.expectOK { t.Errorf("ExtractSchemaID() ok = %v, want %v", ok, tt.expectOK) } - + if id != tt.expectID { t.Errorf("ExtractSchemaID() id = %v, want %v", id, tt.expectID) } @@ -200,12 +200,12 @@ func TestCreateConfluentEnvelope(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { result := CreateConfluentEnvelope(tt.format, tt.schemaID, tt.indexes, tt.payload) - + if len(result) != len(tt.expected) { t.Errorf("CreateConfluentEnvelope() length = %v, want %v", len(result), len(tt.expected)) return } - + for i, b := range result { if b != tt.expected[i] { t.Errorf("CreateConfluentEnvelope() byte[%d] = %v, want %v", i, b, tt.expected[i]) @@ -262,7 +262,7 @@ func TestEnvelopeValidate(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { err := tt.envelope.Validate() - + if (err != nil) != tt.expectErr { t.Errorf("Envelope.Validate() error = %v, expectErr %v", err, tt.expectErr) } @@ -297,7 +297,7 @@ func TestEnvelopeMetadata(t *testing.T) { func BenchmarkParseConfluentEnvelope(b *testing.B) { // Create a test message testMsg := make([]byte, 1024) - testMsg[0] = 0x00 // Magic byte + testMsg[0] = 0x00 // Magic byte binary.BigEndian.PutUint32(testMsg[1:5], 123) // Schema ID // Fill rest with dummy data for i := 5; i < len(testMsg); i++ { diff --git a/weed/mq/kafka/schema/envelope_varint_test.go b/weed/mq/kafka/schema/envelope_varint_test.go index 92004c3d6..8bc51d7a0 100644 --- a/weed/mq/kafka/schema/envelope_varint_test.go +++ b/weed/mq/kafka/schema/envelope_varint_test.go @@ -100,7 +100,7 @@ func TestCreateConfluentEnvelopeWithProtobufIndexes(t *testing.T) { parsed, ok := ParseConfluentEnvelope(envelope) require.True(t, ok, "Should be able to parse envelope") 
assert.Equal(t, tc.schemaID, parsed.SchemaID) - + if tc.format == FormatProtobuf && len(tc.indexes) == 0 { // For Protobuf without indexes, payload should match assert.Equal(t, tc.payload, parsed.Payload, "Payload should match") diff --git a/weed/mq/metadata_constants.go b/weed/mq/metadata_constants.go index 18ba98a31..31f86c910 100644 --- a/weed/mq/metadata_constants.go +++ b/weed/mq/metadata_constants.go @@ -17,5 +17,3 @@ const ( // Source file tracking for parquet deduplication ExtendedAttrSources = "sources" // JSON-encoded list of source log files ) - - diff --git a/weed/mq/offset/migration.go b/weed/mq/offset/migration.go index 106129206..4e0a6ab12 100644 --- a/weed/mq/offset/migration.go +++ b/weed/mq/offset/migration.go @@ -118,17 +118,17 @@ func (m *MigrationManager) GetCurrentVersion() (int, error) { if err != nil { return 0, fmt.Errorf("failed to create migrations table: %w", err) } - + var version sql.NullInt64 err = m.db.QueryRow("SELECT MAX(version) FROM schema_migrations").Scan(&version) if err != nil { return 0, fmt.Errorf("failed to get current version: %w", err) } - + if !version.Valid { return 0, nil // No migrations applied yet } - + return int(version.Int64), nil } @@ -138,29 +138,29 @@ func (m *MigrationManager) ApplyMigrations() error { if err != nil { return fmt.Errorf("failed to get current version: %w", err) } - + migrations := GetMigrations() - + for _, migration := range migrations { if migration.Version <= currentVersion { continue // Already applied } - + fmt.Printf("Applying migration %d: %s\n", migration.Version, migration.Description) - + // Begin transaction tx, err := m.db.Begin() if err != nil { return fmt.Errorf("failed to begin transaction for migration %d: %w", migration.Version, err) } - + // Execute migration SQL _, err = tx.Exec(migration.SQL) if err != nil { tx.Rollback() return fmt.Errorf("failed to execute migration %d: %w", migration.Version, err) } - + // Record migration as applied _, err = tx.Exec( "INSERT INTO schema_migrations (version, description, applied_at) VALUES (?, ?, ?)", @@ -172,16 +172,16 @@ func (m *MigrationManager) ApplyMigrations() error { tx.Rollback() return fmt.Errorf("failed to record migration %d: %w", migration.Version, err) } - + // Commit transaction err = tx.Commit() if err != nil { return fmt.Errorf("failed to commit migration %d: %w", migration.Version, err) } - + fmt.Printf("Successfully applied migration %d\n", migration.Version) } - + return nil } @@ -203,7 +203,7 @@ func (m *MigrationManager) GetAppliedMigrations() ([]AppliedMigration, error) { return nil, fmt.Errorf("failed to query applied migrations: %w", err) } defer rows.Close() - + var migrations []AppliedMigration for rows.Next() { var migration AppliedMigration @@ -213,7 +213,7 @@ func (m *MigrationManager) GetAppliedMigrations() ([]AppliedMigration, error) { } migrations = append(migrations, migration) } - + return migrations, nil } @@ -223,17 +223,17 @@ func (m *MigrationManager) ValidateSchema() error { if err != nil { return fmt.Errorf("failed to get current version: %w", err) } - + migrations := GetMigrations() if len(migrations) == 0 { return nil } - + latestVersion := migrations[len(migrations)-1].Version if currentVersion < latestVersion { return fmt.Errorf("schema is outdated: current version %d, latest version %d", currentVersion, latestVersion) } - + return nil } @@ -253,21 +253,21 @@ func getCurrentTimestamp() int64 { func CreateDatabase(dbPath string) (*sql.DB, error) { // TODO: Support different database types (PostgreSQL, MySQL, etc.) 
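A usage sketch for the migration code in this file, under stated assumptions: the package is importable as `weed/mq/offset` (inferred from the file path), the exported names are those shown in the diff (CreateDatabase, NewMigrationManager, ValidateSchema), and the `sqlite3` driver registered via mattn/go-sqlite3 backs the `sql.Open("sqlite3", ...)` call. CreateDatabase opens the file, applies the PRAGMAs, and runs pending migrations before returning the handle:

```go
package main

import (
	"log"

	_ "github.com/mattn/go-sqlite3" // driver assumed by the "sqlite3" sql.Open call
	"github.com/seaweedfs/seaweedfs/weed/mq/offset" // import path assumed from the file location
)

func main() {
	db, err := offset.CreateDatabase("/tmp/mq_offsets.db")
	if err != nil {
		log.Fatalf("open offset db: %v", err)
	}
	defer db.Close()

	// Optional sanity check that the schema is at the latest version.
	mm := offset.NewMigrationManager(db)
	if err := mm.ValidateSchema(); err != nil {
		log.Fatalf("schema validation: %v", err)
	}
}
```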
// ASSUMPTION: Using SQLite for now, can be extended for other databases - + db, err := sql.Open("sqlite3", dbPath) if err != nil { return nil, fmt.Errorf("failed to open database: %w", err) } - + // Configure SQLite for better performance pragmas := []string{ - "PRAGMA journal_mode=WAL", // Write-Ahead Logging for better concurrency - "PRAGMA synchronous=NORMAL", // Balance between safety and performance - "PRAGMA cache_size=10000", // Increase cache size - "PRAGMA foreign_keys=ON", // Enable foreign key constraints - "PRAGMA temp_store=MEMORY", // Store temporary tables in memory + "PRAGMA journal_mode=WAL", // Write-Ahead Logging for better concurrency + "PRAGMA synchronous=NORMAL", // Balance between safety and performance + "PRAGMA cache_size=10000", // Increase cache size + "PRAGMA foreign_keys=ON", // Enable foreign key constraints + "PRAGMA temp_store=MEMORY", // Store temporary tables in memory } - + for _, pragma := range pragmas { _, err := db.Exec(pragma) if err != nil { @@ -275,7 +275,7 @@ func CreateDatabase(dbPath string) (*sql.DB, error) { return nil, fmt.Errorf("failed to set pragma %s: %w", pragma, err) } } - + // Apply migrations migrationManager := NewMigrationManager(db) err = migrationManager.ApplyMigrations() @@ -283,7 +283,7 @@ func CreateDatabase(dbPath string) (*sql.DB, error) { db.Close() return nil, fmt.Errorf("failed to apply migrations: %w", err) } - + return db, nil } diff --git a/weed/mq/schema/flat_schema_utils_test.go b/weed/mq/schema/flat_schema_utils_test.go index 2bce9014c..779d3705f 100644 --- a/weed/mq/schema/flat_schema_utils_test.go +++ b/weed/mq/schema/flat_schema_utils_test.go @@ -248,11 +248,11 @@ func TestValidateKeyColumns(t *testing.T) { // Helper function to check if string contains substring func contains(str, substr string) bool { - return len(str) >= len(substr) && - (len(substr) == 0 || str[len(str)-len(substr):] == substr || - str[:len(substr)] == substr || - len(str) > len(substr) && (str[len(str)-len(substr)-1:len(str)-len(substr)] == " " || str[len(str)-len(substr)-1] == ' ') && str[len(str)-len(substr):] == substr || - findInString(str, substr)) + return len(str) >= len(substr) && + (len(substr) == 0 || str[len(str)-len(substr):] == substr || + str[:len(substr)] == substr || + len(str) > len(substr) && (str[len(str)-len(substr)-1:len(str)-len(substr)] == " " || str[len(str)-len(substr)-1] == ' ') && str[len(str)-len(substr):] == substr || + findInString(str, substr)) } func findInString(str, substr string) bool { diff --git a/weed/pb/mq_agent_pb/publish_response_test.go b/weed/pb/mq_agent_pb/publish_response_test.go index 1f2e767e4..0c7b0ee3a 100644 --- a/weed/pb/mq_agent_pb/publish_response_test.go +++ b/weed/pb/mq_agent_pb/publish_response_test.go @@ -1,8 +1,8 @@ package mq_agent_pb import ( - "testing" "google.golang.org/protobuf/proto" + "testing" ) func TestPublishRecordResponseSerialization(t *testing.T) { diff --git a/weed/pb/schema_pb/offset_test.go b/weed/pb/schema_pb/offset_test.go index 28324836e..273d2d5d1 100644 --- a/weed/pb/schema_pb/offset_test.go +++ b/weed/pb/schema_pb/offset_test.go @@ -1,8 +1,8 @@ package schema_pb import ( - "testing" "google.golang.org/protobuf/proto" + "testing" ) func TestOffsetTypeEnums(t *testing.T) { @@ -34,8 +34,8 @@ func TestPartitionOffsetSerialization(t *testing.T) { RangeStop: 31, UnixTimeNs: 1234567890, }, - StartTsNs: 1234567890, - StartOffset: 42, // New field + StartTsNs: 1234567890, + StartOffset: 42, // New field } // Test proto marshaling/unmarshaling diff --git 
a/weed/pb/volume_server.proto b/weed/pb/volume_server.proto index fcdad30ff..d0d664f74 100644 --- a/weed/pb/volume_server.proto +++ b/weed/pb/volume_server.proto @@ -525,6 +525,13 @@ message VolumeInfo { int64 dat_file_size = 5; // store the original dat file size uint64 expire_at_sec = 6; // expiration time of ec volume bool read_only = 7; + EcShardConfig ec_shard_config = 8; // EC shard configuration (optional, null = use default 10+4) +} + +// EcShardConfig specifies erasure coding shard configuration +message EcShardConfig { + uint32 data_shards = 1; // Number of data shards (e.g., 10) + uint32 parity_shards = 2; // Number of parity shards (e.g., 4) } message OldVersionVolumeInfo { repeated RemoteFile files = 1; diff --git a/weed/pb/volume_server_pb/volume_server.pb.go b/weed/pb/volume_server_pb/volume_server.pb.go index 503db63ef..27e791be5 100644 --- a/weed/pb/volume_server_pb/volume_server.pb.go +++ b/weed/pb/volume_server_pb/volume_server.pb.go @@ -4442,6 +4442,7 @@ type VolumeInfo struct { DatFileSize int64 `protobuf:"varint,5,opt,name=dat_file_size,json=datFileSize,proto3" json:"dat_file_size,omitempty"` // store the original dat file size ExpireAtSec uint64 `protobuf:"varint,6,opt,name=expire_at_sec,json=expireAtSec,proto3" json:"expire_at_sec,omitempty"` // expiration time of ec volume ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` + EcShardConfig *EcShardConfig `protobuf:"bytes,8,opt,name=ec_shard_config,json=ecShardConfig,proto3" json:"ec_shard_config,omitempty"` // EC shard configuration (optional, null = use default 10+4) unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -4525,6 +4526,66 @@ func (x *VolumeInfo) GetReadOnly() bool { return false } +func (x *VolumeInfo) GetEcShardConfig() *EcShardConfig { + if x != nil { + return x.EcShardConfig + } + return nil +} + +// EcShardConfig specifies erasure coding shard configuration +type EcShardConfig struct { + state protoimpl.MessageState `protogen:"open.v1"` + DataShards uint32 `protobuf:"varint,1,opt,name=data_shards,json=dataShards,proto3" json:"data_shards,omitempty"` // Number of data shards (e.g., 10) + ParityShards uint32 `protobuf:"varint,2,opt,name=parity_shards,json=parityShards,proto3" json:"parity_shards,omitempty"` // Number of parity shards (e.g., 4) + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *EcShardConfig) Reset() { + *x = EcShardConfig{} + mi := &file_volume_server_proto_msgTypes[80] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *EcShardConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EcShardConfig) ProtoMessage() {} + +func (x *EcShardConfig) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[80] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EcShardConfig.ProtoReflect.Descriptor instead. 
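The proto change above adds an optional EcShardConfig to VolumeInfo, where a nil config means the default 10+4 layout. A hedged sketch of reading the config with that fallback, using the generated types and getters from this diff (treating a zero-valued config the same as unset is an assumption of the sketch, not of the proto):

```go
package main

import (
	"fmt"

	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
)

// Default layout assumed when VolumeInfo carries no EcShardConfig,
// matching the "null = use default 10+4" comment in the proto.
const (
	defaultDataShards   = 10
	defaultParityShards = 4
)

// shardLayout returns the effective data/parity counts for a volume.
func shardLayout(vi *volume_server_pb.VolumeInfo) (data, parity uint32) {
	cfg := vi.GetEcShardConfig()
	if cfg == nil || cfg.GetDataShards() == 0 {
		return defaultDataShards, defaultParityShards
	}
	return cfg.GetDataShards(), cfg.GetParityShards()
}

func main() {
	custom := &volume_server_pb.VolumeInfo{
		EcShardConfig: &volume_server_pb.EcShardConfig{DataShards: 6, ParityShards: 3},
	}
	d, p := shardLayout(custom)
	fmt.Println(d, p, d+p) // 6 3 9

	d, p = shardLayout(&volume_server_pb.VolumeInfo{})
	fmt.Println(d, p, d+p) // 10 4 14
}
```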
+func (*EcShardConfig) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{80} +} + +func (x *EcShardConfig) GetDataShards() uint32 { + if x != nil { + return x.DataShards + } + return 0 +} + +func (x *EcShardConfig) GetParityShards() uint32 { + if x != nil { + return x.ParityShards + } + return 0 +} + type OldVersionVolumeInfo struct { state protoimpl.MessageState `protogen:"open.v1"` Files []*RemoteFile `protobuf:"bytes,1,rep,name=files,proto3" json:"files,omitempty"` @@ -4540,7 +4601,7 @@ type OldVersionVolumeInfo struct { func (x *OldVersionVolumeInfo) Reset() { *x = OldVersionVolumeInfo{} - mi := &file_volume_server_proto_msgTypes[80] + mi := &file_volume_server_proto_msgTypes[81] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4552,7 +4613,7 @@ func (x *OldVersionVolumeInfo) String() string { func (*OldVersionVolumeInfo) ProtoMessage() {} func (x *OldVersionVolumeInfo) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[80] + mi := &file_volume_server_proto_msgTypes[81] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4565,7 +4626,7 @@ func (x *OldVersionVolumeInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use OldVersionVolumeInfo.ProtoReflect.Descriptor instead. func (*OldVersionVolumeInfo) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{80} + return file_volume_server_proto_rawDescGZIP(), []int{81} } func (x *OldVersionVolumeInfo) GetFiles() []*RemoteFile { @@ -4630,7 +4691,7 @@ type VolumeTierMoveDatToRemoteRequest struct { func (x *VolumeTierMoveDatToRemoteRequest) Reset() { *x = VolumeTierMoveDatToRemoteRequest{} - mi := &file_volume_server_proto_msgTypes[81] + mi := &file_volume_server_proto_msgTypes[82] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4642,7 +4703,7 @@ func (x *VolumeTierMoveDatToRemoteRequest) String() string { func (*VolumeTierMoveDatToRemoteRequest) ProtoMessage() {} func (x *VolumeTierMoveDatToRemoteRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[81] + mi := &file_volume_server_proto_msgTypes[82] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4655,7 +4716,7 @@ func (x *VolumeTierMoveDatToRemoteRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeTierMoveDatToRemoteRequest.ProtoReflect.Descriptor instead. 
func (*VolumeTierMoveDatToRemoteRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{81} + return file_volume_server_proto_rawDescGZIP(), []int{82} } func (x *VolumeTierMoveDatToRemoteRequest) GetVolumeId() uint32 { @@ -4696,7 +4757,7 @@ type VolumeTierMoveDatToRemoteResponse struct { func (x *VolumeTierMoveDatToRemoteResponse) Reset() { *x = VolumeTierMoveDatToRemoteResponse{} - mi := &file_volume_server_proto_msgTypes[82] + mi := &file_volume_server_proto_msgTypes[83] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4708,7 +4769,7 @@ func (x *VolumeTierMoveDatToRemoteResponse) String() string { func (*VolumeTierMoveDatToRemoteResponse) ProtoMessage() {} func (x *VolumeTierMoveDatToRemoteResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[82] + mi := &file_volume_server_proto_msgTypes[83] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4721,7 +4782,7 @@ func (x *VolumeTierMoveDatToRemoteResponse) ProtoReflect() protoreflect.Message // Deprecated: Use VolumeTierMoveDatToRemoteResponse.ProtoReflect.Descriptor instead. func (*VolumeTierMoveDatToRemoteResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{82} + return file_volume_server_proto_rawDescGZIP(), []int{83} } func (x *VolumeTierMoveDatToRemoteResponse) GetProcessed() int64 { @@ -4749,7 +4810,7 @@ type VolumeTierMoveDatFromRemoteRequest struct { func (x *VolumeTierMoveDatFromRemoteRequest) Reset() { *x = VolumeTierMoveDatFromRemoteRequest{} - mi := &file_volume_server_proto_msgTypes[83] + mi := &file_volume_server_proto_msgTypes[84] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4761,7 +4822,7 @@ func (x *VolumeTierMoveDatFromRemoteRequest) String() string { func (*VolumeTierMoveDatFromRemoteRequest) ProtoMessage() {} func (x *VolumeTierMoveDatFromRemoteRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[83] + mi := &file_volume_server_proto_msgTypes[84] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4774,7 +4835,7 @@ func (x *VolumeTierMoveDatFromRemoteRequest) ProtoReflect() protoreflect.Message // Deprecated: Use VolumeTierMoveDatFromRemoteRequest.ProtoReflect.Descriptor instead. 
func (*VolumeTierMoveDatFromRemoteRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{83} + return file_volume_server_proto_rawDescGZIP(), []int{84} } func (x *VolumeTierMoveDatFromRemoteRequest) GetVolumeId() uint32 { @@ -4808,7 +4869,7 @@ type VolumeTierMoveDatFromRemoteResponse struct { func (x *VolumeTierMoveDatFromRemoteResponse) Reset() { *x = VolumeTierMoveDatFromRemoteResponse{} - mi := &file_volume_server_proto_msgTypes[84] + mi := &file_volume_server_proto_msgTypes[85] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4820,7 +4881,7 @@ func (x *VolumeTierMoveDatFromRemoteResponse) String() string { func (*VolumeTierMoveDatFromRemoteResponse) ProtoMessage() {} func (x *VolumeTierMoveDatFromRemoteResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[84] + mi := &file_volume_server_proto_msgTypes[85] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4833,7 +4894,7 @@ func (x *VolumeTierMoveDatFromRemoteResponse) ProtoReflect() protoreflect.Messag // Deprecated: Use VolumeTierMoveDatFromRemoteResponse.ProtoReflect.Descriptor instead. func (*VolumeTierMoveDatFromRemoteResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{84} + return file_volume_server_proto_rawDescGZIP(), []int{85} } func (x *VolumeTierMoveDatFromRemoteResponse) GetProcessed() int64 { @@ -4858,7 +4919,7 @@ type VolumeServerStatusRequest struct { func (x *VolumeServerStatusRequest) Reset() { *x = VolumeServerStatusRequest{} - mi := &file_volume_server_proto_msgTypes[85] + mi := &file_volume_server_proto_msgTypes[86] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4870,7 +4931,7 @@ func (x *VolumeServerStatusRequest) String() string { func (*VolumeServerStatusRequest) ProtoMessage() {} func (x *VolumeServerStatusRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[85] + mi := &file_volume_server_proto_msgTypes[86] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4883,7 +4944,7 @@ func (x *VolumeServerStatusRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeServerStatusRequest.ProtoReflect.Descriptor instead. func (*VolumeServerStatusRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{85} + return file_volume_server_proto_rawDescGZIP(), []int{86} } type VolumeServerStatusResponse struct { @@ -4899,7 +4960,7 @@ type VolumeServerStatusResponse struct { func (x *VolumeServerStatusResponse) Reset() { *x = VolumeServerStatusResponse{} - mi := &file_volume_server_proto_msgTypes[86] + mi := &file_volume_server_proto_msgTypes[87] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4911,7 +4972,7 @@ func (x *VolumeServerStatusResponse) String() string { func (*VolumeServerStatusResponse) ProtoMessage() {} func (x *VolumeServerStatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[86] + mi := &file_volume_server_proto_msgTypes[87] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4924,7 +4985,7 @@ func (x *VolumeServerStatusResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeServerStatusResponse.ProtoReflect.Descriptor instead. 
func (*VolumeServerStatusResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{86} + return file_volume_server_proto_rawDescGZIP(), []int{87} } func (x *VolumeServerStatusResponse) GetDiskStatuses() []*DiskStatus { @@ -4970,7 +5031,7 @@ type VolumeServerLeaveRequest struct { func (x *VolumeServerLeaveRequest) Reset() { *x = VolumeServerLeaveRequest{} - mi := &file_volume_server_proto_msgTypes[87] + mi := &file_volume_server_proto_msgTypes[88] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4982,7 +5043,7 @@ func (x *VolumeServerLeaveRequest) String() string { func (*VolumeServerLeaveRequest) ProtoMessage() {} func (x *VolumeServerLeaveRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[87] + mi := &file_volume_server_proto_msgTypes[88] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4995,7 +5056,7 @@ func (x *VolumeServerLeaveRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeServerLeaveRequest.ProtoReflect.Descriptor instead. func (*VolumeServerLeaveRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{87} + return file_volume_server_proto_rawDescGZIP(), []int{88} } type VolumeServerLeaveResponse struct { @@ -5006,7 +5067,7 @@ type VolumeServerLeaveResponse struct { func (x *VolumeServerLeaveResponse) Reset() { *x = VolumeServerLeaveResponse{} - mi := &file_volume_server_proto_msgTypes[88] + mi := &file_volume_server_proto_msgTypes[89] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5018,7 +5079,7 @@ func (x *VolumeServerLeaveResponse) String() string { func (*VolumeServerLeaveResponse) ProtoMessage() {} func (x *VolumeServerLeaveResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[88] + mi := &file_volume_server_proto_msgTypes[89] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5031,7 +5092,7 @@ func (x *VolumeServerLeaveResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeServerLeaveResponse.ProtoReflect.Descriptor instead. func (*VolumeServerLeaveResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{88} + return file_volume_server_proto_rawDescGZIP(), []int{89} } // remote storage @@ -5053,7 +5114,7 @@ type FetchAndWriteNeedleRequest struct { func (x *FetchAndWriteNeedleRequest) Reset() { *x = FetchAndWriteNeedleRequest{} - mi := &file_volume_server_proto_msgTypes[89] + mi := &file_volume_server_proto_msgTypes[90] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5065,7 +5126,7 @@ func (x *FetchAndWriteNeedleRequest) String() string { func (*FetchAndWriteNeedleRequest) ProtoMessage() {} func (x *FetchAndWriteNeedleRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[89] + mi := &file_volume_server_proto_msgTypes[90] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5078,7 +5139,7 @@ func (x *FetchAndWriteNeedleRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use FetchAndWriteNeedleRequest.ProtoReflect.Descriptor instead. 
func (*FetchAndWriteNeedleRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{89} + return file_volume_server_proto_rawDescGZIP(), []int{90} } func (x *FetchAndWriteNeedleRequest) GetVolumeId() uint32 { @@ -5153,7 +5214,7 @@ type FetchAndWriteNeedleResponse struct { func (x *FetchAndWriteNeedleResponse) Reset() { *x = FetchAndWriteNeedleResponse{} - mi := &file_volume_server_proto_msgTypes[90] + mi := &file_volume_server_proto_msgTypes[91] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5165,7 +5226,7 @@ func (x *FetchAndWriteNeedleResponse) String() string { func (*FetchAndWriteNeedleResponse) ProtoMessage() {} func (x *FetchAndWriteNeedleResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[90] + mi := &file_volume_server_proto_msgTypes[91] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5178,7 +5239,7 @@ func (x *FetchAndWriteNeedleResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use FetchAndWriteNeedleResponse.ProtoReflect.Descriptor instead. func (*FetchAndWriteNeedleResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{90} + return file_volume_server_proto_rawDescGZIP(), []int{91} } func (x *FetchAndWriteNeedleResponse) GetETag() string { @@ -5202,7 +5263,7 @@ type QueryRequest struct { func (x *QueryRequest) Reset() { *x = QueryRequest{} - mi := &file_volume_server_proto_msgTypes[91] + mi := &file_volume_server_proto_msgTypes[92] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5214,7 +5275,7 @@ func (x *QueryRequest) String() string { func (*QueryRequest) ProtoMessage() {} func (x *QueryRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[91] + mi := &file_volume_server_proto_msgTypes[92] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5227,7 +5288,7 @@ func (x *QueryRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use QueryRequest.ProtoReflect.Descriptor instead. func (*QueryRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{91} + return file_volume_server_proto_rawDescGZIP(), []int{92} } func (x *QueryRequest) GetSelections() []string { @@ -5274,7 +5335,7 @@ type QueriedStripe struct { func (x *QueriedStripe) Reset() { *x = QueriedStripe{} - mi := &file_volume_server_proto_msgTypes[92] + mi := &file_volume_server_proto_msgTypes[93] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5286,7 +5347,7 @@ func (x *QueriedStripe) String() string { func (*QueriedStripe) ProtoMessage() {} func (x *QueriedStripe) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[92] + mi := &file_volume_server_proto_msgTypes[93] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5299,7 +5360,7 @@ func (x *QueriedStripe) ProtoReflect() protoreflect.Message { // Deprecated: Use QueriedStripe.ProtoReflect.Descriptor instead. 
func (*QueriedStripe) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{92} + return file_volume_server_proto_rawDescGZIP(), []int{93} } func (x *QueriedStripe) GetRecords() []byte { @@ -5319,7 +5380,7 @@ type VolumeNeedleStatusRequest struct { func (x *VolumeNeedleStatusRequest) Reset() { *x = VolumeNeedleStatusRequest{} - mi := &file_volume_server_proto_msgTypes[93] + mi := &file_volume_server_proto_msgTypes[94] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5331,7 +5392,7 @@ func (x *VolumeNeedleStatusRequest) String() string { func (*VolumeNeedleStatusRequest) ProtoMessage() {} func (x *VolumeNeedleStatusRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[93] + mi := &file_volume_server_proto_msgTypes[94] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5344,7 +5405,7 @@ func (x *VolumeNeedleStatusRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeNeedleStatusRequest.ProtoReflect.Descriptor instead. func (*VolumeNeedleStatusRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{93} + return file_volume_server_proto_rawDescGZIP(), []int{94} } func (x *VolumeNeedleStatusRequest) GetVolumeId() uint32 { @@ -5375,7 +5436,7 @@ type VolumeNeedleStatusResponse struct { func (x *VolumeNeedleStatusResponse) Reset() { *x = VolumeNeedleStatusResponse{} - mi := &file_volume_server_proto_msgTypes[94] + mi := &file_volume_server_proto_msgTypes[95] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5387,7 +5448,7 @@ func (x *VolumeNeedleStatusResponse) String() string { func (*VolumeNeedleStatusResponse) ProtoMessage() {} func (x *VolumeNeedleStatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[94] + mi := &file_volume_server_proto_msgTypes[95] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5400,7 +5461,7 @@ func (x *VolumeNeedleStatusResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeNeedleStatusResponse.ProtoReflect.Descriptor instead. func (*VolumeNeedleStatusResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{94} + return file_volume_server_proto_rawDescGZIP(), []int{95} } func (x *VolumeNeedleStatusResponse) GetNeedleId() uint64 { @@ -5455,7 +5516,7 @@ type PingRequest struct { func (x *PingRequest) Reset() { *x = PingRequest{} - mi := &file_volume_server_proto_msgTypes[95] + mi := &file_volume_server_proto_msgTypes[96] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5467,7 +5528,7 @@ func (x *PingRequest) String() string { func (*PingRequest) ProtoMessage() {} func (x *PingRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[95] + mi := &file_volume_server_proto_msgTypes[96] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5480,7 +5541,7 @@ func (x *PingRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PingRequest.ProtoReflect.Descriptor instead. 
func (*PingRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{95} + return file_volume_server_proto_rawDescGZIP(), []int{96} } func (x *PingRequest) GetTarget() string { @@ -5508,7 +5569,7 @@ type PingResponse struct { func (x *PingResponse) Reset() { *x = PingResponse{} - mi := &file_volume_server_proto_msgTypes[96] + mi := &file_volume_server_proto_msgTypes[97] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5520,7 +5581,7 @@ func (x *PingResponse) String() string { func (*PingResponse) ProtoMessage() {} func (x *PingResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[96] + mi := &file_volume_server_proto_msgTypes[97] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5533,7 +5594,7 @@ func (x *PingResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use PingResponse.ProtoReflect.Descriptor instead. func (*PingResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{96} + return file_volume_server_proto_rawDescGZIP(), []int{97} } func (x *PingResponse) GetStartTimeNs() int64 { @@ -5568,7 +5629,7 @@ type FetchAndWriteNeedleRequest_Replica struct { func (x *FetchAndWriteNeedleRequest_Replica) Reset() { *x = FetchAndWriteNeedleRequest_Replica{} - mi := &file_volume_server_proto_msgTypes[97] + mi := &file_volume_server_proto_msgTypes[98] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5580,7 +5641,7 @@ func (x *FetchAndWriteNeedleRequest_Replica) String() string { func (*FetchAndWriteNeedleRequest_Replica) ProtoMessage() {} func (x *FetchAndWriteNeedleRequest_Replica) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[97] + mi := &file_volume_server_proto_msgTypes[98] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5593,7 +5654,7 @@ func (x *FetchAndWriteNeedleRequest_Replica) ProtoReflect() protoreflect.Message // Deprecated: Use FetchAndWriteNeedleRequest_Replica.ProtoReflect.Descriptor instead. func (*FetchAndWriteNeedleRequest_Replica) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{89, 0} + return file_volume_server_proto_rawDescGZIP(), []int{90, 0} } func (x *FetchAndWriteNeedleRequest_Replica) GetUrl() string { @@ -5628,7 +5689,7 @@ type QueryRequest_Filter struct { func (x *QueryRequest_Filter) Reset() { *x = QueryRequest_Filter{} - mi := &file_volume_server_proto_msgTypes[98] + mi := &file_volume_server_proto_msgTypes[99] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5640,7 +5701,7 @@ func (x *QueryRequest_Filter) String() string { func (*QueryRequest_Filter) ProtoMessage() {} func (x *QueryRequest_Filter) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[98] + mi := &file_volume_server_proto_msgTypes[99] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5653,7 +5714,7 @@ func (x *QueryRequest_Filter) ProtoReflect() protoreflect.Message { // Deprecated: Use QueryRequest_Filter.ProtoReflect.Descriptor instead. 
func (*QueryRequest_Filter) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{91, 0} + return file_volume_server_proto_rawDescGZIP(), []int{92, 0} } func (x *QueryRequest_Filter) GetField() string { @@ -5690,7 +5751,7 @@ type QueryRequest_InputSerialization struct { func (x *QueryRequest_InputSerialization) Reset() { *x = QueryRequest_InputSerialization{} - mi := &file_volume_server_proto_msgTypes[99] + mi := &file_volume_server_proto_msgTypes[100] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5702,7 +5763,7 @@ func (x *QueryRequest_InputSerialization) String() string { func (*QueryRequest_InputSerialization) ProtoMessage() {} func (x *QueryRequest_InputSerialization) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[99] + mi := &file_volume_server_proto_msgTypes[100] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5715,7 +5776,7 @@ func (x *QueryRequest_InputSerialization) ProtoReflect() protoreflect.Message { // Deprecated: Use QueryRequest_InputSerialization.ProtoReflect.Descriptor instead. func (*QueryRequest_InputSerialization) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{91, 1} + return file_volume_server_proto_rawDescGZIP(), []int{92, 1} } func (x *QueryRequest_InputSerialization) GetCompressionType() string { @@ -5756,7 +5817,7 @@ type QueryRequest_OutputSerialization struct { func (x *QueryRequest_OutputSerialization) Reset() { *x = QueryRequest_OutputSerialization{} - mi := &file_volume_server_proto_msgTypes[100] + mi := &file_volume_server_proto_msgTypes[101] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5768,7 +5829,7 @@ func (x *QueryRequest_OutputSerialization) String() string { func (*QueryRequest_OutputSerialization) ProtoMessage() {} func (x *QueryRequest_OutputSerialization) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[100] + mi := &file_volume_server_proto_msgTypes[101] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5781,7 +5842,7 @@ func (x *QueryRequest_OutputSerialization) ProtoReflect() protoreflect.Message { // Deprecated: Use QueryRequest_OutputSerialization.ProtoReflect.Descriptor instead. 
func (*QueryRequest_OutputSerialization) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{91, 2} + return file_volume_server_proto_rawDescGZIP(), []int{92, 2} } func (x *QueryRequest_OutputSerialization) GetCsvOutput() *QueryRequest_OutputSerialization_CSVOutput { @@ -5814,7 +5875,7 @@ type QueryRequest_InputSerialization_CSVInput struct { func (x *QueryRequest_InputSerialization_CSVInput) Reset() { *x = QueryRequest_InputSerialization_CSVInput{} - mi := &file_volume_server_proto_msgTypes[101] + mi := &file_volume_server_proto_msgTypes[102] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5826,7 +5887,7 @@ func (x *QueryRequest_InputSerialization_CSVInput) String() string { func (*QueryRequest_InputSerialization_CSVInput) ProtoMessage() {} func (x *QueryRequest_InputSerialization_CSVInput) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[101] + mi := &file_volume_server_proto_msgTypes[102] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5839,7 +5900,7 @@ func (x *QueryRequest_InputSerialization_CSVInput) ProtoReflect() protoreflect.M // Deprecated: Use QueryRequest_InputSerialization_CSVInput.ProtoReflect.Descriptor instead. func (*QueryRequest_InputSerialization_CSVInput) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{91, 1, 0} + return file_volume_server_proto_rawDescGZIP(), []int{92, 1, 0} } func (x *QueryRequest_InputSerialization_CSVInput) GetFileHeaderInfo() string { @@ -5900,7 +5961,7 @@ type QueryRequest_InputSerialization_JSONInput struct { func (x *QueryRequest_InputSerialization_JSONInput) Reset() { *x = QueryRequest_InputSerialization_JSONInput{} - mi := &file_volume_server_proto_msgTypes[102] + mi := &file_volume_server_proto_msgTypes[103] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5912,7 +5973,7 @@ func (x *QueryRequest_InputSerialization_JSONInput) String() string { func (*QueryRequest_InputSerialization_JSONInput) ProtoMessage() {} func (x *QueryRequest_InputSerialization_JSONInput) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[102] + mi := &file_volume_server_proto_msgTypes[103] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5925,7 +5986,7 @@ func (x *QueryRequest_InputSerialization_JSONInput) ProtoReflect() protoreflect. // Deprecated: Use QueryRequest_InputSerialization_JSONInput.ProtoReflect.Descriptor instead. 
func (*QueryRequest_InputSerialization_JSONInput) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{91, 1, 1} + return file_volume_server_proto_rawDescGZIP(), []int{92, 1, 1} } func (x *QueryRequest_InputSerialization_JSONInput) GetType() string { @@ -5943,7 +6004,7 @@ type QueryRequest_InputSerialization_ParquetInput struct { func (x *QueryRequest_InputSerialization_ParquetInput) Reset() { *x = QueryRequest_InputSerialization_ParquetInput{} - mi := &file_volume_server_proto_msgTypes[103] + mi := &file_volume_server_proto_msgTypes[104] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5955,7 +6016,7 @@ func (x *QueryRequest_InputSerialization_ParquetInput) String() string { func (*QueryRequest_InputSerialization_ParquetInput) ProtoMessage() {} func (x *QueryRequest_InputSerialization_ParquetInput) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[103] + mi := &file_volume_server_proto_msgTypes[104] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5968,7 +6029,7 @@ func (x *QueryRequest_InputSerialization_ParquetInput) ProtoReflect() protorefle // Deprecated: Use QueryRequest_InputSerialization_ParquetInput.ProtoReflect.Descriptor instead. func (*QueryRequest_InputSerialization_ParquetInput) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{91, 1, 2} + return file_volume_server_proto_rawDescGZIP(), []int{92, 1, 2} } type QueryRequest_OutputSerialization_CSVOutput struct { @@ -5984,7 +6045,7 @@ type QueryRequest_OutputSerialization_CSVOutput struct { func (x *QueryRequest_OutputSerialization_CSVOutput) Reset() { *x = QueryRequest_OutputSerialization_CSVOutput{} - mi := &file_volume_server_proto_msgTypes[104] + mi := &file_volume_server_proto_msgTypes[105] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5996,7 +6057,7 @@ func (x *QueryRequest_OutputSerialization_CSVOutput) String() string { func (*QueryRequest_OutputSerialization_CSVOutput) ProtoMessage() {} func (x *QueryRequest_OutputSerialization_CSVOutput) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[104] + mi := &file_volume_server_proto_msgTypes[105] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6009,7 +6070,7 @@ func (x *QueryRequest_OutputSerialization_CSVOutput) ProtoReflect() protoreflect // Deprecated: Use QueryRequest_OutputSerialization_CSVOutput.ProtoReflect.Descriptor instead. 
func (*QueryRequest_OutputSerialization_CSVOutput) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{91, 2, 0} + return file_volume_server_proto_rawDescGZIP(), []int{92, 2, 0} } func (x *QueryRequest_OutputSerialization_CSVOutput) GetQuoteFields() string { @@ -6056,7 +6117,7 @@ type QueryRequest_OutputSerialization_JSONOutput struct { func (x *QueryRequest_OutputSerialization_JSONOutput) Reset() { *x = QueryRequest_OutputSerialization_JSONOutput{} - mi := &file_volume_server_proto_msgTypes[105] + mi := &file_volume_server_proto_msgTypes[106] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6068,7 +6129,7 @@ func (x *QueryRequest_OutputSerialization_JSONOutput) String() string { func (*QueryRequest_OutputSerialization_JSONOutput) ProtoMessage() {} func (x *QueryRequest_OutputSerialization_JSONOutput) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[105] + mi := &file_volume_server_proto_msgTypes[106] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6081,7 +6142,7 @@ func (x *QueryRequest_OutputSerialization_JSONOutput) ProtoReflect() protoreflec // Deprecated: Use QueryRequest_OutputSerialization_JSONOutput.ProtoReflect.Descriptor instead. func (*QueryRequest_OutputSerialization_JSONOutput) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{91, 2, 1} + return file_volume_server_proto_rawDescGZIP(), []int{92, 2, 1} } func (x *QueryRequest_OutputSerialization_JSONOutput) GetRecordDelimiter() string { @@ -6423,7 +6484,7 @@ const file_volume_server_proto_rawDesc = "" + "\x06offset\x18\x04 \x01(\x04R\x06offset\x12\x1b\n" + "\tfile_size\x18\x05 \x01(\x04R\bfileSize\x12#\n" + "\rmodified_time\x18\x06 \x01(\x04R\fmodifiedTime\x12\x1c\n" + - "\textension\x18\a \x01(\tR\textension\"\x84\x02\n" + + "\textension\x18\a \x01(\tR\textension\"\xcd\x02\n" + "\n" + "VolumeInfo\x122\n" + "\x05files\x18\x01 \x03(\v2\x1c.volume_server_pb.RemoteFileR\x05files\x12\x18\n" + @@ -6432,7 +6493,12 @@ const file_volume_server_proto_rawDesc = "" + "\fbytes_offset\x18\x04 \x01(\rR\vbytesOffset\x12\"\n" + "\rdat_file_size\x18\x05 \x01(\x03R\vdatFileSize\x12\"\n" + "\rexpire_at_sec\x18\x06 \x01(\x04R\vexpireAtSec\x12\x1b\n" + - "\tread_only\x18\a \x01(\bR\breadOnly\"\x8b\x02\n" + + "\tread_only\x18\a \x01(\bR\breadOnly\x12G\n" + + "\x0fec_shard_config\x18\b \x01(\v2\x1f.volume_server_pb.EcShardConfigR\recShardConfig\"U\n" + + "\rEcShardConfig\x12\x1f\n" + + "\vdata_shards\x18\x01 \x01(\rR\n" + + "dataShards\x12#\n" + + "\rparity_shards\x18\x02 \x01(\rR\fparityShards\"\x8b\x02\n" + "\x14OldVersionVolumeInfo\x122\n" + "\x05files\x18\x01 \x03(\v2\x1c.volume_server_pb.RemoteFileR\x05files\x12\x18\n" + "\aversion\x18\x02 \x01(\rR\aversion\x12 \n" + @@ -6611,7 +6677,7 @@ func file_volume_server_proto_rawDescGZIP() []byte { return file_volume_server_proto_rawDescData } -var file_volume_server_proto_msgTypes = make([]protoimpl.MessageInfo, 106) +var file_volume_server_proto_msgTypes = make([]protoimpl.MessageInfo, 107) var file_volume_server_proto_goTypes = []any{ (*BatchDeleteRequest)(nil), // 0: volume_server_pb.BatchDeleteRequest (*BatchDeleteResponse)(nil), // 1: volume_server_pb.BatchDeleteResponse @@ -6693,34 +6759,35 @@ var file_volume_server_proto_goTypes = []any{ (*MemStatus)(nil), // 77: volume_server_pb.MemStatus (*RemoteFile)(nil), // 78: volume_server_pb.RemoteFile (*VolumeInfo)(nil), // 79: volume_server_pb.VolumeInfo - 
(*OldVersionVolumeInfo)(nil), // 80: volume_server_pb.OldVersionVolumeInfo - (*VolumeTierMoveDatToRemoteRequest)(nil), // 81: volume_server_pb.VolumeTierMoveDatToRemoteRequest - (*VolumeTierMoveDatToRemoteResponse)(nil), // 82: volume_server_pb.VolumeTierMoveDatToRemoteResponse - (*VolumeTierMoveDatFromRemoteRequest)(nil), // 83: volume_server_pb.VolumeTierMoveDatFromRemoteRequest - (*VolumeTierMoveDatFromRemoteResponse)(nil), // 84: volume_server_pb.VolumeTierMoveDatFromRemoteResponse - (*VolumeServerStatusRequest)(nil), // 85: volume_server_pb.VolumeServerStatusRequest - (*VolumeServerStatusResponse)(nil), // 86: volume_server_pb.VolumeServerStatusResponse - (*VolumeServerLeaveRequest)(nil), // 87: volume_server_pb.VolumeServerLeaveRequest - (*VolumeServerLeaveResponse)(nil), // 88: volume_server_pb.VolumeServerLeaveResponse - (*FetchAndWriteNeedleRequest)(nil), // 89: volume_server_pb.FetchAndWriteNeedleRequest - (*FetchAndWriteNeedleResponse)(nil), // 90: volume_server_pb.FetchAndWriteNeedleResponse - (*QueryRequest)(nil), // 91: volume_server_pb.QueryRequest - (*QueriedStripe)(nil), // 92: volume_server_pb.QueriedStripe - (*VolumeNeedleStatusRequest)(nil), // 93: volume_server_pb.VolumeNeedleStatusRequest - (*VolumeNeedleStatusResponse)(nil), // 94: volume_server_pb.VolumeNeedleStatusResponse - (*PingRequest)(nil), // 95: volume_server_pb.PingRequest - (*PingResponse)(nil), // 96: volume_server_pb.PingResponse - (*FetchAndWriteNeedleRequest_Replica)(nil), // 97: volume_server_pb.FetchAndWriteNeedleRequest.Replica - (*QueryRequest_Filter)(nil), // 98: volume_server_pb.QueryRequest.Filter - (*QueryRequest_InputSerialization)(nil), // 99: volume_server_pb.QueryRequest.InputSerialization - (*QueryRequest_OutputSerialization)(nil), // 100: volume_server_pb.QueryRequest.OutputSerialization - (*QueryRequest_InputSerialization_CSVInput)(nil), // 101: volume_server_pb.QueryRequest.InputSerialization.CSVInput - (*QueryRequest_InputSerialization_JSONInput)(nil), // 102: volume_server_pb.QueryRequest.InputSerialization.JSONInput - (*QueryRequest_InputSerialization_ParquetInput)(nil), // 103: volume_server_pb.QueryRequest.InputSerialization.ParquetInput - (*QueryRequest_OutputSerialization_CSVOutput)(nil), // 104: volume_server_pb.QueryRequest.OutputSerialization.CSVOutput - (*QueryRequest_OutputSerialization_JSONOutput)(nil), // 105: volume_server_pb.QueryRequest.OutputSerialization.JSONOutput - (*remote_pb.RemoteConf)(nil), // 106: remote_pb.RemoteConf - (*remote_pb.RemoteStorageLocation)(nil), // 107: remote_pb.RemoteStorageLocation + (*EcShardConfig)(nil), // 80: volume_server_pb.EcShardConfig + (*OldVersionVolumeInfo)(nil), // 81: volume_server_pb.OldVersionVolumeInfo + (*VolumeTierMoveDatToRemoteRequest)(nil), // 82: volume_server_pb.VolumeTierMoveDatToRemoteRequest + (*VolumeTierMoveDatToRemoteResponse)(nil), // 83: volume_server_pb.VolumeTierMoveDatToRemoteResponse + (*VolumeTierMoveDatFromRemoteRequest)(nil), // 84: volume_server_pb.VolumeTierMoveDatFromRemoteRequest + (*VolumeTierMoveDatFromRemoteResponse)(nil), // 85: volume_server_pb.VolumeTierMoveDatFromRemoteResponse + (*VolumeServerStatusRequest)(nil), // 86: volume_server_pb.VolumeServerStatusRequest + (*VolumeServerStatusResponse)(nil), // 87: volume_server_pb.VolumeServerStatusResponse + (*VolumeServerLeaveRequest)(nil), // 88: volume_server_pb.VolumeServerLeaveRequest + (*VolumeServerLeaveResponse)(nil), // 89: volume_server_pb.VolumeServerLeaveResponse + (*FetchAndWriteNeedleRequest)(nil), // 90: 
volume_server_pb.FetchAndWriteNeedleRequest + (*FetchAndWriteNeedleResponse)(nil), // 91: volume_server_pb.FetchAndWriteNeedleResponse + (*QueryRequest)(nil), // 92: volume_server_pb.QueryRequest + (*QueriedStripe)(nil), // 93: volume_server_pb.QueriedStripe + (*VolumeNeedleStatusRequest)(nil), // 94: volume_server_pb.VolumeNeedleStatusRequest + (*VolumeNeedleStatusResponse)(nil), // 95: volume_server_pb.VolumeNeedleStatusResponse + (*PingRequest)(nil), // 96: volume_server_pb.PingRequest + (*PingResponse)(nil), // 97: volume_server_pb.PingResponse + (*FetchAndWriteNeedleRequest_Replica)(nil), // 98: volume_server_pb.FetchAndWriteNeedleRequest.Replica + (*QueryRequest_Filter)(nil), // 99: volume_server_pb.QueryRequest.Filter + (*QueryRequest_InputSerialization)(nil), // 100: volume_server_pb.QueryRequest.InputSerialization + (*QueryRequest_OutputSerialization)(nil), // 101: volume_server_pb.QueryRequest.OutputSerialization + (*QueryRequest_InputSerialization_CSVInput)(nil), // 102: volume_server_pb.QueryRequest.InputSerialization.CSVInput + (*QueryRequest_InputSerialization_JSONInput)(nil), // 103: volume_server_pb.QueryRequest.InputSerialization.JSONInput + (*QueryRequest_InputSerialization_ParquetInput)(nil), // 104: volume_server_pb.QueryRequest.InputSerialization.ParquetInput + (*QueryRequest_OutputSerialization_CSVOutput)(nil), // 105: volume_server_pb.QueryRequest.OutputSerialization.CSVOutput + (*QueryRequest_OutputSerialization_JSONOutput)(nil), // 106: volume_server_pb.QueryRequest.OutputSerialization.JSONOutput + (*remote_pb.RemoteConf)(nil), // 107: remote_pb.RemoteConf + (*remote_pb.RemoteStorageLocation)(nil), // 108: remote_pb.RemoteStorageLocation } var file_volume_server_proto_depIdxs = []int32{ 2, // 0: volume_server_pb.BatchDeleteResponse.results:type_name -> volume_server_pb.DeleteResult @@ -6728,113 +6795,114 @@ var file_volume_server_proto_depIdxs = []int32{ 73, // 2: volume_server_pb.VolumeEcShardsInfoResponse.ec_shard_infos:type_name -> volume_server_pb.EcShardInfo 79, // 3: volume_server_pb.ReadVolumeFileStatusResponse.volume_info:type_name -> volume_server_pb.VolumeInfo 78, // 4: volume_server_pb.VolumeInfo.files:type_name -> volume_server_pb.RemoteFile - 78, // 5: volume_server_pb.OldVersionVolumeInfo.files:type_name -> volume_server_pb.RemoteFile - 76, // 6: volume_server_pb.VolumeServerStatusResponse.disk_statuses:type_name -> volume_server_pb.DiskStatus - 77, // 7: volume_server_pb.VolumeServerStatusResponse.memory_status:type_name -> volume_server_pb.MemStatus - 97, // 8: volume_server_pb.FetchAndWriteNeedleRequest.replicas:type_name -> volume_server_pb.FetchAndWriteNeedleRequest.Replica - 106, // 9: volume_server_pb.FetchAndWriteNeedleRequest.remote_conf:type_name -> remote_pb.RemoteConf - 107, // 10: volume_server_pb.FetchAndWriteNeedleRequest.remote_location:type_name -> remote_pb.RemoteStorageLocation - 98, // 11: volume_server_pb.QueryRequest.filter:type_name -> volume_server_pb.QueryRequest.Filter - 99, // 12: volume_server_pb.QueryRequest.input_serialization:type_name -> volume_server_pb.QueryRequest.InputSerialization - 100, // 13: volume_server_pb.QueryRequest.output_serialization:type_name -> volume_server_pb.QueryRequest.OutputSerialization - 101, // 14: volume_server_pb.QueryRequest.InputSerialization.csv_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.CSVInput - 102, // 15: volume_server_pb.QueryRequest.InputSerialization.json_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.JSONInput - 103, // 16: 
volume_server_pb.QueryRequest.InputSerialization.parquet_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.ParquetInput - 104, // 17: volume_server_pb.QueryRequest.OutputSerialization.csv_output:type_name -> volume_server_pb.QueryRequest.OutputSerialization.CSVOutput - 105, // 18: volume_server_pb.QueryRequest.OutputSerialization.json_output:type_name -> volume_server_pb.QueryRequest.OutputSerialization.JSONOutput - 0, // 19: volume_server_pb.VolumeServer.BatchDelete:input_type -> volume_server_pb.BatchDeleteRequest - 4, // 20: volume_server_pb.VolumeServer.VacuumVolumeCheck:input_type -> volume_server_pb.VacuumVolumeCheckRequest - 6, // 21: volume_server_pb.VolumeServer.VacuumVolumeCompact:input_type -> volume_server_pb.VacuumVolumeCompactRequest - 8, // 22: volume_server_pb.VolumeServer.VacuumVolumeCommit:input_type -> volume_server_pb.VacuumVolumeCommitRequest - 10, // 23: volume_server_pb.VolumeServer.VacuumVolumeCleanup:input_type -> volume_server_pb.VacuumVolumeCleanupRequest - 12, // 24: volume_server_pb.VolumeServer.DeleteCollection:input_type -> volume_server_pb.DeleteCollectionRequest - 14, // 25: volume_server_pb.VolumeServer.AllocateVolume:input_type -> volume_server_pb.AllocateVolumeRequest - 16, // 26: volume_server_pb.VolumeServer.VolumeSyncStatus:input_type -> volume_server_pb.VolumeSyncStatusRequest - 18, // 27: volume_server_pb.VolumeServer.VolumeIncrementalCopy:input_type -> volume_server_pb.VolumeIncrementalCopyRequest - 20, // 28: volume_server_pb.VolumeServer.VolumeMount:input_type -> volume_server_pb.VolumeMountRequest - 22, // 29: volume_server_pb.VolumeServer.VolumeUnmount:input_type -> volume_server_pb.VolumeUnmountRequest - 24, // 30: volume_server_pb.VolumeServer.VolumeDelete:input_type -> volume_server_pb.VolumeDeleteRequest - 26, // 31: volume_server_pb.VolumeServer.VolumeMarkReadonly:input_type -> volume_server_pb.VolumeMarkReadonlyRequest - 28, // 32: volume_server_pb.VolumeServer.VolumeMarkWritable:input_type -> volume_server_pb.VolumeMarkWritableRequest - 30, // 33: volume_server_pb.VolumeServer.VolumeConfigure:input_type -> volume_server_pb.VolumeConfigureRequest - 32, // 34: volume_server_pb.VolumeServer.VolumeStatus:input_type -> volume_server_pb.VolumeStatusRequest - 34, // 35: volume_server_pb.VolumeServer.VolumeCopy:input_type -> volume_server_pb.VolumeCopyRequest - 74, // 36: volume_server_pb.VolumeServer.ReadVolumeFileStatus:input_type -> volume_server_pb.ReadVolumeFileStatusRequest - 36, // 37: volume_server_pb.VolumeServer.CopyFile:input_type -> volume_server_pb.CopyFileRequest - 38, // 38: volume_server_pb.VolumeServer.ReceiveFile:input_type -> volume_server_pb.ReceiveFileRequest - 41, // 39: volume_server_pb.VolumeServer.ReadNeedleBlob:input_type -> volume_server_pb.ReadNeedleBlobRequest - 43, // 40: volume_server_pb.VolumeServer.ReadNeedleMeta:input_type -> volume_server_pb.ReadNeedleMetaRequest - 45, // 41: volume_server_pb.VolumeServer.WriteNeedleBlob:input_type -> volume_server_pb.WriteNeedleBlobRequest - 47, // 42: volume_server_pb.VolumeServer.ReadAllNeedles:input_type -> volume_server_pb.ReadAllNeedlesRequest - 49, // 43: volume_server_pb.VolumeServer.VolumeTailSender:input_type -> volume_server_pb.VolumeTailSenderRequest - 51, // 44: volume_server_pb.VolumeServer.VolumeTailReceiver:input_type -> volume_server_pb.VolumeTailReceiverRequest - 53, // 45: volume_server_pb.VolumeServer.VolumeEcShardsGenerate:input_type -> volume_server_pb.VolumeEcShardsGenerateRequest - 55, // 46: 
volume_server_pb.VolumeServer.VolumeEcShardsRebuild:input_type -> volume_server_pb.VolumeEcShardsRebuildRequest - 57, // 47: volume_server_pb.VolumeServer.VolumeEcShardsCopy:input_type -> volume_server_pb.VolumeEcShardsCopyRequest - 59, // 48: volume_server_pb.VolumeServer.VolumeEcShardsDelete:input_type -> volume_server_pb.VolumeEcShardsDeleteRequest - 61, // 49: volume_server_pb.VolumeServer.VolumeEcShardsMount:input_type -> volume_server_pb.VolumeEcShardsMountRequest - 63, // 50: volume_server_pb.VolumeServer.VolumeEcShardsUnmount:input_type -> volume_server_pb.VolumeEcShardsUnmountRequest - 65, // 51: volume_server_pb.VolumeServer.VolumeEcShardRead:input_type -> volume_server_pb.VolumeEcShardReadRequest - 67, // 52: volume_server_pb.VolumeServer.VolumeEcBlobDelete:input_type -> volume_server_pb.VolumeEcBlobDeleteRequest - 69, // 53: volume_server_pb.VolumeServer.VolumeEcShardsToVolume:input_type -> volume_server_pb.VolumeEcShardsToVolumeRequest - 71, // 54: volume_server_pb.VolumeServer.VolumeEcShardsInfo:input_type -> volume_server_pb.VolumeEcShardsInfoRequest - 81, // 55: volume_server_pb.VolumeServer.VolumeTierMoveDatToRemote:input_type -> volume_server_pb.VolumeTierMoveDatToRemoteRequest - 83, // 56: volume_server_pb.VolumeServer.VolumeTierMoveDatFromRemote:input_type -> volume_server_pb.VolumeTierMoveDatFromRemoteRequest - 85, // 57: volume_server_pb.VolumeServer.VolumeServerStatus:input_type -> volume_server_pb.VolumeServerStatusRequest - 87, // 58: volume_server_pb.VolumeServer.VolumeServerLeave:input_type -> volume_server_pb.VolumeServerLeaveRequest - 89, // 59: volume_server_pb.VolumeServer.FetchAndWriteNeedle:input_type -> volume_server_pb.FetchAndWriteNeedleRequest - 91, // 60: volume_server_pb.VolumeServer.Query:input_type -> volume_server_pb.QueryRequest - 93, // 61: volume_server_pb.VolumeServer.VolumeNeedleStatus:input_type -> volume_server_pb.VolumeNeedleStatusRequest - 95, // 62: volume_server_pb.VolumeServer.Ping:input_type -> volume_server_pb.PingRequest - 1, // 63: volume_server_pb.VolumeServer.BatchDelete:output_type -> volume_server_pb.BatchDeleteResponse - 5, // 64: volume_server_pb.VolumeServer.VacuumVolumeCheck:output_type -> volume_server_pb.VacuumVolumeCheckResponse - 7, // 65: volume_server_pb.VolumeServer.VacuumVolumeCompact:output_type -> volume_server_pb.VacuumVolumeCompactResponse - 9, // 66: volume_server_pb.VolumeServer.VacuumVolumeCommit:output_type -> volume_server_pb.VacuumVolumeCommitResponse - 11, // 67: volume_server_pb.VolumeServer.VacuumVolumeCleanup:output_type -> volume_server_pb.VacuumVolumeCleanupResponse - 13, // 68: volume_server_pb.VolumeServer.DeleteCollection:output_type -> volume_server_pb.DeleteCollectionResponse - 15, // 69: volume_server_pb.VolumeServer.AllocateVolume:output_type -> volume_server_pb.AllocateVolumeResponse - 17, // 70: volume_server_pb.VolumeServer.VolumeSyncStatus:output_type -> volume_server_pb.VolumeSyncStatusResponse - 19, // 71: volume_server_pb.VolumeServer.VolumeIncrementalCopy:output_type -> volume_server_pb.VolumeIncrementalCopyResponse - 21, // 72: volume_server_pb.VolumeServer.VolumeMount:output_type -> volume_server_pb.VolumeMountResponse - 23, // 73: volume_server_pb.VolumeServer.VolumeUnmount:output_type -> volume_server_pb.VolumeUnmountResponse - 25, // 74: volume_server_pb.VolumeServer.VolumeDelete:output_type -> volume_server_pb.VolumeDeleteResponse - 27, // 75: volume_server_pb.VolumeServer.VolumeMarkReadonly:output_type -> volume_server_pb.VolumeMarkReadonlyResponse - 29, // 76: 
volume_server_pb.VolumeServer.VolumeMarkWritable:output_type -> volume_server_pb.VolumeMarkWritableResponse - 31, // 77: volume_server_pb.VolumeServer.VolumeConfigure:output_type -> volume_server_pb.VolumeConfigureResponse - 33, // 78: volume_server_pb.VolumeServer.VolumeStatus:output_type -> volume_server_pb.VolumeStatusResponse - 35, // 79: volume_server_pb.VolumeServer.VolumeCopy:output_type -> volume_server_pb.VolumeCopyResponse - 75, // 80: volume_server_pb.VolumeServer.ReadVolumeFileStatus:output_type -> volume_server_pb.ReadVolumeFileStatusResponse - 37, // 81: volume_server_pb.VolumeServer.CopyFile:output_type -> volume_server_pb.CopyFileResponse - 40, // 82: volume_server_pb.VolumeServer.ReceiveFile:output_type -> volume_server_pb.ReceiveFileResponse - 42, // 83: volume_server_pb.VolumeServer.ReadNeedleBlob:output_type -> volume_server_pb.ReadNeedleBlobResponse - 44, // 84: volume_server_pb.VolumeServer.ReadNeedleMeta:output_type -> volume_server_pb.ReadNeedleMetaResponse - 46, // 85: volume_server_pb.VolumeServer.WriteNeedleBlob:output_type -> volume_server_pb.WriteNeedleBlobResponse - 48, // 86: volume_server_pb.VolumeServer.ReadAllNeedles:output_type -> volume_server_pb.ReadAllNeedlesResponse - 50, // 87: volume_server_pb.VolumeServer.VolumeTailSender:output_type -> volume_server_pb.VolumeTailSenderResponse - 52, // 88: volume_server_pb.VolumeServer.VolumeTailReceiver:output_type -> volume_server_pb.VolumeTailReceiverResponse - 54, // 89: volume_server_pb.VolumeServer.VolumeEcShardsGenerate:output_type -> volume_server_pb.VolumeEcShardsGenerateResponse - 56, // 90: volume_server_pb.VolumeServer.VolumeEcShardsRebuild:output_type -> volume_server_pb.VolumeEcShardsRebuildResponse - 58, // 91: volume_server_pb.VolumeServer.VolumeEcShardsCopy:output_type -> volume_server_pb.VolumeEcShardsCopyResponse - 60, // 92: volume_server_pb.VolumeServer.VolumeEcShardsDelete:output_type -> volume_server_pb.VolumeEcShardsDeleteResponse - 62, // 93: volume_server_pb.VolumeServer.VolumeEcShardsMount:output_type -> volume_server_pb.VolumeEcShardsMountResponse - 64, // 94: volume_server_pb.VolumeServer.VolumeEcShardsUnmount:output_type -> volume_server_pb.VolumeEcShardsUnmountResponse - 66, // 95: volume_server_pb.VolumeServer.VolumeEcShardRead:output_type -> volume_server_pb.VolumeEcShardReadResponse - 68, // 96: volume_server_pb.VolumeServer.VolumeEcBlobDelete:output_type -> volume_server_pb.VolumeEcBlobDeleteResponse - 70, // 97: volume_server_pb.VolumeServer.VolumeEcShardsToVolume:output_type -> volume_server_pb.VolumeEcShardsToVolumeResponse - 72, // 98: volume_server_pb.VolumeServer.VolumeEcShardsInfo:output_type -> volume_server_pb.VolumeEcShardsInfoResponse - 82, // 99: volume_server_pb.VolumeServer.VolumeTierMoveDatToRemote:output_type -> volume_server_pb.VolumeTierMoveDatToRemoteResponse - 84, // 100: volume_server_pb.VolumeServer.VolumeTierMoveDatFromRemote:output_type -> volume_server_pb.VolumeTierMoveDatFromRemoteResponse - 86, // 101: volume_server_pb.VolumeServer.VolumeServerStatus:output_type -> volume_server_pb.VolumeServerStatusResponse - 88, // 102: volume_server_pb.VolumeServer.VolumeServerLeave:output_type -> volume_server_pb.VolumeServerLeaveResponse - 90, // 103: volume_server_pb.VolumeServer.FetchAndWriteNeedle:output_type -> volume_server_pb.FetchAndWriteNeedleResponse - 92, // 104: volume_server_pb.VolumeServer.Query:output_type -> volume_server_pb.QueriedStripe - 94, // 105: volume_server_pb.VolumeServer.VolumeNeedleStatus:output_type -> 
volume_server_pb.VolumeNeedleStatusResponse - 96, // 106: volume_server_pb.VolumeServer.Ping:output_type -> volume_server_pb.PingResponse - 63, // [63:107] is the sub-list for method output_type - 19, // [19:63] is the sub-list for method input_type - 19, // [19:19] is the sub-list for extension type_name - 19, // [19:19] is the sub-list for extension extendee - 0, // [0:19] is the sub-list for field type_name + 80, // 5: volume_server_pb.VolumeInfo.ec_shard_config:type_name -> volume_server_pb.EcShardConfig + 78, // 6: volume_server_pb.OldVersionVolumeInfo.files:type_name -> volume_server_pb.RemoteFile + 76, // 7: volume_server_pb.VolumeServerStatusResponse.disk_statuses:type_name -> volume_server_pb.DiskStatus + 77, // 8: volume_server_pb.VolumeServerStatusResponse.memory_status:type_name -> volume_server_pb.MemStatus + 98, // 9: volume_server_pb.FetchAndWriteNeedleRequest.replicas:type_name -> volume_server_pb.FetchAndWriteNeedleRequest.Replica + 107, // 10: volume_server_pb.FetchAndWriteNeedleRequest.remote_conf:type_name -> remote_pb.RemoteConf + 108, // 11: volume_server_pb.FetchAndWriteNeedleRequest.remote_location:type_name -> remote_pb.RemoteStorageLocation + 99, // 12: volume_server_pb.QueryRequest.filter:type_name -> volume_server_pb.QueryRequest.Filter + 100, // 13: volume_server_pb.QueryRequest.input_serialization:type_name -> volume_server_pb.QueryRequest.InputSerialization + 101, // 14: volume_server_pb.QueryRequest.output_serialization:type_name -> volume_server_pb.QueryRequest.OutputSerialization + 102, // 15: volume_server_pb.QueryRequest.InputSerialization.csv_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.CSVInput + 103, // 16: volume_server_pb.QueryRequest.InputSerialization.json_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.JSONInput + 104, // 17: volume_server_pb.QueryRequest.InputSerialization.parquet_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.ParquetInput + 105, // 18: volume_server_pb.QueryRequest.OutputSerialization.csv_output:type_name -> volume_server_pb.QueryRequest.OutputSerialization.CSVOutput + 106, // 19: volume_server_pb.QueryRequest.OutputSerialization.json_output:type_name -> volume_server_pb.QueryRequest.OutputSerialization.JSONOutput + 0, // 20: volume_server_pb.VolumeServer.BatchDelete:input_type -> volume_server_pb.BatchDeleteRequest + 4, // 21: volume_server_pb.VolumeServer.VacuumVolumeCheck:input_type -> volume_server_pb.VacuumVolumeCheckRequest + 6, // 22: volume_server_pb.VolumeServer.VacuumVolumeCompact:input_type -> volume_server_pb.VacuumVolumeCompactRequest + 8, // 23: volume_server_pb.VolumeServer.VacuumVolumeCommit:input_type -> volume_server_pb.VacuumVolumeCommitRequest + 10, // 24: volume_server_pb.VolumeServer.VacuumVolumeCleanup:input_type -> volume_server_pb.VacuumVolumeCleanupRequest + 12, // 25: volume_server_pb.VolumeServer.DeleteCollection:input_type -> volume_server_pb.DeleteCollectionRequest + 14, // 26: volume_server_pb.VolumeServer.AllocateVolume:input_type -> volume_server_pb.AllocateVolumeRequest + 16, // 27: volume_server_pb.VolumeServer.VolumeSyncStatus:input_type -> volume_server_pb.VolumeSyncStatusRequest + 18, // 28: volume_server_pb.VolumeServer.VolumeIncrementalCopy:input_type -> volume_server_pb.VolumeIncrementalCopyRequest + 20, // 29: volume_server_pb.VolumeServer.VolumeMount:input_type -> volume_server_pb.VolumeMountRequest + 22, // 30: volume_server_pb.VolumeServer.VolumeUnmount:input_type -> volume_server_pb.VolumeUnmountRequest + 24, // 31: 
volume_server_pb.VolumeServer.VolumeDelete:input_type -> volume_server_pb.VolumeDeleteRequest + 26, // 32: volume_server_pb.VolumeServer.VolumeMarkReadonly:input_type -> volume_server_pb.VolumeMarkReadonlyRequest + 28, // 33: volume_server_pb.VolumeServer.VolumeMarkWritable:input_type -> volume_server_pb.VolumeMarkWritableRequest + 30, // 34: volume_server_pb.VolumeServer.VolumeConfigure:input_type -> volume_server_pb.VolumeConfigureRequest + 32, // 35: volume_server_pb.VolumeServer.VolumeStatus:input_type -> volume_server_pb.VolumeStatusRequest + 34, // 36: volume_server_pb.VolumeServer.VolumeCopy:input_type -> volume_server_pb.VolumeCopyRequest + 74, // 37: volume_server_pb.VolumeServer.ReadVolumeFileStatus:input_type -> volume_server_pb.ReadVolumeFileStatusRequest + 36, // 38: volume_server_pb.VolumeServer.CopyFile:input_type -> volume_server_pb.CopyFileRequest + 38, // 39: volume_server_pb.VolumeServer.ReceiveFile:input_type -> volume_server_pb.ReceiveFileRequest + 41, // 40: volume_server_pb.VolumeServer.ReadNeedleBlob:input_type -> volume_server_pb.ReadNeedleBlobRequest + 43, // 41: volume_server_pb.VolumeServer.ReadNeedleMeta:input_type -> volume_server_pb.ReadNeedleMetaRequest + 45, // 42: volume_server_pb.VolumeServer.WriteNeedleBlob:input_type -> volume_server_pb.WriteNeedleBlobRequest + 47, // 43: volume_server_pb.VolumeServer.ReadAllNeedles:input_type -> volume_server_pb.ReadAllNeedlesRequest + 49, // 44: volume_server_pb.VolumeServer.VolumeTailSender:input_type -> volume_server_pb.VolumeTailSenderRequest + 51, // 45: volume_server_pb.VolumeServer.VolumeTailReceiver:input_type -> volume_server_pb.VolumeTailReceiverRequest + 53, // 46: volume_server_pb.VolumeServer.VolumeEcShardsGenerate:input_type -> volume_server_pb.VolumeEcShardsGenerateRequest + 55, // 47: volume_server_pb.VolumeServer.VolumeEcShardsRebuild:input_type -> volume_server_pb.VolumeEcShardsRebuildRequest + 57, // 48: volume_server_pb.VolumeServer.VolumeEcShardsCopy:input_type -> volume_server_pb.VolumeEcShardsCopyRequest + 59, // 49: volume_server_pb.VolumeServer.VolumeEcShardsDelete:input_type -> volume_server_pb.VolumeEcShardsDeleteRequest + 61, // 50: volume_server_pb.VolumeServer.VolumeEcShardsMount:input_type -> volume_server_pb.VolumeEcShardsMountRequest + 63, // 51: volume_server_pb.VolumeServer.VolumeEcShardsUnmount:input_type -> volume_server_pb.VolumeEcShardsUnmountRequest + 65, // 52: volume_server_pb.VolumeServer.VolumeEcShardRead:input_type -> volume_server_pb.VolumeEcShardReadRequest + 67, // 53: volume_server_pb.VolumeServer.VolumeEcBlobDelete:input_type -> volume_server_pb.VolumeEcBlobDeleteRequest + 69, // 54: volume_server_pb.VolumeServer.VolumeEcShardsToVolume:input_type -> volume_server_pb.VolumeEcShardsToVolumeRequest + 71, // 55: volume_server_pb.VolumeServer.VolumeEcShardsInfo:input_type -> volume_server_pb.VolumeEcShardsInfoRequest + 82, // 56: volume_server_pb.VolumeServer.VolumeTierMoveDatToRemote:input_type -> volume_server_pb.VolumeTierMoveDatToRemoteRequest + 84, // 57: volume_server_pb.VolumeServer.VolumeTierMoveDatFromRemote:input_type -> volume_server_pb.VolumeTierMoveDatFromRemoteRequest + 86, // 58: volume_server_pb.VolumeServer.VolumeServerStatus:input_type -> volume_server_pb.VolumeServerStatusRequest + 88, // 59: volume_server_pb.VolumeServer.VolumeServerLeave:input_type -> volume_server_pb.VolumeServerLeaveRequest + 90, // 60: volume_server_pb.VolumeServer.FetchAndWriteNeedle:input_type -> volume_server_pb.FetchAndWriteNeedleRequest + 92, // 61: 
volume_server_pb.VolumeServer.Query:input_type -> volume_server_pb.QueryRequest + 94, // 62: volume_server_pb.VolumeServer.VolumeNeedleStatus:input_type -> volume_server_pb.VolumeNeedleStatusRequest + 96, // 63: volume_server_pb.VolumeServer.Ping:input_type -> volume_server_pb.PingRequest + 1, // 64: volume_server_pb.VolumeServer.BatchDelete:output_type -> volume_server_pb.BatchDeleteResponse + 5, // 65: volume_server_pb.VolumeServer.VacuumVolumeCheck:output_type -> volume_server_pb.VacuumVolumeCheckResponse + 7, // 66: volume_server_pb.VolumeServer.VacuumVolumeCompact:output_type -> volume_server_pb.VacuumVolumeCompactResponse + 9, // 67: volume_server_pb.VolumeServer.VacuumVolumeCommit:output_type -> volume_server_pb.VacuumVolumeCommitResponse + 11, // 68: volume_server_pb.VolumeServer.VacuumVolumeCleanup:output_type -> volume_server_pb.VacuumVolumeCleanupResponse + 13, // 69: volume_server_pb.VolumeServer.DeleteCollection:output_type -> volume_server_pb.DeleteCollectionResponse + 15, // 70: volume_server_pb.VolumeServer.AllocateVolume:output_type -> volume_server_pb.AllocateVolumeResponse + 17, // 71: volume_server_pb.VolumeServer.VolumeSyncStatus:output_type -> volume_server_pb.VolumeSyncStatusResponse + 19, // 72: volume_server_pb.VolumeServer.VolumeIncrementalCopy:output_type -> volume_server_pb.VolumeIncrementalCopyResponse + 21, // 73: volume_server_pb.VolumeServer.VolumeMount:output_type -> volume_server_pb.VolumeMountResponse + 23, // 74: volume_server_pb.VolumeServer.VolumeUnmount:output_type -> volume_server_pb.VolumeUnmountResponse + 25, // 75: volume_server_pb.VolumeServer.VolumeDelete:output_type -> volume_server_pb.VolumeDeleteResponse + 27, // 76: volume_server_pb.VolumeServer.VolumeMarkReadonly:output_type -> volume_server_pb.VolumeMarkReadonlyResponse + 29, // 77: volume_server_pb.VolumeServer.VolumeMarkWritable:output_type -> volume_server_pb.VolumeMarkWritableResponse + 31, // 78: volume_server_pb.VolumeServer.VolumeConfigure:output_type -> volume_server_pb.VolumeConfigureResponse + 33, // 79: volume_server_pb.VolumeServer.VolumeStatus:output_type -> volume_server_pb.VolumeStatusResponse + 35, // 80: volume_server_pb.VolumeServer.VolumeCopy:output_type -> volume_server_pb.VolumeCopyResponse + 75, // 81: volume_server_pb.VolumeServer.ReadVolumeFileStatus:output_type -> volume_server_pb.ReadVolumeFileStatusResponse + 37, // 82: volume_server_pb.VolumeServer.CopyFile:output_type -> volume_server_pb.CopyFileResponse + 40, // 83: volume_server_pb.VolumeServer.ReceiveFile:output_type -> volume_server_pb.ReceiveFileResponse + 42, // 84: volume_server_pb.VolumeServer.ReadNeedleBlob:output_type -> volume_server_pb.ReadNeedleBlobResponse + 44, // 85: volume_server_pb.VolumeServer.ReadNeedleMeta:output_type -> volume_server_pb.ReadNeedleMetaResponse + 46, // 86: volume_server_pb.VolumeServer.WriteNeedleBlob:output_type -> volume_server_pb.WriteNeedleBlobResponse + 48, // 87: volume_server_pb.VolumeServer.ReadAllNeedles:output_type -> volume_server_pb.ReadAllNeedlesResponse + 50, // 88: volume_server_pb.VolumeServer.VolumeTailSender:output_type -> volume_server_pb.VolumeTailSenderResponse + 52, // 89: volume_server_pb.VolumeServer.VolumeTailReceiver:output_type -> volume_server_pb.VolumeTailReceiverResponse + 54, // 90: volume_server_pb.VolumeServer.VolumeEcShardsGenerate:output_type -> volume_server_pb.VolumeEcShardsGenerateResponse + 56, // 91: volume_server_pb.VolumeServer.VolumeEcShardsRebuild:output_type -> volume_server_pb.VolumeEcShardsRebuildResponse + 58, // 92: 
volume_server_pb.VolumeServer.VolumeEcShardsCopy:output_type -> volume_server_pb.VolumeEcShardsCopyResponse + 60, // 93: volume_server_pb.VolumeServer.VolumeEcShardsDelete:output_type -> volume_server_pb.VolumeEcShardsDeleteResponse + 62, // 94: volume_server_pb.VolumeServer.VolumeEcShardsMount:output_type -> volume_server_pb.VolumeEcShardsMountResponse + 64, // 95: volume_server_pb.VolumeServer.VolumeEcShardsUnmount:output_type -> volume_server_pb.VolumeEcShardsUnmountResponse + 66, // 96: volume_server_pb.VolumeServer.VolumeEcShardRead:output_type -> volume_server_pb.VolumeEcShardReadResponse + 68, // 97: volume_server_pb.VolumeServer.VolumeEcBlobDelete:output_type -> volume_server_pb.VolumeEcBlobDeleteResponse + 70, // 98: volume_server_pb.VolumeServer.VolumeEcShardsToVolume:output_type -> volume_server_pb.VolumeEcShardsToVolumeResponse + 72, // 99: volume_server_pb.VolumeServer.VolumeEcShardsInfo:output_type -> volume_server_pb.VolumeEcShardsInfoResponse + 83, // 100: volume_server_pb.VolumeServer.VolumeTierMoveDatToRemote:output_type -> volume_server_pb.VolumeTierMoveDatToRemoteResponse + 85, // 101: volume_server_pb.VolumeServer.VolumeTierMoveDatFromRemote:output_type -> volume_server_pb.VolumeTierMoveDatFromRemoteResponse + 87, // 102: volume_server_pb.VolumeServer.VolumeServerStatus:output_type -> volume_server_pb.VolumeServerStatusResponse + 89, // 103: volume_server_pb.VolumeServer.VolumeServerLeave:output_type -> volume_server_pb.VolumeServerLeaveResponse + 91, // 104: volume_server_pb.VolumeServer.FetchAndWriteNeedle:output_type -> volume_server_pb.FetchAndWriteNeedleResponse + 93, // 105: volume_server_pb.VolumeServer.Query:output_type -> volume_server_pb.QueriedStripe + 95, // 106: volume_server_pb.VolumeServer.VolumeNeedleStatus:output_type -> volume_server_pb.VolumeNeedleStatusResponse + 97, // 107: volume_server_pb.VolumeServer.Ping:output_type -> volume_server_pb.PingResponse + 64, // [64:108] is the sub-list for method output_type + 20, // [20:64] is the sub-list for method input_type + 20, // [20:20] is the sub-list for extension type_name + 20, // [20:20] is the sub-list for extension extendee + 0, // [0:20] is the sub-list for field type_name } func init() { file_volume_server_proto_init() } @@ -6852,7 +6920,7 @@ func file_volume_server_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_volume_server_proto_rawDesc), len(file_volume_server_proto_rawDesc)), NumEnums: 0, - NumMessages: 106, + NumMessages: 107, NumExtensions: 0, NumServices: 1, }, diff --git a/weed/remote_storage/azure/azure_storage_client_test.go b/weed/remote_storage/azure/azure_storage_client_test.go index acb7dbd17..f57a4c6df 100644 --- a/weed/remote_storage/azure/azure_storage_client_test.go +++ b/weed/remote_storage/azure/azure_storage_client_test.go @@ -229,22 +229,22 @@ func TestToMetadata(t *testing.T) { s3_constants.AmzUserMetaPrefix + "789": []byte("value3"), }, expected: map[string]*string{ - "_123key": stringPtr("value1"), // starts with digit -> prefix _ - "_456_2d_test": stringPtr("value2"), // starts with digit AND has dash - "_789": stringPtr("value3"), + "_123key": stringPtr("value1"), // starts with digit -> prefix _ + "_456_2d_test": stringPtr("value2"), // starts with digit AND has dash + "_789": stringPtr("value3"), }, }, { name: "uppercase and mixed case keys", input: map[string][]byte{ - s3_constants.AmzUserMetaPrefix + "My-Key": []byte("value1"), - s3_constants.AmzUserMetaPrefix + "UPPERCASE": []byte("value2"), - 
s3_constants.AmzUserMetaPrefix + "MiXeD-CaSe": []byte("value3"), + s3_constants.AmzUserMetaPrefix + "My-Key": []byte("value1"), + s3_constants.AmzUserMetaPrefix + "UPPERCASE": []byte("value2"), + s3_constants.AmzUserMetaPrefix + "MiXeD-CaSe": []byte("value3"), }, expected: map[string]*string{ - "my_2d_key": stringPtr("value1"), // lowercase + dash -> _2d_ - "uppercase": stringPtr("value2"), - "mixed_2d_case": stringPtr("value3"), + "my_2d_key": stringPtr("value1"), // lowercase + dash -> _2d_ + "uppercase": stringPtr("value2"), + "mixed_2d_case": stringPtr("value3"), }, }, { diff --git a/weed/s3api/s3_sse_s3_integration_test.go b/weed/s3api/s3_sse_s3_integration_test.go index 8232aea7f..4e0d91a5c 100644 --- a/weed/s3api/s3_sse_s3_integration_test.go +++ b/weed/s3api/s3_sse_s3_integration_test.go @@ -78,7 +78,7 @@ func TestSSES3EndToEndSmallFile(t *testing.T) { // Step 3: Decrypt (simulates what happens during GET) // This tests the IV retrieval path for inline files - + // First, deserialize metadata from storage retrievedKeyData := mockEntry.Extended[s3_constants.SeaweedFSSSES3Key] retrievedKey, err := DeserializeSSES3Metadata(retrievedKeyData, keyManager) diff --git a/weed/s3api/s3_validation_utils.go b/weed/s3api/s3_validation_utils.go index e0e80d0a8..f69fc9c26 100644 --- a/weed/s3api/s3_validation_utils.go +++ b/weed/s3api/s3_validation_utils.go @@ -71,7 +71,7 @@ func ValidateSSES3Key(sseKey *SSES3Key) error { if sseKey == nil { return fmt.Errorf("SSE-S3 key cannot be nil") } - + // Validate key bytes if sseKey.Key == nil { return fmt.Errorf("SSE-S3 key bytes cannot be nil") @@ -79,22 +79,22 @@ func ValidateSSES3Key(sseKey *SSES3Key) error { if len(sseKey.Key) != SSES3KeySize { return fmt.Errorf("invalid SSE-S3 key size: expected %d bytes, got %d", SSES3KeySize, len(sseKey.Key)) } - + // Validate algorithm if sseKey.Algorithm != SSES3Algorithm { return fmt.Errorf("invalid SSE-S3 algorithm: expected %q, got %q", SSES3Algorithm, sseKey.Algorithm) } - + // Validate key ID (should not be empty) if sseKey.KeyID == "" { return fmt.Errorf("SSE-S3 key ID cannot be empty") } - + // IV validation is optional during key creation - it will be set during encryption // If IV is set, validate its length if len(sseKey.IV) > 0 && len(sseKey.IV) != s3_constants.AESBlockSize { return fmt.Errorf("invalid SSE-S3 IV length: expected %d bytes, got %d", s3_constants.AESBlockSize, len(sseKey.IV)) } - + return nil } diff --git a/weed/server/volume_grpc_erasure_coding.go b/weed/server/volume_grpc_erasure_coding.go index 88e94115d..5d100bdda 100644 --- a/weed/server/volume_grpc_erasure_coding.go +++ b/weed/server/volume_grpc_erasure_coding.go @@ -50,20 +50,38 @@ func (vs *VolumeServer) VolumeEcShardsGenerate(ctx context.Context, req *volume_ return nil, fmt.Errorf("existing collection:%v unexpected input: %v", v.Collection, req.Collection) } + // Create EC context - prefer existing .vif config if present (for regeneration scenarios) + ecCtx := erasure_coding.NewDefaultECContext(req.Collection, needle.VolumeId(req.VolumeId)) + if volumeInfo, _, found, _ := volume_info.MaybeLoadVolumeInfo(baseFileName + ".vif"); found && volumeInfo.EcShardConfig != nil { + ds := int(volumeInfo.EcShardConfig.DataShards) + ps := int(volumeInfo.EcShardConfig.ParityShards) + + // Validate and use existing EC config + if ds > 0 && ps > 0 && ds+ps <= erasure_coding.MaxShardCount { + ecCtx.DataShards = ds + ecCtx.ParityShards = ps + glog.V(0).Infof("Using existing EC config for volume %d: %s", req.VolumeId, ecCtx.String()) + } else { + 
glog.Warningf("Invalid EC config in .vif for volume %d (data=%d, parity=%d), using defaults", req.VolumeId, ds, ps) + } + } else { + glog.V(0).Infof("Using default EC config for volume %d: %s", req.VolumeId, ecCtx.String()) + } + shouldCleanup := true defer func() { if !shouldCleanup { return } - for i := 0; i < erasure_coding.TotalShardsCount; i++ { - os.Remove(fmt.Sprintf("%s.ec%2d", baseFileName, i)) + for i := 0; i < ecCtx.Total(); i++ { + os.Remove(baseFileName + ecCtx.ToExt(i)) } os.Remove(v.IndexFileName() + ".ecx") }() - // write .ec00 ~ .ec13 files - if err := erasure_coding.WriteEcFiles(baseFileName); err != nil { - return nil, fmt.Errorf("WriteEcFiles %s: %v", baseFileName, err) + // write .ec00 ~ .ec[TotalShards-1] files using context + if err := erasure_coding.WriteEcFilesWithContext(baseFileName, ecCtx); err != nil { + return nil, fmt.Errorf("WriteEcFilesWithContext %s: %v", baseFileName, err) } // write .ecx file @@ -84,6 +102,21 @@ func (vs *VolumeServer) VolumeEcShardsGenerate(ctx context.Context, req *volume_ datSize, _, _ := v.FileStat() volumeInfo.DatFileSize = int64(datSize) + + // Validate EC configuration before saving to .vif + if ecCtx.DataShards <= 0 || ecCtx.ParityShards <= 0 || ecCtx.Total() > erasure_coding.MaxShardCount { + return nil, fmt.Errorf("invalid EC config before saving: data=%d, parity=%d, total=%d (max=%d)", + ecCtx.DataShards, ecCtx.ParityShards, ecCtx.Total(), erasure_coding.MaxShardCount) + } + + // Save EC configuration to VolumeInfo + volumeInfo.EcShardConfig = &volume_server_pb.EcShardConfig{ + DataShards: uint32(ecCtx.DataShards), + ParityShards: uint32(ecCtx.ParityShards), + } + glog.V(1).Infof("Saving EC config to .vif for volume %d: %d+%d (total: %d)", + req.VolumeId, ecCtx.DataShards, ecCtx.ParityShards, ecCtx.Total()) + if err := volume_info.SaveVolumeInfo(baseFileName+".vif", volumeInfo); err != nil { return nil, fmt.Errorf("SaveVolumeInfo %s: %v", baseFileName, err) } @@ -442,9 +475,10 @@ func (vs *VolumeServer) VolumeEcShardsToVolume(ctx context.Context, req *volume_ glog.V(0).Infof("VolumeEcShardsToVolume: %v", req) - // collect .ec00 ~ .ec09 files - shardFileNames := make([]string, erasure_coding.DataShardsCount) - v, found := vs.store.CollectEcShards(needle.VolumeId(req.VolumeId), shardFileNames) + // Collect all EC shards (NewEcVolume will load EC config from .vif into v.ECContext) + // Use MaxShardCount (32) to support custom EC ratios up to 32 total shards + tempShards := make([]string, erasure_coding.MaxShardCount) + v, found := vs.store.CollectEcShards(needle.VolumeId(req.VolumeId), tempShards) if !found { return nil, fmt.Errorf("ec volume %d not found", req.VolumeId) } @@ -453,7 +487,19 @@ func (vs *VolumeServer) VolumeEcShardsToVolume(ctx context.Context, req *volume_ return nil, fmt.Errorf("existing collection:%v unexpected input: %v", v.Collection, req.Collection) } - for shardId := 0; shardId < erasure_coding.DataShardsCount; shardId++ { + // Use EC context (already loaded from .vif) to determine data shard count + dataShards := v.ECContext.DataShards + + // Defensive validation to prevent panics from corrupted ECContext + if dataShards <= 0 || dataShards > erasure_coding.MaxShardCount { + return nil, fmt.Errorf("invalid data shard count %d for volume %d (must be 1..%d)", dataShards, req.VolumeId, erasure_coding.MaxShardCount) + } + + shardFileNames := tempShards[:dataShards] + glog.V(1).Infof("Using EC config from volume %d: %d data shards", req.VolumeId, dataShards) + + // Verify all data shards are present + for 
shardId := 0; shardId < dataShards; shardId++ { if shardFileNames[shardId] == "" { return nil, fmt.Errorf("ec volume %d missing shard %d", req.VolumeId, shardId) } diff --git a/weed/shell/command_ec_common.go b/weed/shell/command_ec_common.go index 665daa1b8..f059b4e74 100644 --- a/weed/shell/command_ec_common.go +++ b/weed/shell/command_ec_common.go @@ -622,7 +622,8 @@ func (ecb *ecBalancer) deleteDuplicatedEcShards(collection string) error { func (ecb *ecBalancer) doDeduplicateEcShards(collection string, vid needle.VolumeId, locations []*EcNode) error { // check whether this volume has ecNodes that are over average - shardToLocations := make([][]*EcNode, erasure_coding.TotalShardsCount) + // Use MaxShardCount (32) to support custom EC ratios + shardToLocations := make([][]*EcNode, erasure_coding.MaxShardCount) for _, ecNode := range locations { shardBits := findEcVolumeShards(ecNode, vid) for _, shardId := range shardBits.ShardIds() { @@ -677,11 +678,16 @@ func countShardsByRack(vid needle.VolumeId, locations []*EcNode) map[string]int func (ecb *ecBalancer) doBalanceEcShardsAcrossRacks(collection string, vid needle.VolumeId, locations []*EcNode) error { racks := ecb.racks() - // calculate average number of shards an ec rack should have for one volume - averageShardsPerEcRack := ceilDivide(erasure_coding.TotalShardsCount, len(racks)) - // see the volume's shards are in how many racks, and how many in each rack rackToShardCount := countShardsByRack(vid, locations) + + // Calculate actual total shards for this volume (not hardcoded default) + var totalShardsForVolume int + for _, count := range rackToShardCount { + totalShardsForVolume += count + } + // calculate average number of shards an ec rack should have for one volume + averageShardsPerEcRack := ceilDivide(totalShardsForVolume, len(racks)) rackEcNodesWithVid := groupBy(locations, func(ecNode *EcNode) string { return string(ecNode.rack) }) diff --git a/weed/shell/command_ec_rebuild.go b/weed/shell/command_ec_rebuild.go index 8cae77434..cceaa1899 100644 --- a/weed/shell/command_ec_rebuild.go +++ b/weed/shell/command_ec_rebuild.go @@ -264,7 +264,8 @@ func (ecShardMap EcShardMap) registerEcNode(ecNode *EcNode, collection string) { if shardInfo.Collection == collection { existing, found := ecShardMap[needle.VolumeId(shardInfo.Id)] if !found { - existing = make([][]*EcNode, erasure_coding.TotalShardsCount) + // Use MaxShardCount (32) to support custom EC ratios + existing = make([][]*EcNode, erasure_coding.MaxShardCount) ecShardMap[needle.VolumeId(shardInfo.Id)] = existing } for _, shardId := range erasure_coding.ShardBits(shardInfo.EcIndexBits).ShardIds() { diff --git a/weed/storage/disk_location_ec.go b/weed/storage/disk_location_ec.go index 128bfd26f..b370555da 100644 --- a/weed/storage/disk_location_ec.go +++ b/weed/storage/disk_location_ec.go @@ -16,7 +16,9 @@ import ( ) var ( - re = regexp.MustCompile(`\.ec[0-9][0-9]`) + // Match .ec00 through .ec999 (currently only .ec00-.ec31 are used) + // Using \d{2,3} for future-proofing if MaxShardCount is ever increased beyond 99 + re = regexp.MustCompile(`\.ec\d{2,3}`) ) func (l *DiskLocation) FindEcVolume(vid needle.VolumeId) (*erasure_coding.EcVolume, bool) { @@ -398,8 +400,8 @@ func (l *DiskLocation) validateEcVolume(collection string, vid needle.VolumeId) var actualShardSize int64 = -1 // Count shards and validate they all have the same size (required for Reed-Solomon EC) - // Shard files (.ec00 - .ec13) are always in l.Directory, not l.IdxDirectory - for i := 0; i < 
erasure_coding.TotalShardsCount; i++ { + // Check up to MaxShardCount (32) to support custom EC ratios + for i := 0; i < erasure_coding.MaxShardCount; i++ { shardFileName := baseFileName + erasure_coding.ToExt(i) fi, err := os.Stat(shardFileName) @@ -472,8 +474,9 @@ func (l *DiskLocation) removeEcVolumeFiles(collection string, vid needle.VolumeI removeFile(indexBaseFileName+".ecx", "EC index file") removeFile(indexBaseFileName+".ecj", "EC journal file") - // Remove all EC shard files (.ec00 ~ .ec13) from data directory - for i := 0; i < erasure_coding.TotalShardsCount; i++ { + // Remove all EC shard files (.ec00 ~ .ec31) from data directory + // Use MaxShardCount (32) to support custom EC ratios + for i := 0; i < erasure_coding.MaxShardCount; i++ { removeFile(baseFileName+erasure_coding.ToExt(i), "EC shard file") } } diff --git a/weed/storage/erasure_coding/ec_context.go b/weed/storage/erasure_coding/ec_context.go new file mode 100644 index 000000000..770fe41af --- /dev/null +++ b/weed/storage/erasure_coding/ec_context.go @@ -0,0 +1,46 @@ +package erasure_coding + +import ( + "fmt" + + "github.com/klauspost/reedsolomon" + "github.com/seaweedfs/seaweedfs/weed/storage/needle" +) + +// ECContext encapsulates erasure coding parameters for encoding/decoding operations +type ECContext struct { + DataShards int + ParityShards int + Collection string + VolumeId needle.VolumeId +} + +// Total returns the total number of shards (data + parity) +func (ctx *ECContext) Total() int { + return ctx.DataShards + ctx.ParityShards +} + +// NewDefaultECContext creates a context with default 10+4 shard configuration +func NewDefaultECContext(collection string, volumeId needle.VolumeId) *ECContext { + return &ECContext{ + DataShards: DataShardsCount, + ParityShards: ParityShardsCount, + Collection: collection, + VolumeId: volumeId, + } +} + +// CreateEncoder creates a Reed-Solomon encoder for this context +func (ctx *ECContext) CreateEncoder() (reedsolomon.Encoder, error) { + return reedsolomon.New(ctx.DataShards, ctx.ParityShards) +} + +// ToExt returns the file extension for a given shard index +func (ctx *ECContext) ToExt(shardIndex int) string { + return fmt.Sprintf(".ec%02d", shardIndex) +} + +// String returns a human-readable representation of the EC configuration +func (ctx *ECContext) String() string { + return fmt.Sprintf("%d+%d (total: %d)", ctx.DataShards, ctx.ParityShards, ctx.Total()) +} diff --git a/weed/storage/erasure_coding/ec_encoder.go b/weed/storage/erasure_coding/ec_encoder.go index eeeb156e6..81ebffdcb 100644 --- a/weed/storage/erasure_coding/ec_encoder.go +++ b/weed/storage/erasure_coding/ec_encoder.go @@ -11,6 +11,7 @@ import ( "github.com/seaweedfs/seaweedfs/weed/storage/idx" "github.com/seaweedfs/seaweedfs/weed/storage/needle_map" "github.com/seaweedfs/seaweedfs/weed/storage/types" + "github.com/seaweedfs/seaweedfs/weed/storage/volume_info" "github.com/seaweedfs/seaweedfs/weed/util" ) @@ -18,6 +19,7 @@ const ( DataShardsCount = 10 ParityShardsCount = 4 TotalShardsCount = DataShardsCount + ParityShardsCount + MaxShardCount = 32 // Maximum number of shards since ShardBits is uint32 (bits 0-31) MinTotalDisks = TotalShardsCount/ParityShardsCount + 1 ErasureCodingLargeBlockSize = 1024 * 1024 * 1024 // 1GB ErasureCodingSmallBlockSize = 1024 * 1024 // 1MB @@ -54,20 +56,53 @@ func WriteSortedFileFromIdx(baseFileName string, ext string) (e error) { return nil } -// WriteEcFiles generates .ec00 ~ .ec13 files +// WriteEcFiles generates .ec00 ~ .ec13 files using default EC context func 
WriteEcFiles(baseFileName string) error { - return generateEcFiles(baseFileName, 256*1024, ErasureCodingLargeBlockSize, ErasureCodingSmallBlockSize) + ctx := NewDefaultECContext("", 0) + return WriteEcFilesWithContext(baseFileName, ctx) +} + +// WriteEcFilesWithContext generates EC files using the provided context +func WriteEcFilesWithContext(baseFileName string, ctx *ECContext) error { + return generateEcFiles(baseFileName, 256*1024, ErasureCodingLargeBlockSize, ErasureCodingSmallBlockSize, ctx) } func RebuildEcFiles(baseFileName string) ([]uint32, error) { - return generateMissingEcFiles(baseFileName, 256*1024, ErasureCodingLargeBlockSize, ErasureCodingSmallBlockSize) + // Attempt to load EC config from .vif file to preserve original configuration + var ctx *ECContext + if volumeInfo, _, found, _ := volume_info.MaybeLoadVolumeInfo(baseFileName + ".vif"); found && volumeInfo.EcShardConfig != nil { + ds := int(volumeInfo.EcShardConfig.DataShards) + ps := int(volumeInfo.EcShardConfig.ParityShards) + + // Validate EC config before using it + if ds > 0 && ps > 0 && ds+ps <= MaxShardCount { + ctx = &ECContext{ + DataShards: ds, + ParityShards: ps, + } + glog.V(0).Infof("Rebuilding EC files for %s with config from .vif: %s", baseFileName, ctx.String()) + } else { + glog.Warningf("Invalid EC config in .vif for %s (data=%d, parity=%d), using default", baseFileName, ds, ps) + ctx = NewDefaultECContext("", 0) + } + } else { + glog.V(0).Infof("Rebuilding EC files for %s with default config", baseFileName) + ctx = NewDefaultECContext("", 0) + } + + return RebuildEcFilesWithContext(baseFileName, ctx) +} + +// RebuildEcFilesWithContext rebuilds missing EC files using the provided context +func RebuildEcFilesWithContext(baseFileName string, ctx *ECContext) ([]uint32, error) { + return generateMissingEcFiles(baseFileName, 256*1024, ErasureCodingLargeBlockSize, ErasureCodingSmallBlockSize, ctx) } func ToExt(ecIndex int) string { return fmt.Sprintf(".ec%02d", ecIndex) } -func generateEcFiles(baseFileName string, bufferSize int, largeBlockSize int64, smallBlockSize int64) error { +func generateEcFiles(baseFileName string, bufferSize int, largeBlockSize int64, smallBlockSize int64, ctx *ECContext) error { file, err := os.OpenFile(baseFileName+".dat", os.O_RDONLY, 0) if err != nil { return fmt.Errorf("failed to open dat file: %w", err) @@ -79,21 +114,21 @@ func generateEcFiles(baseFileName string, bufferSize int, largeBlockSize int64, return fmt.Errorf("failed to stat dat file: %w", err) } - glog.V(0).Infof("encodeDatFile %s.dat size:%d", baseFileName, fi.Size()) - err = encodeDatFile(fi.Size(), baseFileName, bufferSize, largeBlockSize, file, smallBlockSize) + glog.V(0).Infof("encodeDatFile %s.dat size:%d with EC context %s", baseFileName, fi.Size(), ctx.String()) + err = encodeDatFile(fi.Size(), baseFileName, bufferSize, largeBlockSize, file, smallBlockSize, ctx) if err != nil { return fmt.Errorf("encodeDatFile: %w", err) } return nil } -func generateMissingEcFiles(baseFileName string, bufferSize int, largeBlockSize int64, smallBlockSize int64) (generatedShardIds []uint32, err error) { +func generateMissingEcFiles(baseFileName string, bufferSize int, largeBlockSize int64, smallBlockSize int64, ctx *ECContext) (generatedShardIds []uint32, err error) { - shardHasData := make([]bool, TotalShardsCount) - inputFiles := make([]*os.File, TotalShardsCount) - outputFiles := make([]*os.File, TotalShardsCount) - for shardId := 0; shardId < TotalShardsCount; shardId++ { - shardFileName := baseFileName + ToExt(shardId) 
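For orientation, here is a minimal standalone sketch of how a caller might drive the new context-aware entry point instead of the legacy WriteEcFiles wrapper. The 6+3 layout, collection name, volume id, and base path are made-up values for illustration; only NewDefaultECContext, the DataShards/ParityShards fields, String, and WriteEcFilesWithContext come from the change above.

package main

import (
	"fmt"
	"log"

	ec "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
)

func main() {
	// Hypothetical: encode volume 7 of collection "pics" as 6 data + 3 parity shards.
	ctx := ec.NewDefaultECContext("pics", 7) // starts out as the default 10+4
	ctx.DataShards, ctx.ParityShards = 6, 3  // custom ratio; total stays <= MaxShardCount
	fmt.Printf("encoding with EC layout %s\n", ctx.String())

	// Would write /data/pics_7.ec00 .. .ec08 (ctx.Total() == 9) next to the existing .dat file.
	if err := ec.WriteEcFilesWithContext("/data/pics_7", ctx); err != nil {
		log.Fatalf("WriteEcFilesWithContext: %v", err)
	}
}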
+ shardHasData := make([]bool, ctx.Total()) + inputFiles := make([]*os.File, ctx.Total()) + outputFiles := make([]*os.File, ctx.Total()) + for shardId := 0; shardId < ctx.Total(); shardId++ { + shardFileName := baseFileName + ctx.ToExt(shardId) if util.FileExists(shardFileName) { shardHasData[shardId] = true inputFiles[shardId], err = os.OpenFile(shardFileName, os.O_RDONLY, 0) @@ -111,14 +146,14 @@ func generateMissingEcFiles(baseFileName string, bufferSize int, largeBlockSize } } - err = rebuildEcFiles(shardHasData, inputFiles, outputFiles) + err = rebuildEcFiles(shardHasData, inputFiles, outputFiles, ctx) if err != nil { return nil, fmt.Errorf("rebuildEcFiles: %w", err) } return } -func encodeData(file *os.File, enc reedsolomon.Encoder, startOffset, blockSize int64, buffers [][]byte, outputs []*os.File) error { +func encodeData(file *os.File, enc reedsolomon.Encoder, startOffset, blockSize int64, buffers [][]byte, outputs []*os.File, ctx *ECContext) error { bufferSize := int64(len(buffers[0])) if bufferSize == 0 { @@ -131,7 +166,7 @@ func encodeData(file *os.File, enc reedsolomon.Encoder, startOffset, blockSize i } for b := int64(0); b < batchCount; b++ { - err := encodeDataOneBatch(file, enc, startOffset+b*bufferSize, blockSize, buffers, outputs) + err := encodeDataOneBatch(file, enc, startOffset+b*bufferSize, blockSize, buffers, outputs, ctx) if err != nil { return err } @@ -140,9 +175,9 @@ func encodeData(file *os.File, enc reedsolomon.Encoder, startOffset, blockSize i return nil } -func openEcFiles(baseFileName string, forRead bool) (files []*os.File, err error) { - for i := 0; i < TotalShardsCount; i++ { - fname := baseFileName + ToExt(i) +func openEcFiles(baseFileName string, forRead bool, ctx *ECContext) (files []*os.File, err error) { + for i := 0; i < ctx.Total(); i++ { + fname := baseFileName + ctx.ToExt(i) openOption := os.O_TRUNC | os.O_CREATE | os.O_WRONLY if forRead { openOption = os.O_RDONLY @@ -164,10 +199,10 @@ func closeEcFiles(files []*os.File) { } } -func encodeDataOneBatch(file *os.File, enc reedsolomon.Encoder, startOffset, blockSize int64, buffers [][]byte, outputs []*os.File) error { +func encodeDataOneBatch(file *os.File, enc reedsolomon.Encoder, startOffset, blockSize int64, buffers [][]byte, outputs []*os.File, ctx *ECContext) error { // read data into buffers - for i := 0; i < DataShardsCount; i++ { + for i := 0; i < ctx.DataShards; i++ { n, err := file.ReadAt(buffers[i], startOffset+blockSize*int64(i)) if err != nil { if err != io.EOF { @@ -186,7 +221,7 @@ func encodeDataOneBatch(file *os.File, enc reedsolomon.Encoder, startOffset, blo return err } - for i := 0; i < TotalShardsCount; i++ { + for i := 0; i < ctx.Total(); i++ { _, err := outputs[i].Write(buffers[i]) if err != nil { return err @@ -196,53 +231,57 @@ func encodeDataOneBatch(file *os.File, enc reedsolomon.Encoder, startOffset, blo return nil } -func encodeDatFile(remainingSize int64, baseFileName string, bufferSize int, largeBlockSize int64, file *os.File, smallBlockSize int64) error { +func encodeDatFile(remainingSize int64, baseFileName string, bufferSize int, largeBlockSize int64, file *os.File, smallBlockSize int64, ctx *ECContext) error { var processedSize int64 - enc, err := reedsolomon.New(DataShardsCount, ParityShardsCount) + enc, err := ctx.CreateEncoder() if err != nil { return fmt.Errorf("failed to create encoder: %w", err) } - buffers := make([][]byte, TotalShardsCount) + buffers := make([][]byte, ctx.Total()) for i := range buffers { buffers[i] = make([]byte, bufferSize) } - outputs, 
err := openEcFiles(baseFileName, false) + outputs, err := openEcFiles(baseFileName, false, ctx) defer closeEcFiles(outputs) if err != nil { return fmt.Errorf("failed to open ec files %s: %v", baseFileName, err) } - for remainingSize > largeBlockSize*DataShardsCount { - err = encodeData(file, enc, processedSize, largeBlockSize, buffers, outputs) + // Pre-calculate row sizes to avoid redundant calculations in loops + largeRowSize := largeBlockSize * int64(ctx.DataShards) + smallRowSize := smallBlockSize * int64(ctx.DataShards) + + for remainingSize >= largeRowSize { + err = encodeData(file, enc, processedSize, largeBlockSize, buffers, outputs, ctx) if err != nil { return fmt.Errorf("failed to encode large chunk data: %w", err) } - remainingSize -= largeBlockSize * DataShardsCount - processedSize += largeBlockSize * DataShardsCount + remainingSize -= largeRowSize + processedSize += largeRowSize } for remainingSize > 0 { - err = encodeData(file, enc, processedSize, smallBlockSize, buffers, outputs) + err = encodeData(file, enc, processedSize, smallBlockSize, buffers, outputs, ctx) if err != nil { return fmt.Errorf("failed to encode small chunk data: %w", err) } - remainingSize -= smallBlockSize * DataShardsCount - processedSize += smallBlockSize * DataShardsCount + remainingSize -= smallRowSize + processedSize += smallRowSize } return nil } -func rebuildEcFiles(shardHasData []bool, inputFiles []*os.File, outputFiles []*os.File) error { +func rebuildEcFiles(shardHasData []bool, inputFiles []*os.File, outputFiles []*os.File, ctx *ECContext) error { - enc, err := reedsolomon.New(DataShardsCount, ParityShardsCount) + enc, err := ctx.CreateEncoder() if err != nil { return fmt.Errorf("failed to create encoder: %w", err) } - buffers := make([][]byte, TotalShardsCount) + buffers := make([][]byte, ctx.Total()) for i := range buffers { if shardHasData[i] { buffers[i] = make([]byte, ErasureCodingSmallBlockSize) @@ -254,7 +293,7 @@ func rebuildEcFiles(shardHasData []bool, inputFiles []*os.File, outputFiles []*o for { // read the input data from files - for i := 0; i < TotalShardsCount; i++ { + for i := 0; i < ctx.Total(); i++ { if shardHasData[i] { n, _ := inputFiles[i].ReadAt(buffers[i], startOffset) if n == 0 { @@ -278,7 +317,7 @@ func rebuildEcFiles(shardHasData []bool, inputFiles []*os.File, outputFiles []*o } // write the data to output files - for i := 0; i < TotalShardsCount; i++ { + for i := 0; i < ctx.Total(); i++ { if !shardHasData[i] { n, _ := outputFiles[i].WriteAt(buffers[i][:inputBufferDataSize], startOffset) if inputBufferDataSize != n { diff --git a/weed/storage/erasure_coding/ec_test.go b/weed/storage/erasure_coding/ec_test.go index b1cc9c441..cbb20832c 100644 --- a/weed/storage/erasure_coding/ec_test.go +++ b/weed/storage/erasure_coding/ec_test.go @@ -23,7 +23,10 @@ func TestEncodingDecoding(t *testing.T) { bufferSize := 50 baseFileName := "1" - err := generateEcFiles(baseFileName, bufferSize, largeBlockSize, smallBlockSize) + // Create default EC context for testing + ctx := NewDefaultECContext("", 0) + + err := generateEcFiles(baseFileName, bufferSize, largeBlockSize, smallBlockSize, ctx) if err != nil { t.Logf("generateEcFiles: %v", err) } @@ -33,16 +36,16 @@ func TestEncodingDecoding(t *testing.T) { t.Logf("WriteSortedFileFromIdx: %v", err) } - err = validateFiles(baseFileName) + err = validateFiles(baseFileName, ctx) if err != nil { t.Logf("WriteSortedFileFromIdx: %v", err) } - removeGeneratedFiles(baseFileName) + removeGeneratedFiles(baseFileName, ctx) } -func 
validateFiles(baseFileName string) error { +func validateFiles(baseFileName string, ctx *ECContext) error { nm, err := readNeedleMap(baseFileName) if err != nil { return fmt.Errorf("readNeedleMap: %v", err) @@ -60,7 +63,7 @@ func validateFiles(baseFileName string) error { return fmt.Errorf("failed to stat dat file: %v", err) } - ecFiles, err := openEcFiles(baseFileName, true) + ecFiles, err := openEcFiles(baseFileName, true, ctx) if err != nil { return fmt.Errorf("error opening ec files: %w", err) } @@ -184,9 +187,9 @@ func readFromFile(file *os.File, data []byte, ecFileOffset int64) (err error) { return } -func removeGeneratedFiles(baseFileName string) { - for i := 0; i < DataShardsCount+ParityShardsCount; i++ { - fname := fmt.Sprintf("%s.ec%02d", baseFileName, i) +func removeGeneratedFiles(baseFileName string, ctx *ECContext) { + for i := 0; i < ctx.Total(); i++ { + fname := baseFileName + ctx.ToExt(i) os.Remove(fname) } os.Remove(baseFileName + ".ecx") diff --git a/weed/storage/erasure_coding/ec_volume.go b/weed/storage/erasure_coding/ec_volume.go index 839428e7b..5cff1bc4b 100644 --- a/weed/storage/erasure_coding/ec_volume.go +++ b/weed/storage/erasure_coding/ec_volume.go @@ -41,7 +41,8 @@ type EcVolume struct { ecjFileAccessLock sync.Mutex diskType types.DiskType datFileSize int64 - ExpireAtSec uint64 //ec volume destroy time, calculated from the ec volume was created + ExpireAtSec uint64 //ec volume destroy time, calculated from the ec volume was created + ECContext *ECContext // EC encoding parameters } func NewEcVolume(diskType types.DiskType, dir string, dirIdx string, collection string, vid needle.VolumeId) (ev *EcVolume, err error) { @@ -73,9 +74,32 @@ func NewEcVolume(diskType types.DiskType, dir string, dirIdx string, collection ev.Version = needle.Version(volumeInfo.Version) ev.datFileSize = volumeInfo.DatFileSize ev.ExpireAtSec = volumeInfo.ExpireAtSec + + // Initialize EC context from .vif if present; fallback to defaults + if volumeInfo.EcShardConfig != nil { + ds := int(volumeInfo.EcShardConfig.DataShards) + ps := int(volumeInfo.EcShardConfig.ParityShards) + + // Validate shard counts to prevent zero or invalid values + if ds <= 0 || ps <= 0 || ds+ps > MaxShardCount { + glog.Warningf("Invalid EC config in VolumeInfo for volume %d (data=%d, parity=%d), using defaults", vid, ds, ps) + ev.ECContext = NewDefaultECContext(collection, vid) + } else { + ev.ECContext = &ECContext{ + Collection: collection, + VolumeId: vid, + DataShards: ds, + ParityShards: ps, + } + glog.V(1).Infof("Loaded EC config from VolumeInfo for volume %d: %s", vid, ev.ECContext.String()) + } + } else { + ev.ECContext = NewDefaultECContext(collection, vid) + } } else { glog.Warningf("vif file not found,volumeId:%d, filename:%s", vid, dataBaseFileName) volume_info.SaveVolumeInfo(dataBaseFileName+".vif", &volume_server_pb.VolumeInfo{Version: uint32(ev.Version)}) + ev.ECContext = NewDefaultECContext(collection, vid) } ev.ShardLocations = make(map[ShardId][]pb.ServerAddress) @@ -260,7 +284,7 @@ func (ev *EcVolume) LocateEcShardNeedleInterval(version needle.Version, offset i if ev.datFileSize > 0 { // To get the correct LargeBlockRowsCount // use datFileSize to calculate the shardSize to match the EC encoding logic. 
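To make the shardSize change just below concrete: once the data-shard count is configurable, the per-shard extent is the .dat file size divided by ev.ECContext.DataShards rather than the fixed 10. A tiny worked example with made-up sizes, not SeaweedFS API:

package main

import "fmt"

func main() {
	// Made-up numbers: a 30 GiB .dat file encoded with a custom 6+3 layout.
	datFileSize := int64(30) << 30 // bytes in the original volume
	dataShards := int64(6)         // would come from ev.ECContext.DataShards
	shardSize := datFileSize / dataShards
	fmt.Printf("each data shard covers %d bytes (~%d GiB)\n", shardSize, shardSize>>30)
}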
- shardSize = ev.datFileSize / DataShardsCount + shardSize = ev.datFileSize / int64(ev.ECContext.DataShards) } // calculate the locations in the ec shards intervals = LocateData(ErasureCodingLargeBlockSize, ErasureCodingSmallBlockSize, shardSize, offset, types.Size(needle.GetActualSize(size, version))) diff --git a/weed/storage/erasure_coding/ec_volume_info.go b/weed/storage/erasure_coding/ec_volume_info.go index 53b352168..4d34ccbde 100644 --- a/weed/storage/erasure_coding/ec_volume_info.go +++ b/weed/storage/erasure_coding/ec_volume_info.go @@ -87,7 +87,7 @@ func (ecInfo *EcVolumeInfo) Minus(other *EcVolumeInfo) *EcVolumeInfo { // Copy shard sizes for remaining shards retIndex := 0 - for shardId := ShardId(0); shardId < TotalShardsCount && retIndex < len(ret.ShardSizes); shardId++ { + for shardId := ShardId(0); shardId < ShardId(MaxShardCount) && retIndex < len(ret.ShardSizes); shardId++ { if ret.ShardBits.HasShardId(shardId) { if size, exists := ecInfo.GetShardSize(shardId); exists { ret.ShardSizes[retIndex] = size @@ -119,19 +119,28 @@ func (ecInfo *EcVolumeInfo) ToVolumeEcShardInformationMessage() (ret *master_pb. type ShardBits uint32 // use bits to indicate the shard id, use 32 bits just for possible future extension func (b ShardBits) AddShardId(id ShardId) ShardBits { + if id >= MaxShardCount { + return b // Reject out-of-range shard IDs + } return b | (1 << id) } func (b ShardBits) RemoveShardId(id ShardId) ShardBits { + if id >= MaxShardCount { + return b // Reject out-of-range shard IDs + } return b &^ (1 << id) } func (b ShardBits) HasShardId(id ShardId) bool { + if id >= MaxShardCount { + return false // Out-of-range shard IDs are never present + } return b&(1<<id) > 0 } func (b ShardBits) ShardIds() (ret []ShardId) { - for i := ShardId(0); i < TotalShardsCount; i++ { + for i := ShardId(0); i < ShardId(MaxShardCount); i++ { if b.HasShardId(i) { ret = append(ret, i) } @@ -140,7 +149,7 @@ func (b ShardBits) ShardIds() (ret []ShardId) { } func (b ShardBits) ToUint32Slice() (ret []uint32) { - for i := uint32(0); i < TotalShardsCount; i++ { + for i := uint32(0); i < uint32(MaxShardCount); i++ { if b.HasShardId(ShardId(i)) { ret = append(ret, i) } @@ -164,6 +173,8 @@ func (b ShardBits) Plus(other ShardBits) ShardBits { } func (b ShardBits) MinusParityShards() ShardBits { + // Removes parity shards from the bit mask + // Assumes default 10+4 EC layout where parity shards are IDs 10-13 for i := DataShardsCount; i < TotalShardsCount; i++ { b = b.RemoveShardId(ShardId(i)) } @@ -205,7 +216,7 @@ func (b ShardBits) IndexToShardId(index int) (shardId ShardId, found bool) { } currentIndex := 0 - for i := ShardId(0); i < TotalShardsCount; i++ { + for i := ShardId(0); i < ShardId(MaxShardCount); i++ { if b.HasShardId(i) { if currentIndex == index { return i, true @@ -234,7 +245,7 @@ func (ecInfo *EcVolumeInfo) resizeShardSizes(prevShardBits ShardBits) { // Copy existing sizes to new positions based on current ShardBits if len(ecInfo.ShardSizes) > 0 { newIndex := 0 - for shardId := ShardId(0); shardId < TotalShardsCount && newIndex < expectedLength; shardId++ { + for shardId := ShardId(0); shardId < ShardId(MaxShardCount) && newIndex < expectedLength; shardId++ { if ecInfo.ShardBits.HasShardId(shardId) { // Try to find the size for this shard in the old array using previous ShardBits if oldIndex, found := prevShardBits.ShardIdToIndex(shardId); found && oldIndex < len(ecInfo.ShardSizes) { diff --git a/weed/storage/store_ec.go b/weed/storage/store_ec.go index 0126ad9d4..6a26b4ae0 100644 ---
a/weed/storage/store_ec.go +++ b/weed/storage/store_ec.go @@ -350,7 +350,8 @@ func (s *Store) recoverOneRemoteEcShardInterval(needleId types.NeedleId, ecVolum return 0, false, fmt.Errorf("failed to create encoder: %w", err) } - bufs := make([][]byte, erasure_coding.TotalShardsCount) + // Use MaxShardCount to support custom EC ratios up to 32 shards + bufs := make([][]byte, erasure_coding.MaxShardCount) var wg sync.WaitGroup ecVolume.ShardLocationsLock.RLock() diff --git a/weed/topology/topology_ec.go b/weed/topology/topology_ec.go index 844e92f55..c8b511338 100644 --- a/weed/topology/topology_ec.go +++ b/weed/topology/topology_ec.go @@ -10,7 +10,8 @@ import ( type EcShardLocations struct { Collection string - Locations [erasure_coding.TotalShardsCount][]*DataNode + // Use MaxShardCount (32) to support custom EC ratios + Locations [erasure_coding.MaxShardCount][]*DataNode } func (t *Topology) SyncDataNodeEcShards(shardInfos []*master_pb.VolumeEcShardInformationMessage, dn *DataNode) (newShards, deletedShards []*erasure_coding.EcVolumeInfo) { @@ -90,6 +91,10 @@ func NewEcShardLocations(collection string) *EcShardLocations { } func (loc *EcShardLocations) AddShard(shardId erasure_coding.ShardId, dn *DataNode) (added bool) { + // Defensive bounds check to prevent panic with out-of-range shard IDs + if int(shardId) >= erasure_coding.MaxShardCount { + return false + } dataNodes := loc.Locations[shardId] for _, n := range dataNodes { if n.Id() == dn.Id() { @@ -101,6 +106,10 @@ func (loc *EcShardLocations) AddShard(shardId erasure_coding.ShardId, dn *DataNo } func (loc *EcShardLocations) DeleteShard(shardId erasure_coding.ShardId, dn *DataNode) (deleted bool) { + // Defensive bounds check to prevent panic with out-of-range shard IDs + if int(shardId) >= erasure_coding.MaxShardCount { + return false + } dataNodes := loc.Locations[shardId] foundIndex := -1 for index, n := range dataNodes { diff --git a/weed/util/log_buffer/log_buffer_flush_gap_test.go b/weed/util/log_buffer/log_buffer_flush_gap_test.go index 63d344b1a..5e4d4fab7 100644 --- a/weed/util/log_buffer/log_buffer_flush_gap_test.go +++ b/weed/util/log_buffer/log_buffer_flush_gap_test.go @@ -15,10 +15,11 @@ import ( // are lost in the gap between flushed disk data and in-memory buffer. // // OBSERVED BEHAVIOR FROM LOGS: -// Request offset: 1764 -// Disk contains: 1000-1763 (764 messages) -// Memory buffer starts at: 1800 -// Gap: 1764-1799 (36 messages) ← MISSING! +// +// Request offset: 1764 +// Disk contains: 1000-1763 (764 messages) +// Memory buffer starts at: 1800 +// Gap: 1764-1799 (36 messages) ← MISSING! // // This test verifies: // 1. 
All messages sent to buffer are accounted for @@ -27,46 +28,46 @@ import ( func TestFlushOffsetGap_ReproduceDataLoss(t *testing.T) { var flushedMessages []*filer_pb.LogEntry var flushMu sync.Mutex - + flushFn := func(logBuffer *LogBuffer, startTime, stopTime time.Time, buf []byte, minOffset, maxOffset int64) { t.Logf("FLUSH: minOffset=%d maxOffset=%d size=%d bytes", minOffset, maxOffset, len(buf)) - + // Parse and store flushed messages flushMu.Lock() defer flushMu.Unlock() - + // Parse buffer to extract messages parsedCount := 0 for pos := 0; pos+4 < len(buf); { if pos+4 > len(buf) { break } - + size := uint32(buf[pos])<<24 | uint32(buf[pos+1])<<16 | uint32(buf[pos+2])<<8 | uint32(buf[pos+3]) if pos+4+int(size) > len(buf) { break } - + entryData := buf[pos+4 : pos+4+int(size)] logEntry := &filer_pb.LogEntry{} if err := proto.Unmarshal(entryData, logEntry); err == nil { flushedMessages = append(flushedMessages, logEntry) parsedCount++ } - + pos += 4 + int(size) } - + t.Logf(" Parsed %d messages from flush buffer", parsedCount) } - + logBuffer := NewLogBuffer("test", 100*time.Millisecond, flushFn, nil, nil) defer logBuffer.ShutdownLogBuffer() - + // Send 100 messages messageCount := 100 t.Logf("Sending %d messages...", messageCount) - + for i := 0; i < messageCount; i++ { logBuffer.AddToBuffer(&mq_pb.DataMessage{ Key: []byte(fmt.Sprintf("key-%d", i)), @@ -74,11 +75,11 @@ func TestFlushOffsetGap_ReproduceDataLoss(t *testing.T) { TsNs: time.Now().UnixNano(), }) } - + // Force flush multiple times to simulate real workload t.Logf("Forcing flush...") logBuffer.ForceFlush() - + // Add more messages after flush for i := messageCount; i < messageCount+50; i++ { logBuffer.AddToBuffer(&mq_pb.DataMessage{ @@ -87,18 +88,18 @@ func TestFlushOffsetGap_ReproduceDataLoss(t *testing.T) { TsNs: time.Now().UnixNano(), }) } - + // Force another flush logBuffer.ForceFlush() time.Sleep(200 * time.Millisecond) // Wait for flush to complete - + // Now check the buffer state logBuffer.RLock() bufferStartOffset := logBuffer.bufferStartOffset currentOffset := logBuffer.offset pos := logBuffer.pos logBuffer.RUnlock() - + flushMu.Lock() flushedCount := len(flushedMessages) var maxFlushedOffset int64 = -1 @@ -108,23 +109,23 @@ func TestFlushOffsetGap_ReproduceDataLoss(t *testing.T) { maxFlushedOffset = flushedMessages[flushedCount-1].Offset } flushMu.Unlock() - + t.Logf("\nBUFFER STATE AFTER FLUSH:") t.Logf(" bufferStartOffset: %d", bufferStartOffset) t.Logf(" currentOffset (HWM): %d", currentOffset) t.Logf(" pos (bytes in buffer): %d", pos) t.Logf(" Messages sent: %d (offsets 0-%d)", messageCount+50, messageCount+49) t.Logf(" Messages flushed to disk: %d (offsets %d-%d)", flushedCount, minFlushedOffset, maxFlushedOffset) - + // CRITICAL CHECK: Is there a gap between flushed data and memory buffer? 
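The numbers from the observed failure above make the continuity check easy to follow. This standalone sketch (illustrative values and variable names, not the LogBuffer API) shows the invariant the test asserts: the in-memory buffer must resume exactly one offset past the last entry the flush callback wrote to disk.

package main

import "fmt"

func main() {
	// Values taken from the logged failure: disk holds 1000-1763, memory resumes at 1800.
	maxFlushedOffset := int64(1763)  // highest offset seen by the flush callback
	bufferStartOffset := int64(1800) // where the in-memory buffer starts after the flush

	gap := bufferStartOffset - (maxFlushedOffset + 1)
	if gap > 0 {
		fmt.Printf("offset gap: %d messages (%d-%d) are neither on disk nor in memory\n",
			gap, maxFlushedOffset+1, bufferStartOffset-1)
	} else {
		fmt.Println("offsets are continuous across the flush boundary")
	}
}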
if flushedCount > 0 && maxFlushedOffset >= 0 { gap := bufferStartOffset - (maxFlushedOffset + 1) - + t.Logf("\nOFFSET CONTINUITY CHECK:") t.Logf(" Last flushed offset: %d", maxFlushedOffset) t.Logf(" Buffer starts at: %d", bufferStartOffset) t.Logf(" Gap: %d offsets", gap) - + if gap > 0 { t.Errorf("❌ CRITICAL BUG REPRODUCED: OFFSET GAP DETECTED!") t.Errorf(" Disk has offsets %d-%d", minFlushedOffset, maxFlushedOffset) @@ -137,22 +138,22 @@ func TestFlushOffsetGap_ReproduceDataLoss(t *testing.T) { } else { t.Logf("✅ PASS: No gap detected - offsets are continuous") } - + // Check if we can read all expected offsets t.Logf("\nREADABILITY CHECK:") for testOffset := int64(0); testOffset < currentOffset; testOffset += 10 { // Try to read from buffer requestPosition := NewMessagePositionFromOffset(testOffset) buf, _, err := logBuffer.ReadFromBuffer(requestPosition) - + isReadable := (buf != nil && len(buf.Bytes()) > 0) || err == ResumeFromDiskError status := "✅" if !isReadable && err == nil { status = "❌ NOT READABLE" } - + t.Logf(" Offset %d: %s (buf=%v, err=%v)", testOffset, status, buf != nil, err) - + // If offset is in the gap, it should fail to read if flushedCount > 0 && testOffset > maxFlushedOffset && testOffset < bufferStartOffset { if isReadable { @@ -163,19 +164,19 @@ func TestFlushOffsetGap_ReproduceDataLoss(t *testing.T) { } } } - + // Check that all sent messages are accounted for expectedMessageCount := messageCount + 50 messagesInMemory := int(currentOffset - bufferStartOffset) totalAccountedFor := flushedCount + messagesInMemory - + t.Logf("\nMESSAGE ACCOUNTING:") t.Logf(" Expected: %d messages", expectedMessageCount) t.Logf(" Flushed to disk: %d", flushedCount) t.Logf(" In memory buffer: %d (offset range %d-%d)", messagesInMemory, bufferStartOffset, currentOffset-1) t.Logf(" Total accounted for: %d", totalAccountedFor) t.Logf(" Missing: %d messages", expectedMessageCount-totalAccountedFor) - + if totalAccountedFor < expectedMessageCount { t.Errorf("❌ DATA LOSS CONFIRMED: %d messages are missing!", expectedMessageCount-totalAccountedFor) } else { @@ -188,23 +189,23 @@ func TestFlushOffsetGap_ReproduceDataLoss(t *testing.T) { func TestFlushOffsetGap_CheckPrevBuffers(t *testing.T) { var flushCount int var flushMu sync.Mutex - + flushFn := func(logBuffer *LogBuffer, startTime, stopTime time.Time, buf []byte, minOffset, maxOffset int64) { flushMu.Lock() flushCount++ count := flushCount flushMu.Unlock() - + t.Logf("FLUSH #%d: minOffset=%d maxOffset=%d size=%d bytes", count, minOffset, maxOffset, len(buf)) } - + logBuffer := NewLogBuffer("test", 100*time.Millisecond, flushFn, nil, nil) defer logBuffer.ShutdownLogBuffer() - + // Send messages in batches with flushes in between for batch := 0; batch < 5; batch++ { t.Logf("\nBatch %d:", batch) - + // Send 20 messages for i := 0; i < 20; i++ { offset := int64(batch*20 + i) @@ -214,28 +215,28 @@ func TestFlushOffsetGap_CheckPrevBuffers(t *testing.T) { TsNs: time.Now().UnixNano(), }) } - + // Check state before flush logBuffer.RLock() beforeFlushOffset := logBuffer.offset beforeFlushStart := logBuffer.bufferStartOffset logBuffer.RUnlock() - + // Force flush logBuffer.ForceFlush() time.Sleep(50 * time.Millisecond) - + // Check state after flush logBuffer.RLock() afterFlushOffset := logBuffer.offset afterFlushStart := logBuffer.bufferStartOffset prevBufferCount := len(logBuffer.prevBuffers.buffers) - + // Check prevBuffers state t.Logf(" Before flush: offset=%d, bufferStartOffset=%d", beforeFlushOffset, beforeFlushStart) t.Logf(" After 
flush: offset=%d, bufferStartOffset=%d, prevBuffers=%d", afterFlushOffset, afterFlushStart, prevBufferCount) - + // Check each prevBuffer for i, prevBuf := range logBuffer.prevBuffers.buffers { if prevBuf.size > 0 { @@ -244,7 +245,7 @@ func TestFlushOffsetGap_CheckPrevBuffers(t *testing.T) { } } logBuffer.RUnlock() - + // CRITICAL: Check if bufferStartOffset advanced correctly expectedNewStart := beforeFlushOffset if afterFlushStart != expectedNewStart { @@ -261,10 +262,10 @@ func TestFlushOffsetGap_CheckPrevBuffers(t *testing.T) { func TestFlushOffsetGap_ConcurrentWriteAndFlush(t *testing.T) { var allFlushedOffsets []int64 var flushMu sync.Mutex - + flushFn := func(logBuffer *LogBuffer, startTime, stopTime time.Time, buf []byte, minOffset, maxOffset int64) { t.Logf("FLUSH: offsets %d-%d (%d bytes)", minOffset, maxOffset, len(buf)) - + flushMu.Lock() // Record the offset range that was flushed for offset := minOffset; offset <= maxOffset; offset++ { @@ -272,13 +273,13 @@ func TestFlushOffsetGap_ConcurrentWriteAndFlush(t *testing.T) { } flushMu.Unlock() } - + logBuffer := NewLogBuffer("test", 50*time.Millisecond, flushFn, nil, nil) defer logBuffer.ShutdownLogBuffer() - + // Concurrently write messages and force flushes var wg sync.WaitGroup - + // Writer goroutine wg.Add(1) go func() { @@ -294,7 +295,7 @@ func TestFlushOffsetGap_ConcurrentWriteAndFlush(t *testing.T) { } } }() - + // Flusher goroutine wg.Add(1) go func() { @@ -304,31 +305,31 @@ func TestFlushOffsetGap_ConcurrentWriteAndFlush(t *testing.T) { logBuffer.ForceFlush() } }() - + wg.Wait() time.Sleep(200 * time.Millisecond) // Wait for final flush - + // Check final state logBuffer.RLock() finalOffset := logBuffer.offset finalBufferStart := logBuffer.bufferStartOffset logBuffer.RUnlock() - + flushMu.Lock() flushedCount := len(allFlushedOffsets) flushMu.Unlock() - + expectedCount := int(finalOffset) inMemory := int(finalOffset - finalBufferStart) totalAccountedFor := flushedCount + inMemory - + t.Logf("\nFINAL STATE:") t.Logf(" Total messages sent: %d (offsets 0-%d)", expectedCount, expectedCount-1) t.Logf(" Flushed to disk: %d", flushedCount) t.Logf(" In memory: %d (offsets %d-%d)", inMemory, finalBufferStart, finalOffset-1) t.Logf(" Total accounted: %d", totalAccountedFor) t.Logf(" Missing: %d", expectedCount-totalAccountedFor) - + if totalAccountedFor < expectedCount { t.Errorf("❌ DATA LOSS in concurrent scenario: %d messages missing!", expectedCount-totalAccountedFor) } @@ -344,7 +345,7 @@ func TestFlushOffsetGap_ProductionScenario(t *testing.T) { messages []*filer_pb.LogEntry } var flushMu sync.Mutex - + flushFn := func(logBuffer *LogBuffer, startTime, stopTime time.Time, buf []byte, minOffset, maxOffset int64) { // Parse messages from buffer messages := []*filer_pb.LogEntry{} @@ -360,7 +361,7 @@ func TestFlushOffsetGap_ProductionScenario(t *testing.T) { } pos += 4 + int(size) } - + flushMu.Lock() flushedData = append(flushedData, struct { minOffset int64 @@ -368,17 +369,17 @@ func TestFlushOffsetGap_ProductionScenario(t *testing.T) { messages []*filer_pb.LogEntry }{minOffset, maxOffset, messages}) flushMu.Unlock() - + t.Logf("FLUSH: minOffset=%d maxOffset=%d, parsed %d messages", minOffset, maxOffset, len(messages)) } - + logBuffer := NewLogBuffer("test", time.Hour, flushFn, nil, nil) defer logBuffer.ShutdownLogBuffer() - + // Simulate broker behavior: assign Kafka offsets and add to buffer // This is what PublishWithOffset() does nextKafkaOffset := int64(0) - + // Round 1: Add 50 messages with Kafka offsets 0-49 t.Logf("\n=== 
ROUND 1: Adding messages 0-49 ===") for i := 0; i < 50; i++ { @@ -391,7 +392,7 @@ func TestFlushOffsetGap_ProductionScenario(t *testing.T) { logBuffer.AddLogEntryToBuffer(logEntry) nextKafkaOffset++ } - + // Check buffer state before flush logBuffer.RLock() beforeFlushOffset := logBuffer.offset @@ -399,11 +400,11 @@ func TestFlushOffsetGap_ProductionScenario(t *testing.T) { logBuffer.RUnlock() t.Logf("Before flush: logBuffer.offset=%d, bufferStartOffset=%d, nextKafkaOffset=%d", beforeFlushOffset, beforeFlushStart, nextKafkaOffset) - + // Flush logBuffer.ForceFlush() time.Sleep(100 * time.Millisecond) - + // Check buffer state after flush logBuffer.RLock() afterFlushOffset := logBuffer.offset @@ -411,7 +412,7 @@ func TestFlushOffsetGap_ProductionScenario(t *testing.T) { logBuffer.RUnlock() t.Logf("After flush: logBuffer.offset=%d, bufferStartOffset=%d", afterFlushOffset, afterFlushStart) - + // Round 2: Add another 50 messages with Kafka offsets 50-99 t.Logf("\n=== ROUND 2: Adding messages 50-99 ===") for i := 0; i < 50; i++ { @@ -424,20 +425,20 @@ func TestFlushOffsetGap_ProductionScenario(t *testing.T) { logBuffer.AddLogEntryToBuffer(logEntry) nextKafkaOffset++ } - + logBuffer.ForceFlush() time.Sleep(100 * time.Millisecond) - + // Verification: Check if all Kafka offsets are accounted for flushMu.Lock() t.Logf("\n=== VERIFICATION ===") t.Logf("Expected Kafka offsets: 0-%d", nextKafkaOffset-1) - + allOffsets := make(map[int64]bool) for flushIdx, flush := range flushedData { t.Logf("Flush #%d: minOffset=%d, maxOffset=%d, messages=%d", flushIdx, flush.minOffset, flush.maxOffset, len(flush.messages)) - + for _, msg := range flush.messages { if allOffsets[msg.Offset] { t.Errorf(" ❌ DUPLICATE: Offset %d appears multiple times!", msg.Offset) @@ -446,7 +447,7 @@ func TestFlushOffsetGap_ProductionScenario(t *testing.T) { } } flushMu.Unlock() - + // Check for missing offsets missingOffsets := []int64{} for expectedOffset := int64(0); expectedOffset < nextKafkaOffset; expectedOffset++ { @@ -454,7 +455,7 @@ func TestFlushOffsetGap_ProductionScenario(t *testing.T) { missingOffsets = append(missingOffsets, expectedOffset) } } - + if len(missingOffsets) > 0 { t.Errorf("\n❌ MISSING OFFSETS DETECTED: %d offsets missing", len(missingOffsets)) if len(missingOffsets) <= 20 { @@ -466,18 +467,18 @@ func TestFlushOffsetGap_ProductionScenario(t *testing.T) { } else { t.Logf("\n✅ SUCCESS: All %d Kafka offsets accounted for (0-%d)", nextKafkaOffset, nextKafkaOffset-1) } - + // Check buffer offset consistency logBuffer.RLock() finalOffset := logBuffer.offset finalBufferStart := logBuffer.bufferStartOffset logBuffer.RUnlock() - + t.Logf("\nFinal buffer state:") t.Logf(" logBuffer.offset: %d", finalOffset) t.Logf(" bufferStartOffset: %d", finalBufferStart) t.Logf(" Expected (nextKafkaOffset): %d", nextKafkaOffset) - + if finalOffset != nextKafkaOffset { t.Errorf("❌ logBuffer.offset mismatch: expected %d, got %d", nextKafkaOffset, finalOffset) } @@ -488,12 +489,12 @@ func TestFlushOffsetGap_ProductionScenario(t *testing.T) { func TestFlushOffsetGap_ConcurrentReadDuringFlush(t *testing.T) { var flushedOffsets []int64 var flushMu sync.Mutex - + readFromDiskFn := func(startPosition MessagePosition, stopTsNs int64, eachLogEntryFn EachLogEntryFuncType) (MessagePosition, bool, error) { // Simulate reading from disk - return flushed offsets flushMu.Lock() defer flushMu.Unlock() - + for _, offset := range flushedOffsets { if offset >= startPosition.Offset { logEntry := &filer_pb.LogEntry{ @@ -510,12 +511,12 @@ func 
TestFlushOffsetGap_ConcurrentReadDuringFlush(t *testing.T) { } return startPosition, false, nil } - + flushFn := func(logBuffer *LogBuffer, startTime, stopTime time.Time, buf []byte, minOffset, maxOffset int64) { // Parse and store flushed offsets flushMu.Lock() defer flushMu.Unlock() - + for pos := 0; pos+4 < len(buf); { size := uint32(buf[pos])<<24 | uint32(buf[pos+1])<<16 | uint32(buf[pos+2])<<8 | uint32(buf[pos+3]) if pos+4+int(size) > len(buf) { @@ -528,14 +529,14 @@ func TestFlushOffsetGap_ConcurrentReadDuringFlush(t *testing.T) { } pos += 4 + int(size) } - + t.Logf("FLUSH: Stored %d offsets to disk (minOffset=%d, maxOffset=%d)", len(flushedOffsets), minOffset, maxOffset) } - + logBuffer := NewLogBuffer("test", time.Hour, flushFn, readFromDiskFn, nil) defer logBuffer.ShutdownLogBuffer() - + // Add 100 messages t.Logf("Adding 100 messages...") for i := int64(0); i < 100; i++ { @@ -547,32 +548,32 @@ func TestFlushOffsetGap_ConcurrentReadDuringFlush(t *testing.T) { } logBuffer.AddLogEntryToBuffer(logEntry) } - + // Flush (moves data to disk) t.Logf("Flushing...") logBuffer.ForceFlush() time.Sleep(100 * time.Millisecond) - + // Now try to read all messages using ReadMessagesAtOffset t.Logf("\nReading messages from offset 0...") messages, nextOffset, hwm, endOfPartition, err := logBuffer.ReadMessagesAtOffset(0, 1000, 1024*1024) - + t.Logf("Read result: messages=%d, nextOffset=%d, hwm=%d, endOfPartition=%v, err=%v", len(messages), nextOffset, hwm, endOfPartition, err) - + // Verify all offsets can be read readOffsets := make(map[int64]bool) for _, msg := range messages { readOffsets[msg.Offset] = true } - + missingOffsets := []int64{} for expectedOffset := int64(0); expectedOffset < 100; expectedOffset++ { if !readOffsets[expectedOffset] { missingOffsets = append(missingOffsets, expectedOffset) } } - + if len(missingOffsets) > 0 { t.Errorf("❌ MISSING OFFSETS after flush: %d offsets cannot be read", len(missingOffsets)) if len(missingOffsets) <= 20 { @@ -590,29 +591,29 @@ func TestFlushOffsetGap_ConcurrentReadDuringFlush(t *testing.T) { func TestFlushOffsetGap_ForceFlushAdvancesBuffer(t *testing.T) { flushedRanges := []struct{ min, max int64 }{} var flushMu sync.Mutex - + flushFn := func(logBuffer *LogBuffer, startTime, stopTime time.Time, buf []byte, minOffset, maxOffset int64) { flushMu.Lock() flushedRanges = append(flushedRanges, struct{ min, max int64 }{minOffset, maxOffset}) flushMu.Unlock() t.Logf("FLUSH: offsets %d-%d", minOffset, maxOffset) } - + logBuffer := NewLogBuffer("test", time.Hour, flushFn, nil, nil) // Long interval, manual flush only defer logBuffer.ShutdownLogBuffer() - + // Send messages, flush, check state - repeat for round := 0; round < 3; round++ { t.Logf("\n=== ROUND %d ===", round) - + // Check state before adding messages logBuffer.RLock() beforeOffset := logBuffer.offset beforeStart := logBuffer.bufferStartOffset logBuffer.RUnlock() - + t.Logf("Before adding: offset=%d, bufferStartOffset=%d", beforeOffset, beforeStart) - + // Add 10 messages for i := 0; i < 10; i++ { logBuffer.AddToBuffer(&mq_pb.DataMessage{ @@ -621,28 +622,28 @@ func TestFlushOffsetGap_ForceFlushAdvancesBuffer(t *testing.T) { TsNs: time.Now().UnixNano(), }) } - + // Check state after adding logBuffer.RLock() afterAddOffset := logBuffer.offset afterAddStart := logBuffer.bufferStartOffset logBuffer.RUnlock() - + t.Logf("After adding: offset=%d, bufferStartOffset=%d", afterAddOffset, afterAddStart) - + // Force flush t.Logf("Forcing flush...") logBuffer.ForceFlush() time.Sleep(100 * 
time.Millisecond) - + // Check state after flush logBuffer.RLock() afterFlushOffset := logBuffer.offset afterFlushStart := logBuffer.bufferStartOffset logBuffer.RUnlock() - + t.Logf("After flush: offset=%d, bufferStartOffset=%d", afterFlushOffset, afterFlushStart) - + // CRITICAL CHECK: bufferStartOffset should advance to where offset was before flush if afterFlushStart != afterAddOffset { t.Errorf("❌ FLUSH BUG: bufferStartOffset did NOT advance correctly!") @@ -653,19 +654,19 @@ func TestFlushOffsetGap_ForceFlushAdvancesBuffer(t *testing.T) { t.Logf("✅ bufferStartOffset correctly advanced to %d", afterFlushStart) } } - + // Final verification: check all offset ranges are continuous flushMu.Lock() t.Logf("\n=== FLUSHED RANGES ===") for i, r := range flushedRanges { t.Logf("Flush #%d: offsets %d-%d", i, r.min, r.max) - + // Check continuity with previous flush if i > 0 { prevMax := flushedRanges[i-1].max currentMin := r.min gap := currentMin - (prevMax + 1) - + if gap > 0 { t.Errorf("❌ GAP between flush #%d and #%d: %d offsets missing!", i-1, i, gap) } else if gap < 0 { @@ -677,4 +678,3 @@ func TestFlushOffsetGap_ForceFlushAdvancesBuffer(t *testing.T) { } flushMu.Unlock() } - diff --git a/weed/util/version/constants.go b/weed/util/version/constants.go index 16e8c7d14..96a3ce757 100644 --- a/weed/util/version/constants.go +++ b/weed/util/version/constants.go @@ -8,7 +8,7 @@ import ( var ( MAJOR_VERSION = int32(3) - MINOR_VERSION = int32(98) + MINOR_VERSION = int32(99) VERSION_NUMBER = fmt.Sprintf("%d.%02d", MAJOR_VERSION, MINOR_VERSION) VERSION = util.SizeLimit + " " + VERSION_NUMBER COMMIT = "" diff --git a/weed/worker/client.go b/weed/worker/client.go index 73e7347a8..6d72e11ff 100644 --- a/weed/worker/client.go +++ b/weed/worker/client.go @@ -18,7 +18,6 @@ type GrpcAdminClient struct { adminAddress string workerID string dialOption grpc.DialOption - comms clientChannels // Reconnection parameters diff --git a/weed/worker/tasks/erasure_coding/ec_task.go b/weed/worker/tasks/erasure_coding/ec_task.go index 18f192bc9..df7fc94f9 100644 --- a/weed/worker/tasks/erasure_coding/ec_task.go +++ b/weed/worker/tasks/erasure_coding/ec_task.go @@ -374,7 +374,8 @@ func (t *ErasureCodingTask) generateEcShardsLocally(localFiles map[string]string var generatedShards []string var totalShardSize int64 - for i := 0; i < erasure_coding.TotalShardsCount; i++ { + // Check up to MaxShardCount (32) to support custom EC ratios + for i := 0; i < erasure_coding.MaxShardCount; i++ { shardFile := fmt.Sprintf("%s.ec%02d", baseName, i) if info, err := os.Stat(shardFile); err == nil { shardKey := fmt.Sprintf("ec%02d", i)
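As a standalone illustration of the discovery loop in generateEcShardsLocally above: with custom ratios the worker cannot assume exactly 14 shard files, so it probes every extension up to the 32-shard bitmap limit and keeps whatever exists on disk. The base path is hypothetical and the local constant only mirrors erasure_coding.MaxShardCount.

package main

import (
	"fmt"
	"os"
)

func main() {
	// Hypothetical volume base path; in the worker this comes from the copied local files.
	baseName := "/tmp/ec_work/pics_7"
	const maxShardCount = 32 // mirrors erasure_coding.MaxShardCount (ShardBits is a uint32)

	var generatedShards []string
	var totalShardSize int64
	for i := 0; i < maxShardCount; i++ {
		shardFile := fmt.Sprintf("%s.ec%02d", baseName, i)
		if info, err := os.Stat(shardFile); err == nil {
			generatedShards = append(generatedShards, shardFile)
			totalShardSize += info.Size()
		}
	}
	fmt.Printf("found %d shard files totalling %d bytes\n", len(generatedShards), totalShardSize)
}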