diff --git a/weed/admin/dash/ec_shard_management.go b/weed/admin/dash/ec_shard_management.go
index 556b970bc..02f78b9d5 100644
--- a/weed/admin/dash/ec_shard_management.go
+++ b/weed/admin/dash/ec_shard_management.go
@@ -280,21 +280,36 @@ func (s *AdminServer) GetClusterEcVolumes(page int, pageSize int, sortBy string,
// Initialize volume data if needed
if volumeData[volumeId] == nil {
volumeData[volumeId] = &EcVolumeWithShards{
- VolumeID: volumeId,
- Collection: ecShardInfo.Collection,
- TotalShards: 0,
- IsComplete: false,
- MissingShards: []int{},
- ShardLocations: make(map[int]string),
- ShardSizes: make(map[int]int64),
- DataCenters: []string{},
- Servers: []string{},
- Racks: []string{},
+ VolumeID: volumeId,
+ Collection: ecShardInfo.Collection,
+ TotalShards: 0,
+ IsComplete: false,
+ MissingShards: []int{},
+ ShardLocations: make(map[int]string),
+ ShardSizes: make(map[int]int64),
+ DataCenters: []string{},
+ Servers: []string{},
+ Racks: []string{},
+ Generations: []uint32{},
+ ActiveGeneration: 0,
+ HasMultipleGenerations: false,
}
}
volume := volumeData[volumeId]
+ // Track generation information
+ generationExists := false
+ for _, existingGen := range volume.Generations {
+ if existingGen == ecShardInfo.Generation {
+ generationExists = true
+ break
+ }
+ }
+ if !generationExists {
+ volume.Generations = append(volume.Generations, ecShardInfo.Generation)
+ }
+
// Track data centers and servers
dcExists := false
for _, existingDc := range volume.DataCenters {
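The generation tracking added above repeats the same append-if-missing scan already used for data centers, servers, and racks. A minimal sketch of that pattern as a standalone helper (the `appendIfMissing` name is hypothetical, not part of this change):

```go
// appendIfMissing returns values with v appended, unless v is already present.
// This is the same linear-scan de-duplication used above for Generations,
// DataCenters, Servers, and Racks (a string variant would look identical).
func appendIfMissing(values []uint32, v uint32) []uint32 {
	for _, existing := range values {
		if existing == v {
			return values // already tracked, keep the slice unchanged
		}
	}
	return append(values, v)
}
```

With such a helper, the tracking block would reduce to `volume.Generations = appendIfMissing(volume.Generations, ecShardInfo.Generation)`.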
@@ -385,6 +400,33 @@ func (s *AdminServer) GetClusterEcVolumes(page int, pageSize int, sortBy string,
}
}
+ // Get active generation information from master for each volume
+ err = s.WithMasterClient(func(client master_pb.SeaweedClient) error {
+ for volumeId, volume := range volumeData {
+ // Look up active generation
+ resp, lookupErr := client.LookupEcVolume(context.Background(), &master_pb.LookupEcVolumeRequest{
+ VolumeId: volumeId,
+ })
+ if lookupErr == nil && resp != nil {
+ volume.ActiveGeneration = resp.ActiveGeneration
+ }
+
+ // Sort generations and check for multiple generations
+ if len(volume.Generations) > 1 {
+ // Sort generations (oldest first)
+ for i := 0; i < len(volume.Generations); i++ {
+ for j := i + 1; j < len(volume.Generations); j++ {
+ if volume.Generations[i] > volume.Generations[j] {
+ volume.Generations[i], volume.Generations[j] = volume.Generations[j], volume.Generations[i]
+ }
+ }
+ }
+ volume.HasMultipleGenerations = true
+ }
+ }
+ return nil // Best-effort: don't fail the listing if the master lookup fails
+ })
+
// Calculate completeness for each volume
completeVolumes := 0
incompleteVolumes := 0
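The block above is a best-effort pass over `volumeData`: it queries the master for each volume's active generation and normalizes the generation list. A compact sketch of the equivalent logic, assuming the standard `sort` package is imported and the `github.com/seaweedfs/seaweedfs/weed/pb/master_pb` import path; the `fillActiveGenerations` helper name is illustrative only:

```go
// fillActiveGenerations asks the master for each volume's active generation and
// normalizes the per-volume generation list. Lookup failures are ignored so the
// listing still renders; sort.Slice replaces the hand-rolled bubble sort above
// without changing behavior.
func (s *AdminServer) fillActiveGenerations(volumeData map[uint32]*EcVolumeWithShards) {
	_ = s.WithMasterClient(func(client master_pb.SeaweedClient) error {
		for volumeId, volume := range volumeData {
			resp, lookupErr := client.LookupEcVolume(context.Background(), &master_pb.LookupEcVolumeRequest{
				VolumeId: volumeId,
			})
			if lookupErr == nil && resp != nil {
				volume.ActiveGeneration = resp.ActiveGeneration
			}
			sort.Slice(volume.Generations, func(i, j int) bool {
				return volume.Generations[i] < volume.Generations[j]
			})
			volume.HasMultipleGenerations = len(volume.Generations) > 1
		}
		return nil // best-effort: a failed lookup leaves ActiveGeneration at 0
	})
}
```

Note that this still issues one `LookupEcVolume` call per volume inside the loop; if the listing can grow large, a batched lookup might be worth considering.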
@@ -628,6 +670,7 @@ func (s *AdminServer) GetEcVolumeDetails(volumeID uint32, sortBy string, sortOrd
ModifiedTime: 0, // Not available in current API
EcIndexBits: ecShardInfo.EcIndexBits,
ShardCount: getShardCount(ecShardInfo.EcIndexBits),
+ Generation: ecShardInfo.Generation, // Include generation information
}
shards = append(shards, ecShard)
}
@@ -741,6 +784,50 @@ func (s *AdminServer) GetEcVolumeDetails(volumeID uint32, sortBy string, sortOrd
}
}
+ // Analyze generation information
+ generationMap := make(map[uint32]bool)
+ generationShards := make(map[uint32][]uint32)
+ generationComplete := make(map[uint32]bool)
+
+ // Collect all generations and group shards by generation
+ for _, shard := range shards {
+ generationMap[shard.Generation] = true
+ generationShards[shard.Generation] = append(generationShards[shard.Generation], shard.ShardID)
+ }
+
+ // Collect the distinct generations into a slice (sorted below)
+ var generations []uint32
+ for gen := range generationMap {
+ generations = append(generations, gen)
+ }
+
+ // Sort generations (oldest first)
+ for i := 0; i < len(generations); i++ {
+ for j := i + 1; j < len(generations); j++ {
+ if generations[i] > generations[j] {
+ generations[i], generations[j] = generations[j], generations[i]
+ }
+ }
+ }
+
+ // Check completion status for each generation
+ for gen, shardIDs := range generationShards {
+ generationComplete[gen] = len(shardIDs) == erasure_coding.TotalShardsCount
+ }
+
+ // Get active generation from master
+ var activeGeneration uint32
+ err = s.WithMasterClient(func(client master_pb.SeaweedClient) error {
+ // Use LookupEcVolume to get active generation
+ resp, lookupErr := client.LookupEcVolume(context.Background(), &master_pb.LookupEcVolumeRequest{
+ VolumeId: volumeID,
+ })
+ if lookupErr == nil && resp != nil {
+ activeGeneration = resp.ActiveGeneration
+ }
+ return nil // Don't fail if lookup fails, just use generation 0 as default
+ })
+
data := &EcVolumeDetailsData{
VolumeID: volumeID,
Collection: collection,
@@ -759,6 +846,12 @@ func (s *AdminServer) GetEcVolumeDetails(volumeID uint32, sortBy string, sortOrd
DeleteCount: volumeHealth.DeleteCount,
GarbageRatio: volumeHealth.GarbageRatio,
+ // Generation information
+ Generations: generations,
+ ActiveGeneration: activeGeneration,
+ GenerationShards: generationShards,
+ GenerationComplete: generationComplete,
+
SortBy: sortBy,
SortOrder: sortOrder,
}
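The generation analysis added to `GetEcVolumeDetails` groups shard IDs per generation and flags a generation complete when it holds all `erasure_coding.TotalShardsCount` shards. A sketch of the same analysis as a helper, assuming `sort` and `erasure_coding` are imported, with one extra assumption: if the same shard ID can be reported by more than one server, it should be counted once per generation (the `analyzeGenerations` name and the de-duplication are illustrative, not part of this change):

```go
// analyzeGenerations groups shard IDs by generation and reports, per generation,
// whether all erasure_coding.TotalShardsCount shards are present. Shard IDs are
// de-duplicated in case multiple servers report the same shard.
func analyzeGenerations(shards []EcShardWithInfo) (generations []uint32, byGen map[uint32][]uint32, complete map[uint32]bool) {
	seen := make(map[uint32]map[uint32]bool) // generation -> set of shard IDs
	byGen = make(map[uint32][]uint32)
	complete = make(map[uint32]bool)
	for _, shard := range shards {
		if seen[shard.Generation] == nil {
			seen[shard.Generation] = make(map[uint32]bool)
			generations = append(generations, shard.Generation)
		}
		if !seen[shard.Generation][shard.ShardID] {
			seen[shard.Generation][shard.ShardID] = true
			byGen[shard.Generation] = append(byGen[shard.Generation], shard.ShardID)
		}
	}
	// Oldest generation first.
	sort.Slice(generations, func(i, j int) bool { return generations[i] < generations[j] })
	for gen, ids := range byGen {
		complete[gen] = len(ids) == erasure_coding.TotalShardsCount
	}
	return generations, byGen, complete
}
```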
diff --git a/weed/admin/dash/types.go b/weed/admin/dash/types.go
index 676d51e89..881638ad2 100644
--- a/weed/admin/dash/types.go
+++ b/weed/admin/dash/types.go
@@ -209,6 +209,9 @@ type EcShardWithInfo struct {
ShardCount int `json:"shard_count"` // Number of shards this server has for this volume
IsComplete bool `json:"is_complete"` // True if this volume has all 14 shards
MissingShards []int `json:"missing_shards"` // List of missing shard IDs
+
+ // Generation information
+ Generation uint32 `json:"generation"` // EC volume generation
}
// EcVolumeDetailsData represents the data for the EC volume details page
@@ -231,6 +234,12 @@ type EcVolumeDetailsData struct {
DeleteCount uint64 `json:"delete_count"` // Deleted file count
GarbageRatio float64 `json:"garbage_ratio"` // Deletion ratio (0.0-1.0)
+ // Generation information
+ Generations []uint32 `json:"generations"` // All generations present for this volume
+ ActiveGeneration uint32 `json:"active_generation"` // Currently active generation
+ GenerationShards map[uint32][]uint32 `json:"generation_shards"` // Generation -> list of shard IDs
+ GenerationComplete map[uint32]bool `json:"generation_complete"` // Generation -> completion status
+
// Sorting
SortBy string `json:"sort_by"`
SortOrder string `json:"sort_order"`
@@ -502,6 +511,11 @@ type EcVolumeWithShards struct {
Servers []string `json:"servers"`
Racks []string `json:"racks"`
ModifiedTime int64 `json:"modified_time"`
+
+ // Generation information
+ Generations []uint32 `json:"generations"` // All generations present for this volume
+ ActiveGeneration uint32 `json:"active_generation"` // Currently active generation
+ HasMultipleGenerations bool `json:"has_multiple_generations"` // True if volume has multiple generations
}
// ClusterEcVolumesData represents the response for clustered EC volumes view
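For orientation, a sketch of how the new `EcVolumeWithShards` generation fields serialize, assuming the `github.com/seaweedfs/seaweedfs/weed/admin/dash` import path; the values are made up:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/seaweedfs/seaweedfs/weed/admin/dash"
)

func main() {
	// A volume observed with an old generation 0 and a newly built generation 1.
	v := dash.EcVolumeWithShards{
		VolumeID:               123,
		Generations:            []uint32{0, 1},
		ActiveGeneration:       1,
		HasMultipleGenerations: true,
	}
	out, _ := json.Marshal(v)
	fmt.Println(string(out))
	// Among other fields, the output contains:
	//   "generations":[0,1],"active_generation":1,"has_multiple_generations":true
}
```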
diff --git a/weed/admin/view/app/cluster_ec_volumes.templ b/weed/admin/view/app/cluster_ec_volumes.templ
index c84da45ca..402ff10b1 100644
--- a/weed/admin/view/app/cluster_ec_volumes.templ
+++ b/weed/admin/view/app/cluster_ec_volumes.templ
@@ -191,6 +191,7 @@ templ ClusterEcVolumes(data dash.ClusterEcVolumesData) {
Shard Size |
+ Generation |
Shard Locations |
@@ -237,6 +238,9 @@ templ ClusterEcVolumes(data dash.ClusterEcVolumesData) {
|
@displayShardSizes(volume.ShardSizes)
|
+
+ @displayGenerationInfo(volume)
+ |
@displayVolumeDistribution(volume)
|
@@ -732,6 +736,29 @@ templ displayEcVolumeStatus(volume dash.EcVolumeWithShards) {
}
}
+// displayGenerationInfo shows generation information for a volume
+templ displayGenerationInfo(volume dash.EcVolumeWithShards) {
+ if volume.HasMultipleGenerations {
+
+
+ Multi-Gen
+
+
+
+ Active: G{fmt.Sprintf("%d", volume.ActiveGeneration)}
+
+
+ } else if len(volume.Generations) > 0 {
+ if volume.ActiveGeneration > 0 {
+ G{fmt.Sprintf("%d", volume.ActiveGeneration)}
+ } else {
+ G{fmt.Sprintf("%d", volume.Generations[0])}
+ }
+ } else {
+ G0
+ }
+}
+
// calculateVolumeDistributionSummary calculates and formats the distribution summary for a volume
func calculateVolumeDistributionSummary(volume dash.EcVolumeWithShards) string {
dataCenters := make(map[string]bool)
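The `displayGenerationInfo` component shows a Multi-Gen indicator with the active generation when several generations coexist, the active (or only) generation otherwise, and `G0` as a fallback; the regenerated `cluster_ec_volumes_templ.go` below reflects the same change after running `templ generate`. A plain-Go sketch of the same selection logic, e.g. for unit testing (the `generationLabel` helper is illustrative, not part of this change):

```go
// generationLabel mirrors displayGenerationInfo's branching as a plain string,
// without the surrounding badge markup.
func generationLabel(volume dash.EcVolumeWithShards) string {
	switch {
	case volume.HasMultipleGenerations:
		return fmt.Sprintf("Multi-Gen (Active: G%d)", volume.ActiveGeneration)
	case len(volume.Generations) > 0 && volume.ActiveGeneration > 0:
		return fmt.Sprintf("G%d", volume.ActiveGeneration)
	case len(volume.Generations) > 0:
		return fmt.Sprintf("G%d", volume.Generations[0])
	default:
		return "G0"
	}
}
```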
diff --git a/weed/admin/view/app/cluster_ec_volumes_templ.go b/weed/admin/view/app/cluster_ec_volumes_templ.go
index 932075106..33d60e611 100644
--- a/weed/admin/view/app/cluster_ec_volumes_templ.go
+++ b/weed/admin/view/app/cluster_ec_volumes_templ.go
@@ -362,7 +362,7 @@ func ClusterEcVolumes(data dash.ClusterEcVolumesData) templ.Component {
return templ_7745c5c3_Err
}
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 43, "Shard Size | Shard Locations | Status ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 43, " | Shard Size | Generation | Shard Locations | Status ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
@@ -406,7 +406,7 @@ func ClusterEcVolumes(data dash.ClusterEcVolumesData) templ.Component {
var templ_7745c5c3_Var15 string
templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", volume.VolumeID))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 219, Col: 75}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 220, Col: 75}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15))
if templ_7745c5c3_Err != nil {
@@ -429,7 +429,7 @@ func ClusterEcVolumes(data dash.ClusterEcVolumesData) templ.Component {
var templ_7745c5c3_Var16 string
templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(volume.Collection)
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 225, Col: 101}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 226, Col: 101}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16))
if templ_7745c5c3_Err != nil {
@@ -457,7 +457,7 @@ func ClusterEcVolumes(data dash.ClusterEcVolumesData) templ.Component {
var templ_7745c5c3_Var17 string
templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d/14", volume.TotalShards))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 235, Col: 104}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 236, Col: 104}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17))
if templ_7745c5c3_Err != nil {
@@ -475,7 +475,7 @@ func ClusterEcVolumes(data dash.ClusterEcVolumesData) templ.Component {
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = displayVolumeDistribution(volume).Render(ctx, templ_7745c5c3_Buffer)
+ templ_7745c5c3_Err = displayGenerationInfo(volume).Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
@@ -483,218 +483,226 @@ func ClusterEcVolumes(data dash.ClusterEcVolumesData) templ.Component {
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
+ templ_7745c5c3_Err = displayVolumeDistribution(volume).Render(ctx, templ_7745c5c3_Buffer)
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 61, " | ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
templ_7745c5c3_Err = displayEcVolumeStatus(volume).Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 61, " | ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 62, "")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if data.ShowDataCenterColumn {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 62, "")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 63, " | ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
for i, dc := range volume.DataCenters {
if i > 0 {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 63, ", ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 64, ", ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 64, " ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 65, " ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var18 string
templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(dc)
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 252, Col: 85}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 256, Col: 85}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 65, "")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 66, "")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 66, " | ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 67, "")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 67, " | ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 72, "")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 72, "")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 73, "")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if data.TotalPages > 1 {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 73, "")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 87, "