package app

import (
	"fmt"
	"sort"
	"strings"

	"github.com/seaweedfs/seaweedfs/weed/admin/dash"
	"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
)

templ ClusterEcVolumes(data dash.ClusterEcVolumesData) {

	EC Volumes
	if data.Collection != "" {
		if data.Collection == "default" {
			Collection: default
		} else {
			Collection: {data.Collection}
		}
		Clear Filter
	}
	Total Volumes
	{fmt.Sprintf("%d", data.TotalVolumes)}
	Total Shards
	{fmt.Sprintf("%d", data.TotalShards)}
	Healthy Volumes
	{fmt.Sprintf("%d", data.CompleteVolumes)}
	All { fmt.Sprintf("%d", erasure_coding.TotalShardsCount) } shards present
	Degraded Volumes
	{fmt.Sprintf("%d", data.IncompleteVolumes)}
	Incomplete/Critical
	Volume ID
	if data.SortBy == "volume_id" {
		if data.SortOrder == "asc" {
		} else {
		}
	} else {
	}
	if data.ShowCollectionColumn {
		Collection
		if data.SortBy == "collection" {
			if data.SortOrder == "asc" {
			} else {
			}
		} else {
		}
	}
	Shard Count
	if data.SortBy == "total_shards" {
		if data.SortOrder == "asc" {
		} else {
		}
	} else {
	}
	Shard Size
	Shard Locations
	Status
	if data.SortBy == "completeness" {
		if data.SortOrder == "asc" {
		} else {
		}
	} else {
	}
	if data.ShowDataCenterColumn {
		Data Centers
	}
	Actions
	for _, volume := range data.EcVolumes {
		{fmt.Sprintf("%d", volume.VolumeID)}
		if data.ShowCollectionColumn {
			if volume.Collection != "" {
				{volume.Collection}
			} else {
				default
			}
		}
		{fmt.Sprintf("%d/%d", volume.TotalShards, erasure_coding.TotalShardsCount)}
		@displayShardSizes(volume.ShardSizes)
		@displayVolumeDistribution(volume)
		@displayEcVolumeStatus(volume)
		if data.ShowDataCenterColumn {
			for i, dc := range volume.DataCenters {
				if i > 0 {
					,
				}
				{dc}
			}
		}
		if !volume.IsComplete {
		}
	}
	if data.TotalPages > 1 {
	}
}
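// View-model fields referenced by this template (the authoritative struct
// definitions live in weed/admin/dash):
//   - dash.ClusterEcVolumesData: Collection, TotalVolumes, TotalShards,
//     CompleteVolumes, IncompleteVolumes, ShowCollectionColumn,
//     ShowDataCenterColumn, SortBy, SortOrder, EcVolumes, TotalPages
//   - dash.EcVolumeWithShards: VolumeID, Collection, TotalShards, ShardSizes,
//     ShardLocations, MissingShards, IsComplete, DataCenters, Racks, Servers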
// displayShardSizes renders shard sizes in a compact format
templ displayShardSizes(shardSizes map[int]int64) {
	if len(shardSizes) == 0 {
		-
	} else {
		@renderShardSizesContent(shardSizes)
	}
}

// renderShardSizesContent renders the content of shard sizes
templ renderShardSizesContent(shardSizes map[int]int64) {
	if areAllShardSizesSame(shardSizes) {
		// All shards have the same size, show just the common size
		{getCommonShardSize(shardSizes)}
	} else {
		// Shards have different sizes, show individual sizes
		{ formatIndividualShardSizes(shardSizes) }
	}
}

// ServerShardInfo represents a server and its shard ranges with sizes
type ServerShardInfo struct {
	Server      string
	ShardRanges string
}

// groupShardsByServer groups shards by server and formats their ranges
func groupShardsByServer(shardLocations map[int]string) []ServerShardInfo {
	if len(shardLocations) == 0 {
		return []ServerShardInfo{}
	}

	// Group shards by server
	serverShards := make(map[string][]int)
	for shardId, server := range shardLocations {
		serverShards[server] = append(serverShards[server], shardId)
	}

	var serverInfos []ServerShardInfo
	for server, shards := range serverShards {
		// Sort shards for each server
		sort.Ints(shards)

		// Format shard ranges compactly
		shardRanges := formatShardRanges(shards)

		serverInfos = append(serverInfos, ServerShardInfo{
			Server:      server,
			ShardRanges: shardRanges,
		})
	}

	// Sort by server name
	sort.Slice(serverInfos, func(i, j int) bool {
		return serverInfos[i].Server < serverInfos[j].Server
	})

	return serverInfos
}

// formatShardRanges formats sorted shard IDs compactly (e.g., "0-3,7,9-11")
func formatShardRanges(shards []int) string {
	if len(shards) == 0 {
		return ""
	}

	var ranges []string
	start := shards[0]
	end := shards[0]

	for i := 1; i < len(shards); i++ {
		if shards[i] == end+1 {
			end = shards[i]
		} else {
			if start == end {
				ranges = append(ranges, fmt.Sprintf("%d", start))
			} else {
				ranges = append(ranges, fmt.Sprintf("%d-%d", start, end))
			}
			start = shards[i]
			end = shards[i]
		}
	}

	// Add the last range
	if start == end {
		ranges = append(ranges, fmt.Sprintf("%d", start))
	} else {
		ranges = append(ranges, fmt.Sprintf("%d-%d", start, end))
	}

	return strings.Join(ranges, ",")
}

// bytesToHumanReadable converts a byte count to a human-readable string
func bytesToHumanReadable(bytes int64) string {
	const unit = 1024
	if bytes < unit {
		return fmt.Sprintf("%dB", bytes)
	}
	div, exp := int64(unit), 0
	for n := bytes / unit; n >= unit; n /= unit {
		div *= unit
		exp++
	}
	return fmt.Sprintf("%.1f%cB", float64(bytes)/float64(div), "KMGTPE"[exp])
}

// formatMissingShards formats a list of missing shard IDs
func formatMissingShards(missingShards []int) string {
	if len(missingShards) == 0 {
		return ""
	}
	var shardStrs []string
	for _, shard := range missingShards {
		shardStrs = append(shardStrs, fmt.Sprintf("%d", shard))
	}
	return strings.Join(shardStrs, ", ")
}

// areAllShardSizesSame reports whether all shard sizes are identical
func areAllShardSizesSame(shardSizes map[int]int64) bool {
	if len(shardSizes) <= 1 {
		return true
	}
	var firstSize int64 = -1
	for _, size := range shardSizes {
		if firstSize == -1 {
			firstSize = size
		} else if size != firstSize {
			return false
		}
	}
	return true
}

// getCommonShardSize returns the common shard size (when all shards are the same size)
func getCommonShardSize(shardSizes map[int]int64) string {
	for _, size := range shardSizes {
		return bytesToHumanReadable(size)
	}
	return "-"
}

// formatIndividualShardSizes formats individual shard sizes
func formatIndividualShardSizes(shardSizes map[int]int64) string {
	if len(shardSizes) == 0 {
		return ""
	}

	// Group shards by size for a more compact display
	sizeGroups := make(map[int64][]int)
	for shardId, size := range shardSizes {
		sizeGroups[size] = append(sizeGroups[size], shardId)
	}

	// If there are only a few (up to 3) different sizes, show them grouped
	if len(sizeGroups) <= 3 {
		var groupStrs []string
		for size, shardIds := range sizeGroups {
			// Sort shard IDs
			sort.Ints(shardIds)

			var idRanges []string
			if len(shardIds) <= erasure_coding.ParityShardsCount {
				// Show individual IDs if few shards
				for _, id := range shardIds {
					idRanges = append(idRanges, fmt.Sprintf("%d", id))
				}
			} else {
				// Show count if many shards
				idRanges = append(idRanges, fmt.Sprintf("%d shards", len(shardIds)))
			}
			groupStrs = append(groupStrs, fmt.Sprintf("%s: %s", strings.Join(idRanges, ","), bytesToHumanReadable(size)))
		}
		return strings.Join(groupStrs, " | ")
	}

	// Too many different sizes, show a summary instead
	return fmt.Sprintf("%d different sizes", len(sizeGroups))
}
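// Worked examples for the formatting helpers above (the server addresses are
// hypothetical placeholders):
//
//	formatShardRanges([]int{0, 1, 2, 3, 7, 9, 10, 11}) // "0-3,7,9-11"
//	bytesToHumanReadable(512)                          // "512B"
//	bytesToHumanReadable(1536)                         // "1.5KB"
//	bytesToHumanReadable(1073741824)                   // "1.0GB"
//	groupShardsByServer(map[int]string{0: "srvA:8080", 1: "srvA:8080", 5: "srvB:8080"})
//	// returns [{srvA:8080 0-1} {srvB:8080 5}]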
// displayVolumeDistribution shows the distribution summary for a volume
templ displayVolumeDistribution(volume dash.EcVolumeWithShards) {
	{ calculateVolumeDistributionSummary(volume) }
}

// displayEcVolumeStatus shows an improved status display for EC volumes.
// Status thresholds are based on EC recovery capability:
//   - Critical: more than DataShardsCount missing (data is unrecoverable)
//   - Degraded: more than half of DataShardsCount missing (high risk)
//   - Incomplete: more than half of ParityShardsCount missing (reduced redundancy)
//   - Minor Issues: few shards missing (still fully recoverable)
templ displayEcVolumeStatus(volume dash.EcVolumeWithShards) {
	if volume.IsComplete {
		Complete
	} else {
		if len(volume.MissingShards) > erasure_coding.DataShardsCount {
			// Unrecoverable: more shards missing than EC can reconstruct
			Critical ({fmt.Sprintf("%d", len(volume.MissingShards))} missing)
		} else if len(volume.MissingShards) > (erasure_coding.DataShardsCount/2) {
			// High risk: approaching an unrecoverable state
			Degraded ({fmt.Sprintf("%d", len(volume.MissingShards))} missing)
		} else if len(volume.MissingShards) > (erasure_coding.ParityShardsCount/2) {
			// Reduced redundancy but still recoverable
			Incomplete ({fmt.Sprintf("%d", len(volume.MissingShards))} missing)
		} else {
			// Minor: few shards missing, fully recoverable with a good margin
			Minor Issues ({fmt.Sprintf("%d", len(volume.MissingShards))} missing)
		}
	}
}

// calculateVolumeDistributionSummary calculates and formats the distribution summary for a volume
func calculateVolumeDistributionSummary(volume dash.EcVolumeWithShards) string {
	dataCenters := make(map[string]bool)
	racks := make(map[string]bool)
	servers := make(map[string]bool)

	// Count unique servers from shard locations
	for _, server := range volume.ShardLocations {
		servers[server] = true
	}

	// Use the DataCenters field if available
	for _, dc := range volume.DataCenters {
		dataCenters[dc] = true
	}

	// Use the Servers field if available
	for _, server := range volume.Servers {
		servers[server] = true
	}

	// Use the Racks field if available
	for _, rack := range volume.Racks {
		racks[rack] = true
	}

	// If we don't have rack information, estimate it from servers as a fallback
	rackCount := len(racks)
	if rackCount == 0 {
		// Fallback estimation - assume each server might be in a different rack
		rackCount = len(servers)
		if len(dataCenters) > 0 {
			// More conservative estimate if we have DC info
			rackCount = (len(servers) + len(dataCenters) - 1) / len(dataCenters)
			if rackCount == 0 {
				rackCount = 1
			}
		}
	}

	return fmt.Sprintf("%d DCs, %d racks, %d servers", len(dataCenters), rackCount, len(servers))
}
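// The status buckets in displayEcVolumeStatus depend on the EC geometry
// constants. Assuming the default SeaweedFS layout of 10 data + 4 parity
// shards (DataShardsCount = 10, ParityShardsCount = 4, TotalShardsCount = 14),
// the thresholds resolve to:
//
//	missing shards   status
//	0                Complete
//	1-2              Minor Issues  (missing <= ParityShardsCount/2)
//	3-5              Incomplete    (missing >  ParityShardsCount/2)
//	6-10             Degraded      (missing >  DataShardsCount/2)
//	11-14            Critical      (missing >  DataShardsCount)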