package app

import (
	"fmt"
	"strings"

	"github.com/seaweedfs/seaweedfs/weed/admin/dash"
)

templ ClusterEcVolumes(data dash.ClusterEcVolumesData) {
	EC Volumes - SeaweedFS

EC Volumes ({fmt.Sprintf("%d", data.TotalVolumes)} volumes)

Total Volumes

{fmt.Sprintf("%d", data.TotalVolumes)}

EC encoded volumes
Total Shards

{fmt.Sprintf("%d", data.TotalShards)}

Distributed shards
Complete Volumes

{fmt.Sprintf("%d", data.CompleteVolumes)}

All shards present
Incomplete Volumes

{fmt.Sprintf("%d", data.IncompleteVolumes)}

Missing shards
Showing {fmt.Sprintf("%d", (data.Page-1)*data.PageSize + 1)} to {fmt.Sprintf("%d", func() int { end := data.Page * data.PageSize; if end > data.TotalVolumes { return data.TotalVolumes }; return end }())} of {fmt.Sprintf("%d", data.TotalVolumes)} volumes
per page
if data.Collection != "" {
	if data.Collection == "default" {
		Collection: default
	} else {
		Collection: {data.Collection}
	}
	Clear Filter
}
Volume ID
if data.SortBy == "volume_id" {
	if data.SortOrder == "asc" {
	} else {
	}
} else {
}
Collection
if data.SortBy == "collection" {
	if data.SortOrder == "asc" {
	} else {
	}
} else {
}
Shard Count
if data.SortBy == "total_shards" {
	if data.SortOrder == "asc" {
	} else {
	}
} else {
}
Shard Size
Shard Locations
Status
if data.SortBy == "completeness" {
	if data.SortOrder == "asc" {
	} else {
	}
} else {
}
Data Centers
Actions
if data.ShowCollectionColumn {
}
if data.ShowDataCenterColumn {
}
for _, volume := range data.EcVolumes {
	if data.ShowCollectionColumn {
	}
	if data.ShowDataCenterColumn {
	}
	{fmt.Sprintf("%d", volume.VolumeID)}
	if volume.Collection != "" {
		{volume.Collection}
	} else {
		default
	}
	{fmt.Sprintf("%d/14", volume.TotalShards)}
	@displayShardSizes(volume.ShardSizes)
	@displayVolumeDistribution(volume)
	@displayEcVolumeStatus(volume)
	for i, dc := range volume.DataCenters {
		if i > 0 {
			,
		}
		{dc}
	}
	if !volume.IsComplete {
	}
}
if data.TotalPages > 1 { }
}

// displayShardLocationsHTML renders shard locations as proper HTML
templ displayShardLocationsHTML(shardLocations map[int]string) {
	if len(shardLocations) == 0 {
		No shards
	} else {
		for i, serverInfo := range groupShardsByServer(shardLocations) {
			if i > 0 {
			}
			{ serverInfo.Server } : { serverInfo.ShardRanges }
		}
	}
}

// displayShardSizes renders shard sizes in a compact format
templ displayShardSizes(shardSizes map[int]int64) {
	if len(shardSizes) == 0 {
		-
	} else {
		@renderShardSizesContent(shardSizes)
	}
}

// renderShardSizesContent renders the content of shard sizes
templ renderShardSizesContent(shardSizes map[int]int64) {
	if areAllShardSizesSame(shardSizes) {
		// All shards have the same size, show just the common size
		{getCommonShardSize(shardSizes)}
	} else {
		// Shards have different sizes, show individual sizes
		{ formatIndividualShardSizes(shardSizes) }
	}
}

// ServerShardInfo represents server and its shard ranges with sizes
type ServerShardInfo struct {
	Server      string
	ShardRanges string
}

// groupShardsByServer groups shards by server and formats ranges
func groupShardsByServer(shardLocations map[int]string) []ServerShardInfo {
	if len(shardLocations) == 0 {
		return []ServerShardInfo{}
	}

	// Group shards by server
	serverShards := make(map[string][]int)
	for shardId, server := range shardLocations {
		serverShards[server] = append(serverShards[server], shardId)
	}

	var serverInfos []ServerShardInfo
	for server, shards := range serverShards {
		// Sort shards for each server
		for i := 0; i < len(shards); i++ {
			for j := i + 1; j < len(shards); j++ {
				if shards[i] > shards[j] {
					shards[i], shards[j] = shards[j], shards[i]
				}
			}
		}

		// Format shard ranges compactly
		shardRanges := formatShardRanges(shards)

		serverInfos = append(serverInfos, ServerShardInfo{
			Server:      server,
			ShardRanges: shardRanges,
		})
	}

	// Sort by server name
	for i := 0; i < len(serverInfos); i++ {
		for j := i + 1; j < len(serverInfos); j++ {
			if serverInfos[i].Server > serverInfos[j].Server {
				serverInfos[i], serverInfos[j] = serverInfos[j], serverInfos[i]
			}
		}
	}

	return serverInfos
}

// groupShardsByServerWithSizes groups shards by server and formats ranges with sizes
func groupShardsByServerWithSizes(shardLocations map[int]string, shardSizes map[int]int64) []ServerShardInfo {
	if len(shardLocations) == 0 {
		return []ServerShardInfo{}
	}

	// Group shards by server
	serverShards := make(map[string][]int)
	for shardId, server := range shardLocations {
		serverShards[server] = append(serverShards[server], shardId)
	}

	var serverInfos []ServerShardInfo
	for server, shards := range serverShards {
		// Sort shards for each server
		for i := 0; i < len(shards); i++ {
			for j := i + 1; j < len(shards); j++ {
				if shards[i] > shards[j] {
					shards[i], shards[j] = shards[j], shards[i]
				}
			}
		}

		// Format shard ranges compactly with sizes
		shardRanges := formatShardRangesWithSizes(shards, shardSizes)

		serverInfos = append(serverInfos, ServerShardInfo{
			Server:      server,
			ShardRanges: shardRanges,
		})
	}

	// Sort by server name
	for i := 0; i < len(serverInfos); i++ {
		for j := i + 1; j < len(serverInfos); j++ {
			if serverInfos[i].Server > serverInfos[j].Server {
				serverInfos[i], serverInfos[j] = serverInfos[j], serverInfos[i]
			}
		}
	}

	return serverInfos
}

// Helper function to format shard ranges compactly (e.g., "0-3,7,9-11")
func formatShardRanges(shards []int) string {
	if len(shards) == 0 {
		return ""
	}

	var ranges []string
	start := shards[0]
	end := shards[0]

	for i := 1; i < len(shards); i++ {
		if shards[i] == end+1 {
			end = shards[i]
		} else {
			if start == end {
				ranges = append(ranges, fmt.Sprintf("%d", start))
			} else {
				ranges = append(ranges, fmt.Sprintf("%d-%d", start, end))
			}
			start = shards[i]
			end = shards[i]
		}
	}

	// Add the last range
	if start == end {
		ranges = append(ranges, fmt.Sprintf("%d", start))
	} else {
		ranges = append(ranges, fmt.Sprintf("%d-%d", start, end))
	}

	return strings.Join(ranges, ",")
}

// Helper function to format shard ranges with sizes (e.g., "0(1.2MB),1-3(2.5MB),7(800KB)")
func formatShardRangesWithSizes(shards []int, shardSizes map[int]int64) string {
	if len(shards) == 0 {
		return ""
	}

	var ranges []string
	start := shards[0]
	end := shards[0]
	var totalSize int64

	for i := 1; i < len(shards); i++ {
		if shards[i] == end+1 {
			end = shards[i]
			totalSize += shardSizes[shards[i]]
		} else {
			// Add current range with size
			if start == end {
				size := shardSizes[start]
				if size > 0 {
					ranges = append(ranges, fmt.Sprintf("%d(%s)", start, bytesToHumanReadable(size)))
				} else {
					ranges = append(ranges, fmt.Sprintf("%d", start))
				}
			} else {
				// Calculate total size for the range
				rangeSize := shardSizes[start]
				for j := start + 1; j <= end; j++ {
					rangeSize += shardSizes[j]
				}
				if rangeSize > 0 {
					ranges = append(ranges, fmt.Sprintf("%d-%d(%s)", start, end, bytesToHumanReadable(rangeSize)))
				} else {
					ranges = append(ranges, fmt.Sprintf("%d-%d", start, end))
				}
			}
			start = shards[i]
			end = shards[i]
			totalSize = shardSizes[shards[i]]
		}
	}

	// Add the last range
	if start == end {
		size := shardSizes[start]
		if size > 0 {
			ranges = append(ranges, fmt.Sprintf("%d(%s)", start, bytesToHumanReadable(size)))
		} else {
			ranges = append(ranges, fmt.Sprintf("%d", start))
		}
	} else {
		// Calculate total size for the range
		rangeSize := shardSizes[start]
		for j := start + 1; j <= end; j++ {
			rangeSize += shardSizes[j]
		}
		if rangeSize > 0 {
			ranges = append(ranges, fmt.Sprintf("%d-%d(%s)", start, end, bytesToHumanReadable(rangeSize)))
		} else {
			ranges = append(ranges, fmt.Sprintf("%d-%d", start, end))
		}
	}

	return strings.Join(ranges, ",")
}

// Helper function to convert bytes to human readable format
func bytesToHumanReadable(bytes int64) string {
	const unit = 1024
	if bytes < unit {
		return fmt.Sprintf("%dB", bytes)
	}
	div, exp := int64(unit), 0
	for n := bytes / unit; n >= unit; n /= unit {
		div *= unit
		exp++
	}
	return fmt.Sprintf("%.1f%cB", float64(bytes)/float64(div), "KMGTPE"[exp])
}

// Helper function to format missing shards
func formatMissingShards(missingShards []int) string {
	if len(missingShards) == 0 {
		return ""
	}
	var shardStrs []string
	for _, shard := range missingShards {
		shardStrs = append(shardStrs, fmt.Sprintf("%d", shard))
	}
	return strings.Join(shardStrs, ", ")
}

// Helper function to check if all shard sizes are the same
func areAllShardSizesSame(shardSizes map[int]int64) bool {
	if len(shardSizes) <= 1 {
		return true
	}

	var firstSize int64 = -1
	for _, size := range shardSizes {
		if firstSize == -1 {
			firstSize = size
		} else if size != firstSize {
			return false
		}
	}
	return true
}

// Helper function to get the common shard size (when all shards are the same size)
func getCommonShardSize(shardSizes map[int]int64) string {
	for _, size := range shardSizes {
		return bytesToHumanReadable(size)
	}
	return "-"
}

// Helper function to format individual shard sizes
func formatIndividualShardSizes(shardSizes map[int]int64) string {
	if len(shardSizes) == 0 {
		return ""
	}

	// Group shards by size for more compact display
	sizeGroups := make(map[int64][]int)
	for shardId, size := range shardSizes {
		sizeGroups[size] = append(sizeGroups[size], shardId)
	}

	// If there are only a few different sizes (three or fewer), show them grouped
	if len(sizeGroups) <= 3 {
		var groupStrs []string
		for size, shardIds := range sizeGroups {
			// Sort shard IDs
			for i := 0; i < len(shardIds); i++ {
				for j := i + 1; j < len(shardIds); j++ {
					if shardIds[i] > shardIds[j] {
						shardIds[i], shardIds[j] = shardIds[j], shardIds[i]
					}
				}
			}

			var idRanges []string
			if len(shardIds) <= 4 {
				// Show individual IDs if few shards
				for _, id := range shardIds {
					idRanges = append(idRanges, fmt.Sprintf("%d", id))
				}
			} else {
				// Show count if many shards
				idRanges = append(idRanges, fmt.Sprintf("%d shards", len(shardIds)))
			}

			groupStrs = append(groupStrs, fmt.Sprintf("%s: %s", strings.Join(idRanges, ","), bytesToHumanReadable(size)))
		}
		return strings.Join(groupStrs, " | ")
	}

	// If too many different sizes, show summary
	return fmt.Sprintf("%d different sizes", len(sizeGroups))
}

// displayVolumeDistribution shows the distribution summary for a volume
templ displayVolumeDistribution(volume dash.EcVolumeWithShards) {
{ calculateVolumeDistributionSummary(volume) }
}

// displayEcVolumeStatus shows an improved status display for EC volumes
templ displayEcVolumeStatus(volume dash.EcVolumeWithShards) {
	if volume.IsComplete {
		Complete
	} else {
		if len(volume.MissingShards) > 10 {
			Critical ({fmt.Sprintf("%d", len(volume.MissingShards))} missing)
		} else if len(volume.MissingShards) > 6 {
			Degraded ({fmt.Sprintf("%d", len(volume.MissingShards))} missing)
		} else if len(volume.MissingShards) > 2 {
			Incomplete ({fmt.Sprintf("%d", len(volume.MissingShards))} missing)
		} else {
			Minor Issues ({fmt.Sprintf("%d", len(volume.MissingShards))} missing)
		}
	}
}

// calculateVolumeDistributionSummary calculates and formats the distribution summary for a volume
func calculateVolumeDistributionSummary(volume dash.EcVolumeWithShards) string {
	dataCenters := make(map[string]bool)
	racks := make(map[string]bool)
	servers := make(map[string]bool)

	// Count unique servers from shard locations
	for _, server := range volume.ShardLocations {
		servers[server] = true
	}

	// Use the DataCenters field if available
	for _, dc := range volume.DataCenters {
		dataCenters[dc] = true
	}

	// Use the Servers field if available
	for _, server := range volume.Servers {
		servers[server] = true
	}

	// Use the Racks field if available
	for _, rack := range volume.Racks {
		racks[rack] = true
	}

	// If we don't have rack information, estimate it from servers as fallback
	rackCount := len(racks)
	if rackCount == 0 {
		// Fallback estimation - assume each server might be in a different rack
		rackCount = len(servers)
		if len(dataCenters) > 0 {
			// More conservative estimate if we have DC info
			rackCount = (len(servers) + len(dataCenters) - 1) / len(dataCenters)
			if rackCount == 0 {
				rackCount = 1
			}
		}
	}

	return fmt.Sprintf("%d DCs, %d racks, %d servers", len(dataCenters), rackCount, len(servers))
}
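
// exampleShardFormatting is an illustrative sketch, not referenced by the
// templates above: it shows the expected output of the shard-range and size
// formatters defined in this file. The shard IDs and sizes are made-up sample
// values chosen only for demonstration.
func exampleShardFormatting() {
	shards := []int{0, 1, 2, 3, 7, 9, 10, 11}

	// Contiguous shard IDs collapse into ranges: "0-3,7,9-11"
	fmt.Println(formatShardRanges(shards))

	sizes := map[int]int64{
		0: 1 << 20, 1: 1 << 20, 2: 1 << 20, 3: 1 << 20,
		7: 800 * 1024,
		9: 1 << 20, 10: 1 << 20, 11: 1 << 20,
	}

	// Ranges are annotated with their combined size:
	// "0-3(4.0MB),7(800.0KB),9-11(3.0MB)"
	fmt.Println(formatShardRangesWithSizes(shards, sizes))

	// Sizes use binary units, e.g. 1536 bytes renders as "1.5KB"
	fmt.Println(bytesToHumanReadable(1536))
}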