diff --git a/weed/admin/dash/ec_shard_management.go b/weed/admin/dash/ec_shard_management.go
index a2000cbbd..12d69da21 100644
--- a/weed/admin/dash/ec_shard_management.go
+++ b/weed/admin/dash/ec_shard_management.go
@@ -27,7 +27,7 @@ func (s *AdminServer) GetClusterEcShards(page int, pageSize int, sortBy string,
}
var ecShards []EcShardWithInfo
- shardsPerVolume := make(map[uint32]int)
+ volumeShardsMap := make(map[uint32]map[int]bool) // volumeId -> set of shards present
volumesWithAllShards := 0
volumesWithMissingShards := 0
@@ -45,15 +45,22 @@ func (s *AdminServer) GetClusterEcShards(page int, pageSize int, sortBy string,
for _, diskInfo := range node.DiskInfos {
// Process EC shard information
for _, ecShardInfo := range diskInfo.EcShardInfos {
- // Count shards per volume
- shardsPerVolume[ecShardInfo.Id] += getShardCount(ecShardInfo.EcIndexBits)
+ volumeId := ecShardInfo.Id
+
+ // Initialize volume shards map if needed
+ if volumeShardsMap[volumeId] == nil {
+ volumeShardsMap[volumeId] = make(map[int]bool)
+ }
// Create individual shard entries for each shard this server has
shardBits := ecShardInfo.EcIndexBits
for shardId := 0; shardId < erasure_coding.TotalShardsCount; shardId++ {
if (shardBits & (1 << uint(shardId))) != 0 {
+ // Mark this shard as present for this volume
+ volumeShardsMap[volumeId][shardId] = true
+
ecShard := EcShardWithInfo{
- VolumeID: ecShardInfo.Id,
+ VolumeID: volumeId,
ShardID: uint32(shardId),
Collection: ecShardInfo.Collection,
Size: 0, // EC shards don't have individual size in the API response
@@ -82,24 +89,37 @@ func (s *AdminServer) GetClusterEcShards(page int, pageSize int, sortBy string,
return nil, err
}
- // Calculate completeness statistics
- for volumeId, shardCount := range shardsPerVolume {
- if shardCount == erasure_coding.TotalShardsCount {
+ // Calculate volume-level completeness (across all servers)
+ volumeCompleteness := make(map[uint32]bool)
+ volumeMissingShards := make(map[uint32][]int)
+
+ for volumeId, shardsPresent := range volumeShardsMap {
+ var missingShards []int
+ shardCount := len(shardsPresent)
+
+ // Find which shards are missing for this volume across ALL servers
+ for shardId := 0; shardId < erasure_coding.TotalShardsCount; shardId++ {
+ if !shardsPresent[shardId] {
+ missingShards = append(missingShards, shardId)
+ }
+ }
+
+ isComplete := (shardCount == erasure_coding.TotalShardsCount)
+ volumeCompleteness[volumeId] = isComplete
+ volumeMissingShards[volumeId] = missingShards
+
+ if isComplete {
volumesWithAllShards++
} else {
volumesWithMissingShards++
}
+ }
- // Update completeness info for each shard
- for i := range ecShards {
- if ecShards[i].VolumeID == volumeId {
- ecShards[i].IsComplete = (shardCount == erasure_coding.TotalShardsCount)
- if !ecShards[i].IsComplete {
- // Calculate missing shards
- ecShards[i].MissingShards = getMissingShards(ecShards[i].EcIndexBits)
- }
- }
- }
+ // Update completeness info for each shard based on volume-level completeness
+ for i := range ecShards {
+ volumeId := ecShards[i].VolumeID
+ ecShards[i].IsComplete = volumeCompleteness[volumeId]
+ ecShards[i].MissingShards = volumeMissingShards[volumeId]
}
// Filter by collection if specified
@@ -149,7 +169,7 @@ func (s *AdminServer) GetClusterEcShards(page int, pageSize int, sortBy string,
data := &ClusterEcShardsData{
EcShards: paginatedShards,
TotalShards: totalShards,
- TotalVolumes: len(shardsPerVolume),
+ TotalVolumes: len(volumeShardsMap),
LastUpdated: time.Now(),
// Pagination
@@ -175,11 +195,16 @@ func (s *AdminServer) GetClusterEcShards(page int, pageSize int, sortBy string,
FilterCollection: collection,
// EC specific statistics
- ShardsPerVolume: shardsPerVolume,
+ ShardsPerVolume: make(map[uint32]int), // populated below from volumeShardsMap
VolumesWithAllShards: volumesWithAllShards,
VolumesWithMissingShards: volumesWithMissingShards,
}
+ // Derive per-volume shard counts from volumeShardsMap for the response
+ for volumeId, shardsPresent := range volumeShardsMap {
+ data.ShardsPerVolume[volumeId] = len(shardsPresent)
+ }
+
// Set single values when only one exists
if len(dataCenters) == 1 {
for dc := range dataCenters {
@@ -203,6 +228,238 @@ func (s *AdminServer) GetClusterEcShards(page int, pageSize int, sortBy string,
return data, nil
}
+// GetClusterEcVolumes retrieves cluster EC volumes data grouped by volume ID with shard locations
+func (s *AdminServer) GetClusterEcVolumes(page int, pageSize int, sortBy string, sortOrder string, collection string) (*ClusterEcVolumesData, error) {
+ // Set defaults
+ if page < 1 {
+ page = 1
+ }
+ if pageSize < 1 || pageSize > 1000 {
+ pageSize = 100
+ }
+ if sortBy == "" {
+ sortBy = "volume_id"
+ }
+ if sortOrder == "" {
+ sortOrder = "asc"
+ }
+
+ volumeData := make(map[uint32]*EcVolumeWithShards)
+ totalShards := 0
+
+ // Get detailed EC shard information via gRPC
+ err := s.WithMasterClient(func(client master_pb.SeaweedClient) error {
+ resp, err := client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
+ if err != nil {
+ return err
+ }
+
+ if resp.TopologyInfo != nil {
+ for _, dc := range resp.TopologyInfo.DataCenterInfos {
+ for _, rack := range dc.RackInfos {
+ for _, node := range rack.DataNodeInfos {
+ for _, diskInfo := range node.DiskInfos {
+ // Process EC shard information
+ for _, ecShardInfo := range diskInfo.EcShardInfos {
+ volumeId := ecShardInfo.Id
+
+ // Initialize volume data if needed
+ if volumeData[volumeId] == nil {
+ volumeData[volumeId] = &EcVolumeWithShards{
+ VolumeID: volumeId,
+ Collection: ecShardInfo.Collection,
+ TotalShards: 0,
+ IsComplete: false,
+ MissingShards: []int{},
+ ShardLocations: make(map[int]string),
+ DataCenters: []string{},
+ Servers: []string{},
+ }
+ }
+
+ volume := volumeData[volumeId]
+
+ // Track data centers and servers
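+ // Linear membership checks are fine here; a volume typically spans only a few data centers and servers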
+ dcExists := false
+ for _, existingDc := range volume.DataCenters {
+ if existingDc == dc.Id {
+ dcExists = true
+ break
+ }
+ }
+ if !dcExists {
+ volume.DataCenters = append(volume.DataCenters, dc.Id)
+ }
+
+ serverExists := false
+ for _, existingServer := range volume.Servers {
+ if existingServer == node.Id {
+ serverExists = true
+ break
+ }
+ }
+ if !serverExists {
+ volume.Servers = append(volume.Servers, node.Id)
+ }
+
+ // Process each shard this server has for this volume
+ shardBits := ecShardInfo.EcIndexBits
+ for shardId := 0; shardId < erasure_coding.TotalShardsCount; shardId++ {
+ if (shardBits & (1 << uint(shardId))) != 0 {
+ // Record shard location
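+ // If multiple servers report the same shard, the last one seen wins here, while totalShards counts every copy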
+ volume.ShardLocations[shardId] = node.Id
+ totalShards++
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return nil
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ // Calculate completeness for each volume
+ completeVolumes := 0
+ incompleteVolumes := 0
+
+ for _, volume := range volumeData {
+ volume.TotalShards = len(volume.ShardLocations)
+
+ // Find missing shards
+ var missingShards []int
+ for shardId := 0; shardId < erasure_coding.TotalShardsCount; shardId++ {
+ if _, exists := volume.ShardLocations[shardId]; !exists {
+ missingShards = append(missingShards, shardId)
+ }
+ }
+
+ volume.MissingShards = missingShards
+ volume.IsComplete = (len(missingShards) == 0)
+
+ if volume.IsComplete {
+ completeVolumes++
+ } else {
+ incompleteVolumes++
+ }
+ }
+
+ // Convert map to slice
+ var ecVolumes []EcVolumeWithShards
+ for _, volume := range volumeData {
+ // Filter by collection if specified
+ if collection == "" || volume.Collection == collection {
+ ecVolumes = append(ecVolumes, *volume)
+ }
+ }
+
+ // Sort the results
+ sortEcVolumes(ecVolumes, sortBy, sortOrder)
+
+ // Calculate statistics for conditional display
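+ // These sets drive the Show*Column flags below; a column is only shown when more than one distinct value exists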
+ dataCenters := make(map[string]bool)
+ collections := make(map[string]bool)
+
+ for _, volume := range ecVolumes {
+ for _, dc := range volume.DataCenters {
+ dataCenters[dc] = true
+ }
+ if volume.Collection != "" {
+ collections[volume.Collection] = true
+ }
+ }
+
+ // Pagination
+ totalVolumes := len(ecVolumes)
+ totalPages := (totalVolumes + pageSize - 1) / pageSize
+ startIndex := (page - 1) * pageSize
+ endIndex := startIndex + pageSize
+ if endIndex > totalVolumes {
+ endIndex = totalVolumes
+ }
+
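+ // A requested page past the last one yields an empty result instead of an error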
+ if startIndex >= totalVolumes {
+ startIndex = 0
+ endIndex = 0
+ }
+
+ paginatedVolumes := ecVolumes[startIndex:endIndex]
+
+ // Build response
+ data := &ClusterEcVolumesData{
+ EcVolumes: paginatedVolumes,
+ TotalVolumes: totalVolumes,
+ LastUpdated: time.Now(),
+
+ // Pagination
+ Page: page,
+ PageSize: pageSize,
+ TotalPages: totalPages,
+
+ // Sorting
+ SortBy: sortBy,
+ SortOrder: sortOrder,
+
+ // Filtering
+ Collection: collection,
+
+ // Conditional display flags
+ ShowDataCenterColumn: len(dataCenters) > 1,
+ ShowRackColumn: false, // We don't track racks in this view for simplicity
+ ShowCollectionColumn: len(collections) > 1,
+
+ // Statistics
+ CompleteVolumes: completeVolumes,
+ IncompleteVolumes: incompleteVolumes,
+ TotalShards: totalShards,
+ }
+
+ return data, nil
+}
+
+// sortEcVolumes sorts EC volumes based on the specified field and order
+func sortEcVolumes(volumes []EcVolumeWithShards, sortBy string, sortOrder string) {
+ sort.Slice(volumes, func(i, j int) bool {
+ var less bool
+ switch sortBy {
+ case "volume_id":
+ less = volumes[i].VolumeID < volumes[j].VolumeID
+ case "collection":
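+ // Tie-break equal collections by volume ID so the ordering is stable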
+ if volumes[i].Collection == volumes[j].Collection {
+ less = volumes[i].VolumeID < volumes[j].VolumeID
+ } else {
+ less = volumes[i].Collection < volumes[j].Collection
+ }
+ case "total_shards":
+ if volumes[i].TotalShards == volumes[j].TotalShards {
+ less = volumes[i].VolumeID < volumes[j].VolumeID
+ } else {
+ less = volumes[i].TotalShards < volumes[j].TotalShards
+ }
+ case "completeness":
+ // Complete volumes first, then by volume ID
+ if volumes[i].IsComplete == volumes[j].IsComplete {
+ less = volumes[i].VolumeID < volumes[j].VolumeID
+ } else {
+ less = volumes[i].IsComplete && !volumes[j].IsComplete
+ }
+ default:
+ less = volumes[i].VolumeID < volumes[j].VolumeID
+ }
+
+ if sortOrder == "desc" {
+ return !less
+ }
+ return less
+ })
+}
+
// getShardCount returns the number of shards represented by the bitmap
func getShardCount(ecIndexBits uint32) int {
count := 0
@@ -230,44 +487,28 @@ func sortEcShards(shards []EcShardWithInfo, sortBy string, sortOrder string) {
sort.Slice(shards, func(i, j int) bool {
var less bool
switch sortBy {
- case "volume_id":
- if shards[i].VolumeID == shards[j].VolumeID {
- less = shards[i].ShardID < shards[j].ShardID
- } else {
- less = shards[i].VolumeID < shards[j].VolumeID
- }
case "shard_id":
- if shards[i].ShardID == shards[j].ShardID {
- less = shards[i].VolumeID < shards[j].VolumeID
- } else {
- less = shards[i].ShardID < shards[j].ShardID
- }
- case "collection":
- if shards[i].Collection == shards[j].Collection {
- less = shards[i].VolumeID < shards[j].VolumeID
- } else {
- less = shards[i].Collection < shards[j].Collection
- }
+ less = shards[i].ShardID < shards[j].ShardID
case "server":
if shards[i].Server == shards[j].Server {
- less = shards[i].VolumeID < shards[j].VolumeID
+ less = shards[i].ShardID < shards[j].ShardID // Secondary sort by shard ID
} else {
less = shards[i].Server < shards[j].Server
}
- case "datacenter":
+ case "data_center":
if shards[i].DataCenter == shards[j].DataCenter {
- less = shards[i].VolumeID < shards[j].VolumeID
+ less = shards[i].ShardID < shards[j].ShardID // Secondary sort by shard ID
} else {
less = shards[i].DataCenter < shards[j].DataCenter
}
case "rack":
if shards[i].Rack == shards[j].Rack {
- less = shards[i].VolumeID < shards[j].VolumeID
+ less = shards[i].ShardID < shards[j].ShardID // Secondary sort by shard ID
} else {
less = shards[i].Rack < shards[j].Rack
}
default:
- less = shards[i].VolumeID < shards[j].VolumeID
+ less = shards[i].ShardID < shards[j].ShardID
}
if sortOrder == "desc" {
@@ -278,7 +519,15 @@ func sortEcShards(shards []EcShardWithInfo, sortBy string, sortOrder string) {
}
// GetEcVolumeDetails retrieves detailed information about a specific EC volume
-func (s *AdminServer) GetEcVolumeDetails(volumeID uint32) (*EcVolumeDetailsData, error) {
+func (s *AdminServer) GetEcVolumeDetails(volumeID uint32, sortBy string, sortOrder string) (*EcVolumeDetailsData, error) {
+ // Set defaults
+ if sortBy == "" {
+ sortBy = "shard_id"
+ }
+ if sortOrder == "" {
+ sortOrder = "asc"
+ }
+
var shards []EcShardWithInfo
var collection string
dataCenters := make(map[string]bool)
@@ -364,10 +613,8 @@ func (s *AdminServer) GetEcVolumeDetails(volumeID uint32) (*EcVolumeDetailsData,
shards[i].MissingShards = missingShards
}
- // Sort shards by ID
- sort.Slice(shards, func(i, j int) bool {
- return shards[i].ShardID < shards[j].ShardID
- })
+ // Sort shards based on parameters
+ sortEcShards(shards, sortBy, sortOrder)
// Convert maps to slices
var dcList []string
diff --git a/weed/admin/dash/types.go b/weed/admin/dash/types.go
index d5b6c0e4b..25177c298 100644
--- a/weed/admin/dash/types.go
+++ b/weed/admin/dash/types.go
@@ -450,3 +450,48 @@ type MaintenanceWorkersData struct {
}
// Maintenance system types are now in weed/admin/maintenance package
+
+// EcVolumeWithShards represents an EC volume with its shard distribution
+type EcVolumeWithShards struct {
+ VolumeID uint32 `json:"volume_id"`
+ Collection string `json:"collection"`
+ TotalShards int `json:"total_shards"`
+ IsComplete bool `json:"is_complete"`
+ MissingShards []int `json:"missing_shards"`
+ ShardLocations map[int]string `json:"shard_locations"` // shardId -> server
+ DataCenters []string `json:"data_centers"`
+ Servers []string `json:"servers"`
+ ModifiedTime int64 `json:"modified_time"`
+}
+
+// ClusterEcVolumesData represents the response for clustered EC volumes view
+type ClusterEcVolumesData struct {
+ EcVolumes []EcVolumeWithShards `json:"ec_volumes"`
+ TotalVolumes int `json:"total_volumes"`
+ LastUpdated time.Time `json:"last_updated"`
+
+ // Pagination
+ Page int `json:"page"`
+ PageSize int `json:"page_size"`
+ TotalPages int `json:"total_pages"`
+
+ // Sorting
+ SortBy string `json:"sort_by"`
+ SortOrder string `json:"sort_order"`
+
+ // Filtering
+ Collection string `json:"collection"`
+
+ // Conditional display flags
+ ShowDataCenterColumn bool `json:"show_datacenter_column"`
+ ShowRackColumn bool `json:"show_rack_column"`
+ ShowCollectionColumn bool `json:"show_collection_column"`
+
+ // Statistics
+ CompleteVolumes int `json:"complete_volumes"`
+ IncompleteVolumes int `json:"incomplete_volumes"`
+ TotalShards int `json:"total_shards"`
+
+ // User context
+ Username string `json:"username"`
+}
diff --git a/weed/admin/handlers/cluster_handlers.go b/weed/admin/handlers/cluster_handlers.go
index 482ccfcac..1640a566d 100644
--- a/weed/admin/handlers/cluster_handlers.go
+++ b/weed/admin/handlers/cluster_handlers.go
@@ -161,31 +161,19 @@ func (h *ClusterHandlers) ShowClusterCollections(c *gin.Context) {
}
}
-// ShowClusterEcShards renders the cluster EC shards page
+// ShowClusterEcShards renders the cluster EC volumes page (EC shards grouped by volume)
func (h *ClusterHandlers) ShowClusterEcShards(c *gin.Context) {
- // Get pagination and sorting parameters from query string
- page := 1
- if p := c.Query("page"); p != "" {
- if parsed, err := strconv.Atoi(p); err == nil && parsed > 0 {
- page = parsed
- }
- }
-
- pageSize := 100
- if ps := c.Query("pageSize"); ps != "" {
- if parsed, err := strconv.Atoi(ps); err == nil && parsed > 0 && parsed <= 1000 {
- pageSize = parsed
- }
- }
-
- sortBy := c.DefaultQuery("sortBy", "volume_id")
- sortOrder := c.DefaultQuery("sortOrder", "asc")
- collection := c.Query("collection") // Optional collection filter
-
- // Get cluster EC shards data
- ecShardsData, err := h.adminServer.GetClusterEcShards(page, pageSize, sortBy, sortOrder, collection)
+ // Parse query parameters
+ page, _ := strconv.Atoi(c.DefaultQuery("page", "1"))
+ pageSize, _ := strconv.Atoi(c.DefaultQuery("page_size", "10"))
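+ // Atoi errors leave the values at 0; GetClusterEcVolumes falls back to its own defaults for out-of-range values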
+ sortBy := c.DefaultQuery("sort_by", "volume_id")
+ sortOrder := c.DefaultQuery("sort_order", "asc")
+ collection := c.DefaultQuery("collection", "")
+
+ // Get data from admin server
+ data, err := h.adminServer.GetClusterEcVolumes(page, pageSize, sortBy, sortOrder, collection)
if err != nil {
- c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get cluster EC shards: " + err.Error()})
+ c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
@@ -194,15 +182,15 @@ func (h *ClusterHandlers) ShowClusterEcShards(c *gin.Context) {
if username == "" {
username = "admin"
}
- ecShardsData.Username = username
+ data.Username = username
- // Render HTML template
+ // Render template
c.Header("Content-Type", "text/html")
- ecShardsComponent := app.ClusterEcShards(*ecShardsData)
- layoutComponent := layout.Layout(c, ecShardsComponent)
+ ecVolumesComponent := app.ClusterEcVolumes(*data)
+ layoutComponent := layout.Layout(c, ecVolumesComponent)
err = layoutComponent.Render(c.Request.Context(), c.Writer)
if err != nil {
- c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()})
+ c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
}
@@ -222,8 +210,12 @@ func (h *ClusterHandlers) ShowEcVolumeDetails(c *gin.Context) {
return
}
+ // Parse sorting parameters
+ sortBy := c.DefaultQuery("sort_by", "shard_id")
+ sortOrder := c.DefaultQuery("sort_order", "asc")
+
// Get EC volume details
- ecVolumeDetails, err := h.adminServer.GetEcVolumeDetails(uint32(volumeID))
+ ecVolumeDetails, err := h.adminServer.GetEcVolumeDetails(uint32(volumeID), sortBy, sortOrder)
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get EC volume details: " + err.Error()})
return
diff --git a/weed/admin/view/app/cluster_ec_shards.templ b/weed/admin/view/app/cluster_ec_shards.templ
index a5c2258d1..f62d663d4 100644
--- a/weed/admin/view/app/cluster_ec_shards.templ
+++ b/weed/admin/view/app/cluster_ec_shards.templ
@@ -198,7 +198,7 @@ templ ClusterEcShards(data dash.ClusterEcShardsData) {
}
-
Status |
+ Volume & Shard Status |
Actions |
@@ -238,14 +238,18 @@ templ ClusterEcShards(data dash.ClusterEcShardsData) {
if shard.IsComplete {
- Complete
+ Volume Complete
} else {
- Missing {fmt.Sprintf("%d", len(shard.MissingShards))} shards
+ Volume Missing {fmt.Sprintf("%d", len(shard.MissingShards))} shards
}
+
+
+ Shard {fmt.Sprintf("%d", shard.ShardID)} present
+
|
diff --git a/weed/admin/view/app/cluster_ec_shards_templ.go b/weed/admin/view/app/cluster_ec_shards_templ.go
index 8597ccfcd..93e582b02 100644
--- a/weed/admin/view/app/cluster_ec_shards_templ.go
+++ b/weed/admin/view/app/cluster_ec_shards_templ.go
@@ -307,7 +307,7 @@ func ClusterEcShards(data dash.ClusterEcShardsData) templ.Component {
return templ_7745c5c3_Err
}
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 45, " Status | Actions | ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 45, "Volume & Shard Status | Actions | ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
@@ -436,19 +436,19 @@ func ClusterEcShards(data dash.ClusterEcShardsData) templ.Component {
return templ_7745c5c3_Err
}
if shard.IsComplete {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 61, "Complete")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 61, "Volume Complete")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 62, " Missing ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 62, " Volume Missing ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var13 string
templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(shard.MissingShards)))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 246, Col: 88}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 246, Col: 95}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13))
if templ_7745c5c3_Err != nil {
@@ -459,243 +459,256 @@ func ClusterEcShards(data dash.ClusterEcShardsData) templ.Component {
return templ_7745c5c3_Err
}
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 64, " ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 65, " present | ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if !shard.IsComplete {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 66, "")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 68, "\" title=\"Repair missing shards\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 68, " | ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 69, "")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 69, " ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 70, "")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if data.TotalPages > 1 {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 70, "")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 93, "")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 94, "")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
diff --git a/weed/admin/view/app/cluster_ec_volumes.templ b/weed/admin/view/app/cluster_ec_volumes.templ
new file mode 100644
index 000000000..39c9d82a2
--- /dev/null
+++ b/weed/admin/view/app/cluster_ec_volumes.templ
@@ -0,0 +1,490 @@
+package app
+
+import (
+ "fmt"
+ "strings"
+ "github.com/seaweedfs/seaweedfs/weed/admin/dash"
+)
+
+templ ClusterEcVolumes(data dash.ClusterEcVolumesData) {
+
+
+
+ EC Volumes - SeaweedFS
+
+
+
+
+
+
+
+
+
+
+ EC Volumes
+ ({fmt.Sprintf("%d", data.TotalVolumes)} volumes)
+
+
+
+
+
+
+
+
+
+
+
+ Total Volumes
+ {fmt.Sprintf("%d", data.TotalVolumes)}
+ EC encoded volumes
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Total Shards
+ {fmt.Sprintf("%d", data.TotalShards)}
+ Distributed shards
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Complete Volumes
+ {fmt.Sprintf("%d", data.CompleteVolumes)}
+ All shards present
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Incomplete Volumes
+ {fmt.Sprintf("%d", data.IncompleteVolumes)}
+ Missing shards
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Showing {fmt.Sprintf("%d", (data.Page-1)*data.PageSize + 1)} to {fmt.Sprintf("%d", func() int {
+ end := data.Page * data.PageSize
+ if end > data.TotalVolumes {
+ return data.TotalVolumes
+ }
+ return end
+ }())} of {fmt.Sprintf("%d", data.TotalVolumes)} volumes
+
+
+
+
+
+ per page
+
+
+
+ if data.Collection != "" {
+
+ }
+
+
+
+
+
+ if data.TotalPages > 1 {
+
+ }
+
+
+
+
+
+
+}
+
+// displayShardLocationsHTML renders shard locations as proper HTML
+templ displayShardLocationsHTML(shardLocations map[int]string) {
+ if len(shardLocations) == 0 {
+ No shards
+ } else {
+ for i, serverInfo := range groupShardsByServer(shardLocations) {
+ if i > 0 {
+
+ }
+
+
+ { serverInfo.Server }
+ :
+ { serverInfo.ShardRanges }
+ }
+ }
+}
+
+// ServerShardInfo represents a server and its shard ranges
+type ServerShardInfo struct {
+ Server string
+ ShardRanges string
+}
+
+// groupShardsByServer groups shards by server and formats ranges
+func groupShardsByServer(shardLocations map[int]string) []ServerShardInfo {
+ if len(shardLocations) == 0 {
+ return []ServerShardInfo{}
+ }
+
+ // Group shards by server
+ serverShards := make(map[string][]int)
+ for shardId, server := range shardLocations {
+ serverShards[server] = append(serverShards[server], shardId)
+ }
+
+ var serverInfos []ServerShardInfo
+ for server, shards := range serverShards {
+ // Sort shards for each server
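+ // (simple swap-based sort; each server holds at most a handful of shards per volume, so O(n^2) is fine)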
+ for i := 0; i < len(shards); i++ {
+ for j := i + 1; j < len(shards); j++ {
+ if shards[i] > shards[j] {
+ shards[i], shards[j] = shards[j], shards[i]
+ }
+ }
+ }
+
+ // Format shard ranges compactly
+ shardRanges := formatShardRanges(shards)
+ serverInfos = append(serverInfos, ServerShardInfo{
+ Server: server,
+ ShardRanges: shardRanges,
+ })
+ }
+
+ // Sort by server name
+ for i := 0; i < len(serverInfos); i++ {
+ for j := i + 1; j < len(serverInfos); j++ {
+ if serverInfos[i].Server > serverInfos[j].Server {
+ serverInfos[i], serverInfos[j] = serverInfos[j], serverInfos[i]
+ }
+ }
+ }
+
+ return serverInfos
+}
+
+// Helper function to format shard ranges compactly (e.g., "0-3,7,9-11")
+func formatShardRanges(shards []int) string {
+ if len(shards) == 0 {
+ return ""
+ }
+
+ var ranges []string
+ start := shards[0]
+ end := shards[0]
+
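+ // shards is expected to be sorted ascending (groupShardsByServer sorts before calling); consecutive IDs collapse into one range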
+ for i := 1; i < len(shards); i++ {
+ if shards[i] == end+1 {
+ end = shards[i]
+ } else {
+ if start == end {
+ ranges = append(ranges, fmt.Sprintf("%d", start))
+ } else {
+ ranges = append(ranges, fmt.Sprintf("%d-%d", start, end))
+ }
+ start = shards[i]
+ end = shards[i]
+ }
+ }
+
+ // Add the last range
+ if start == end {
+ ranges = append(ranges, fmt.Sprintf("%d", start))
+ } else {
+ ranges = append(ranges, fmt.Sprintf("%d-%d", start, end))
+ }
+
+ return strings.Join(ranges, ",")
+}
+
+// Helper function to format missing shards
+func formatMissingShards(missingShards []int) string {
+ if len(missingShards) == 0 {
+ return ""
+ }
+
+ var shardStrs []string
+ for _, shard := range missingShards {
+ shardStrs = append(shardStrs, fmt.Sprintf("%d", shard))
+ }
+
+ return strings.Join(shardStrs, ", ")
+}
\ No newline at end of file
diff --git a/weed/admin/view/app/cluster_ec_volumes_templ.go b/weed/admin/view/app/cluster_ec_volumes_templ.go
new file mode 100644
index 000000000..8e28e131c
--- /dev/null
+++ b/weed/admin/view/app/cluster_ec_volumes_templ.go
@@ -0,0 +1,845 @@
+// Code generated by templ - DO NOT EDIT.
+
+// templ: version: v0.3.906
+package app
+
+//lint:file-ignore SA4006 This context is only used if a nested component is present.
+
+import "github.com/a-h/templ"
+import templruntime "github.com/a-h/templ/runtime"
+
+import (
+ "fmt"
+ "github.com/seaweedfs/seaweedfs/weed/admin/dash"
+ "strings"
+)
+
+func ClusterEcVolumes(data dash.ClusterEcVolumesData) templ.Component {
+ return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
+ templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
+ if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil {
+ return templ_7745c5c3_CtxErr
+ }
+ templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
+ if !templ_7745c5c3_IsBuffer {
+ defer func() {
+ templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
+ if templ_7745c5c3_Err == nil {
+ templ_7745c5c3_Err = templ_7745c5c3_BufErr
+ }
+ }()
+ }
+ ctx = templ.InitializeContext(ctx)
+ templ_7745c5c3_Var1 := templ.GetChildren(ctx)
+ if templ_7745c5c3_Var1 == nil {
+ templ_7745c5c3_Var1 = templ.NopComponent
+ }
+ ctx = templ.ClearChildren(ctx)
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "EC Volumes - SeaweedFSEC Volumes (")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var2 string
+ templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalVolumes))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 25, Col: 84}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, " volumes)Total Volumes")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var3 string
+ templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalVolumes))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 38, Col: 86}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "EC encoded volumes Total Shards")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var4 string
+ templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalShards))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 54, Col: 85}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "Distributed shards Complete Volumes")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var5 string
+ templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.CompleteVolumes))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 70, Col: 89}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "All shards present Incomplete Volumes")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var6 string
+ templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.IncompleteVolumes))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 86, Col: 91}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "Missing shards Showing ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var7 string
+ templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", (data.Page-1)*data.PageSize+1))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 102, Col: 79}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, " to ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var8 string
+ templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", func() int {
+ end := data.Page * data.PageSize
+ if end > data.TotalVolumes {
+ return data.TotalVolumes
+ }
+ return end
+ }()))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 108, Col: 24}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, " of ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var9 string
+ templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalVolumes))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 108, Col: 66}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, " volumes per page ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.Collection != "" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, " Collection: ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var10 string
+ templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(data.Collection)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 126, Col: 76}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, " Clear Filter ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, " | Volume ID ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.SortBy == "volume_id" {
+ if data.SortOrder == "asc" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, " | ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.ShowCollectionColumn {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, "Collection ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.SortBy == "collection" {
+ if data.SortOrder == "asc" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 29, "")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, "")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, " | ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, "Shard Count ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.SortBy == "total_shards" {
+ if data.SortOrder == "asc" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, " | Shard Locations | Status ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.SortBy == "completeness" {
+ if data.SortOrder == "asc" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 37, "")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 38, "")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 39, "")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 40, " | ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.ShowDataCenterColumn {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 41, "Data Centers | ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 42, "Actions | ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ for _, volume := range data.EcVolumes {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 43, "| ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var11 string
+ templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", volume.VolumeID))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 205, Col: 75}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 44, " | ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.ShowCollectionColumn {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 45, "")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if volume.Collection != "" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 46, "")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var12 string
+ templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(volume.Collection)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 210, Col: 94}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 47, "")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 48, "default")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 49, " | ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 50, "")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var13 string
+ templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d/14", volume.TotalShards))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 217, Col: 104}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 51, " | ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = displayShardLocationsHTML(volume.ShardLocations).Render(ctx, templ_7745c5c3_Buffer)
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 52, " | ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if volume.IsComplete {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 53, "Complete")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 54, " Missing ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var14 string
+ templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(volume.MissingShards)))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 232, Col: 93}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 55, " shards ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if len(volume.MissingShards) > 0 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 56, " Missing: ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var15 string
+ templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(formatMissingShards(volume.MissingShards))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 237, Col: 95}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 57, "")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 58, " | ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.ShowDataCenterColumn {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 59, "")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ for i, dc := range volume.DataCenters {
+ if i > 0 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 60, ", ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 61, " ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var16 string
+ templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(dc)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 248, Col: 85}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 62, "")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 63, " | ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 64, " ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if !volume.IsComplete {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 66, "")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 68, " | ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 69, "")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.TotalPages > 1 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 70, " ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 84, " ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ return nil
+ })
+}
+
+// displayShardLocationsHTML renders shard locations as proper HTML
+func displayShardLocationsHTML(shardLocations map[int]string) templ.Component {
+ return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
+ templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
+ if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil {
+ return templ_7745c5c3_CtxErr
+ }
+ templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
+ if !templ_7745c5c3_IsBuffer {
+ defer func() {
+ templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
+ if templ_7745c5c3_Err == nil {
+ templ_7745c5c3_Err = templ_7745c5c3_BufErr
+ }
+ }()
+ }
+ ctx = templ.InitializeContext(ctx)
+ templ_7745c5c3_Var25 := templ.GetChildren(ctx)
+ if templ_7745c5c3_Var25 == nil {
+ templ_7745c5c3_Var25 = templ.NopComponent
+ }
+ ctx = templ.ClearChildren(ctx)
+ if len(shardLocations) == 0 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 85, "No shards")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ for i, serverInfo := range groupShardsByServer(shardLocations) {
+ if i > 0 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 86, " ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 87, " ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var27 string
+ templ_7745c5c3_Var27, templ_7745c5c3_Err = templ.JoinStringErrs(serverInfo.Server)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 388, Col: 24}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var27))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 89, ": ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var28 string
+ templ_7745c5c3_Var28, templ_7745c5c3_Err = templ.JoinStringErrs(serverInfo.ShardRanges)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 390, Col: 37}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var28))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ }
+ return nil
+ })
+}
+
+// ServerShardInfo represents a server and its shard ranges
+type ServerShardInfo struct {
+ Server string
+ ShardRanges string
+}
+
+// groupShardsByServer groups shards by server and formats ranges
+func groupShardsByServer(shardLocations map[int]string) []ServerShardInfo {
+ if len(shardLocations) == 0 {
+ return []ServerShardInfo{}
+ }
+
+ // Group shards by server
+ serverShards := make(map[string][]int)
+ for shardId, server := range shardLocations {
+ serverShards[server] = append(serverShards[server], shardId)
+ }
+
+ var serverInfos []ServerShardInfo
+ for server, shards := range serverShards {
+ // Sort shards for each server
+ for i := 0; i < len(shards); i++ {
+ for j := i + 1; j < len(shards); j++ {
+ if shards[i] > shards[j] {
+ shards[i], shards[j] = shards[j], shards[i]
+ }
+ }
+ }
+
+ // Format shard ranges compactly
+ shardRanges := formatShardRanges(shards)
+ serverInfos = append(serverInfos, ServerShardInfo{
+ Server: server,
+ ShardRanges: shardRanges,
+ })
+ }
+
+ // Sort by server name
+ for i := 0; i < len(serverInfos); i++ {
+ for j := i + 1; j < len(serverInfos); j++ {
+ if serverInfos[i].Server > serverInfos[j].Server {
+ serverInfos[i], serverInfos[j] = serverInfos[j], serverInfos[i]
+ }
+ }
+ }
+
+ return serverInfos
+}
+
+// Helper function to format shard ranges compactly (e.g., "0-3,7,9-11")
+func formatShardRanges(shards []int) string {
+ if len(shards) == 0 {
+ return ""
+ }
+
+ var ranges []string
+ start := shards[0]
+ end := shards[0]
+
+ for i := 1; i < len(shards); i++ {
+ if shards[i] == end+1 {
+ end = shards[i]
+ } else {
+ if start == end {
+ ranges = append(ranges, fmt.Sprintf("%d", start))
+ } else {
+ ranges = append(ranges, fmt.Sprintf("%d-%d", start, end))
+ }
+ start = shards[i]
+ end = shards[i]
+ }
+ }
+
+ // Add the last range
+ if start == end {
+ ranges = append(ranges, fmt.Sprintf("%d", start))
+ } else {
+ ranges = append(ranges, fmt.Sprintf("%d-%d", start, end))
+ }
+
+ return strings.Join(ranges, ",")
+}
+
+// Helper function to format missing shards
+func formatMissingShards(missingShards []int) string {
+ if len(missingShards) == 0 {
+ return ""
+ }
+
+ var shardStrs []string
+ for _, shard := range missingShards {
+ shardStrs = append(shardStrs, fmt.Sprintf("%d", shard))
+ }
+
+ return strings.Join(shardStrs, ", ")
+}
+
+var _ = templruntime.GeneratedTemplate
diff --git a/weed/admin/view/app/ec_volume_details.templ b/weed/admin/view/app/ec_volume_details.templ
index e233c8767..b57e89a18 100644
--- a/weed/admin/view/app/ec_volume_details.templ
+++ b/weed/admin/view/app/ec_volume_details.templ
@@ -14,7 +14,7 @@ templ EcVolumeDetails(data dash.EcVolumeDetailsData) {
diff --git a/weed/admin/view/app/ec_volume_details_templ.go b/weed/admin/view/app/ec_volume_details_templ.go
index a8fc62336..a8ceec492 100644
--- a/weed/admin/view/app/ec_volume_details_templ.go
+++ b/weed/admin/view/app/ec_volume_details_templ.go
@@ -34,7 +34,7 @@ func EcVolumeDetails(data dash.EcVolumeDetailsData) templ.Component {
templ_7745c5c3_Var1 = templ.NopComponent
}
ctx = templ.ClearChildren(ctx)
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "EC Volume Details |