-
-
-
-
-
Incomplete Volumes
- {fmt.Sprintf("%d", data.IncompleteVolumes)}
- Missing shards
-
-
-
-
+
+
+
+
+
+
+
Degraded Volumes
+ {fmt.Sprintf("%d", data.IncompleteVolumes)}
+ Incomplete/Critical
+
+
+
+
-
-
-
- EC Storage Note:
- EC volumes use erasure coding ({ fmt.Sprintf("%d+%d", erasure_coding.DataShardsCount, erasure_coding.ParityShardsCount) }) which stores data across { fmt.Sprintf("%d", erasure_coding.TotalShardsCount) } shards with redundancy.
- Physical storage is approximately { fmt.Sprintf("%.1fx", float64(erasure_coding.TotalShardsCount)/float64(erasure_coding.DataShardsCount)) } the original logical data size due to { fmt.Sprintf("%d", erasure_coding.ParityShardsCount) } parity shards.
-
-
-
-
-
-
- Showing {fmt.Sprintf("%d", (data.Page-1)*data.PageSize + 1)} to {fmt.Sprintf("%d", func() int {
- end := data.Page * data.PageSize
- if end > data.TotalVolumes {
- return data.TotalVolumes
- }
- return end
- }())} of {fmt.Sprintf("%d", data.TotalVolumes)} volumes
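// A minimal sketch of the end-of-page clamp that the removed "Showing X to Y of Z volumes"
// block above inlines as an anonymous func (pageRange is a hypothetical helper name; the
// field names mirror the template's data struct):
func pageRange(page, pageSize, totalVolumes int) (start, end int) {
    start = (page-1)*pageSize + 1
    end = page * pageSize
    if end > totalVolumes {
        end = totalVolumes
    }
    return start, end
}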
-
-
-
- Show:
-
- 5
- 10
- 25
- 50
- 100
-
- per page
-
-
-
- if data.Collection != "" {
-
- if data.Collection == "default" {
-
Collection: default
- } else {
-
Collection: {data.Collection}
- }
-
Clear Filter
-
- }
-
+
+
+
+ EC Storage Note:
+ EC volumes use erasure coding ({ fmt.Sprintf("%d+%d", erasure_coding.DataShardsCount, erasure_coding.ParityShardsCount) }) which stores data across { fmt.Sprintf("%d", erasure_coding.TotalShardsCount) } shards with redundancy.
+ Physical storage is approximately { fmt.Sprintf("%.1fx", float64(erasure_coding.TotalShardsCount)/float64(erasure_coding.DataShardsCount)) } the original logical data size due to { fmt.Sprintf("%d", erasure_coding.ParityShardsCount) } parity shards.
+
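// The arithmetic behind the storage note above, using the constants the template already
// imports from weed/storage/erasure_coding (10 data + 4 parity shards at the time of
// writing); ecOverhead is a hypothetical helper shown only for illustration:
func ecOverhead() float64 {
    // 10 data + 4 parity = 14 shards per volume; the parity shards let missing shards be
    // rebuilt, at the cost of extra physical storage per logical byte.
    return float64(erasure_coding.TotalShardsCount) / float64(erasure_coding.DataShardsCount) // 14.0/10.0 = 1.4
}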
-
-
-
-
-
-
- Volume ID
- if data.SortBy == "volume_id" {
- if data.SortOrder == "asc" {
-
- } else {
-
- }
+
+
+
+
+
+
+
+ Volume ID
+ if data.SortBy == "volume_id" {
+ if data.SortOrder == "asc" {
+
} else {
-
+
}
-
-
- if data.ShowCollectionColumn {
-
-
- Collection
- if data.SortBy == "collection" {
- if data.SortOrder == "asc" {
-
- } else {
-
- }
- } else {
-
- }
-
-
- }
+ } else {
+
+ }
+
+
+ if data.ShowCollectionColumn {
-
- Shard Count
- if data.SortBy == "total_shards" {
+
+ Collection
+ if data.SortBy == "collection" {
if data.SortOrder == "asc" {
} else {
@@ -190,142 +155,184 @@ templ ClusterEcVolumes(data dash.ClusterEcVolumesData) {
}
- Shard Size
- Shard Locations
-
-
- Status
- if data.SortBy == "completeness" {
- if data.SortOrder == "asc" {
-
- } else {
-
- }
+ }
+
+
+ Shard Count
+ if data.SortBy == "total_shards" {
+ if data.SortOrder == "asc" {
+
} else {
-
+
}
-
-
- if data.ShowDataCenterColumn {
- Data Centers
- }
- Actions
-
-
-
- for _, volume := range data.EcVolumes {
-
-
- {fmt.Sprintf("%d", volume.VolumeID)}
-
- if data.ShowCollectionColumn {
-
- if volume.Collection != "" {
-
- {volume.Collection}
-
- } else {
-
- default
-
- }
-
+ } else {
+
}
+
+
+ Shard Size
+ Shard Locations
+
+
+ Status
+ if data.SortBy == "completeness" {
+ if data.SortOrder == "asc" {
+
+ } else {
+
+ }
+ } else {
+
+ }
+
+
+ if data.ShowDataCenterColumn {
+ Data Centers
+ }
+ Actions
+
+
+
+ for _, volume := range data.EcVolumes {
+
+
+ {fmt.Sprintf("%d", volume.VolumeID)}
+
+ if data.ShowCollectionColumn {
- {fmt.Sprintf("%d/14", volume.TotalShards)}
-
-
- @displayShardSizes(volume.ShardSizes)
-
-
- @displayVolumeDistribution(volume)
+ if volume.Collection != "" {
+
+ {volume.Collection}
+
+ } else {
+
+ default
+
+ }
+ }
+
+ {fmt.Sprintf("%d/%d", volume.TotalShards, erasure_coding.TotalShardsCount)}
+
+
+ @displayShardSizes(volume.ShardSizes)
+
+
+ @displayVolumeDistribution(volume)
+
+
+ @displayEcVolumeStatus(volume)
+
+ if data.ShowDataCenterColumn {
- @displayEcVolumeStatus(volume)
-
- if data.ShowDataCenterColumn {
-
- for i, dc := range volume.DataCenters {
- if i > 0 {
- ,
- }
- {dc}
+ for i, dc := range volume.DataCenters {
+ if i > 0 {
+ ,
}
-
- }
-
-
- {dc}
+ }
+
+ }
+
+
+
+
+
+ if !volume.IsComplete {
+
-
+ title="Repair missing shards">
+
- if !volume.IsComplete {
-
-
-
- }
-
-
-
- }
-
-
-
+ }
+
+
+
+ }
+
+
+
-
- if data.TotalPages > 1 {
-
-
+
+ {fmt.Sprintf("%d", data.TotalPages)}
+
+ }
+
+ if data.Page < data.TotalPages {
+
+
+
+
+
+
+
+
+
+
+ }
+
+
+ }
-
+
-
-
-}
-
-// displayShardLocationsHTML renders shard locations as proper HTML
-templ displayShardLocationsHTML(shardLocations map[int]string) {
- if len(shardLocations) == 0 {
-
No shards
- } else {
- for i, serverInfo := range groupShardsByServer(shardLocations) {
- if i > 0 {
-
- }
-
-
- { serverInfo.Server }
- :
- { serverInfo.ShardRanges }
- }
- }
}
// displayShardSizes renders shard sizes in a compact format
@@ -439,13 +450,7 @@ func groupShardsByServer(shardLocations map[int]string) []ServerShardInfo {
var serverInfos []ServerShardInfo
for server, shards := range serverShards {
// Sort shards for each server
- for i := 0; i < len(shards); i++ {
- for j := i + 1; j < len(shards); j++ {
- if shards[i] > shards[j] {
- shards[i], shards[j] = shards[j], shards[i]
- }
- }
- }
+ sort.Ints(shards)
// Format shard ranges compactly
shardRanges := formatShardRanges(shards)
@@ -456,56 +461,9 @@ func groupShardsByServer(shardLocations map[int]string) []ServerShardInfo {
}
// Sort by server name
- for i := 0; i < len(serverInfos); i++ {
- for j := i + 1; j < len(serverInfos); j++ {
- if serverInfos[i].Server > serverInfos[j].Server {
- serverInfos[i], serverInfos[j] = serverInfos[j], serverInfos[i]
- }
- }
- }
-
- return serverInfos
-}
-
-// groupShardsByServerWithSizes groups shards by server and formats ranges with sizes
-func groupShardsByServerWithSizes(shardLocations map[int]string, shardSizes map[int]int64) []ServerShardInfo {
- if len(shardLocations) == 0 {
- return []ServerShardInfo{}
- }
-
- // Group shards by server
- serverShards := make(map[string][]int)
- for shardId, server := range shardLocations {
- serverShards[server] = append(serverShards[server], shardId)
- }
-
- var serverInfos []ServerShardInfo
- for server, shards := range serverShards {
- // Sort shards for each server
- for i := 0; i < len(shards); i++ {
- for j := i + 1; j < len(shards); j++ {
- if shards[i] > shards[j] {
- shards[i], shards[j] = shards[j], shards[i]
- }
- }
- }
-
- // Format shard ranges compactly with sizes
- shardRanges := formatShardRangesWithSizes(shards, shardSizes)
- serverInfos = append(serverInfos, ServerShardInfo{
- Server: server,
- ShardRanges: shardRanges,
- })
- }
-
- // Sort by server name
- for i := 0; i < len(serverInfos); i++ {
- for j := i + 1; j < len(serverInfos); j++ {
- if serverInfos[i].Server > serverInfos[j].Server {
- serverInfos[i], serverInfos[j] = serverInfos[j], serverInfos[i]
- }
- }
- }
+ sort.Slice(serverInfos, func(i, j int) bool {
+ return serverInfos[i].Server < serverInfos[j].Server
+ })
return serverInfos
}
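// A minimal illustration of the stdlib sort idiom this diff switches to, replacing the
// removed bubble sorts (exampleSortUsage is hypothetical; ServerShardInfo is the struct
// declared elsewhere in this file):
func exampleSortUsage() {
    shards := []int{7, 0, 3, 1}
    sort.Ints(shards) // ascending shard IDs: [0 1 3 7]

    infos := []ServerShardInfo{{Server: "srv-b:8080"}, {Server: "srv-a:8080"}}
    sort.Slice(infos, func(i, j int) bool {
        return infos[i].Server < infos[j].Server
    })
    // infos is now ordered by server name: srv-a:8080, srv-b:8080
}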
@@ -544,72 +502,6 @@ func formatShardRanges(shards []int) string {
return strings.Join(ranges, ",")
}
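// formatShardRanges itself is untouched by this diff (only its final return is shown
// above); a self-contained sketch of the range compaction it performs, assuming ascending
// input — compactRanges is a hypothetical stand-in, not the real helper:
//
//     compactRanges([]int{0, 1, 2, 3, 7, 9, 10}) // "0-3,7,9-10"
func compactRanges(shards []int) string {
    if len(shards) == 0 {
        return ""
    }
    var ranges []string
    start, end := shards[0], shards[0]
    flush := func() {
        if start == end {
            ranges = append(ranges, fmt.Sprintf("%d", start))
        } else {
            ranges = append(ranges, fmt.Sprintf("%d-%d", start, end))
        }
    }
    for _, s := range shards[1:] {
        if s == end+1 {
            end = s
            continue
        }
        flush()
        start, end = s, s
    }
    flush()
    return strings.Join(ranges, ",")
}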
-// Helper function to format shard ranges with sizes (e.g., "0(1.2MB),1-3(2.5MB),7(800KB)")
-func formatShardRangesWithSizes(shards []int, shardSizes map[int]int64) string {
- if len(shards) == 0 {
- return ""
- }
-
- var ranges []string
- start := shards[0]
- end := shards[0]
- var totalSize int64
-
- for i := 1; i < len(shards); i++ {
- if shards[i] == end+1 {
- end = shards[i]
- totalSize += shardSizes[shards[i]]
- } else {
- // Add current range with size
- if start == end {
- size := shardSizes[start]
- if size > 0 {
- ranges = append(ranges, fmt.Sprintf("%d(%s)", start, bytesToHumanReadable(size)))
- } else {
- ranges = append(ranges, fmt.Sprintf("%d", start))
- }
- } else {
- // Calculate total size for the range
- rangeSize := shardSizes[start]
- for j := start + 1; j <= end; j++ {
- rangeSize += shardSizes[j]
- }
- if rangeSize > 0 {
- ranges = append(ranges, fmt.Sprintf("%d-%d(%s)", start, end, bytesToHumanReadable(rangeSize)))
- } else {
- ranges = append(ranges, fmt.Sprintf("%d-%d", start, end))
- }
- }
- start = shards[i]
- end = shards[i]
- totalSize = shardSizes[shards[i]]
- }
- }
-
- // Add the last range
- if start == end {
- size := shardSizes[start]
- if size > 0 {
- ranges = append(ranges, fmt.Sprintf("%d(%s)", start, bytesToHumanReadable(size)))
- } else {
- ranges = append(ranges, fmt.Sprintf("%d", start))
- }
- } else {
- // Calculate total size for the range
- rangeSize := shardSizes[start]
- for j := start + 1; j <= end; j++ {
- rangeSize += shardSizes[j]
- }
- if rangeSize > 0 {
- ranges = append(ranges, fmt.Sprintf("%d-%d(%s)", start, end, bytesToHumanReadable(rangeSize)))
- } else {
- ranges = append(ranges, fmt.Sprintf("%d-%d", start, end))
- }
- }
-
- return strings.Join(ranges, ",")
-}
-
// Helper function to convert bytes to human readable format
func bytesToHumanReadable(bytes int64) string {
const unit = 1024
@@ -680,13 +572,7 @@ func formatIndividualShardSizes(shardSizes map[int]int64) string {
var groupStrs []string
for size, shardIds := range sizeGroups {
// Sort shard IDs
- for i := 0; i < len(shardIds); i++ {
- for j := i + 1; j < len(shardIds); j++ {
- if shardIds[i] > shardIds[j] {
- shardIds[i], shardIds[j] = shardIds[j], shardIds[i]
- }
- }
- }
+ sort.Ints(shardIds)
var idRanges []string
if len(shardIds) <= erasure_coding.ParityShardsCount {
@@ -715,18 +601,27 @@ templ displayVolumeDistribution(volume dash.EcVolumeWithShards) {
}
-// displayEcVolumeStatus shows an improved status display for EC volumes
+// displayEcVolumeStatus shows an improved status display for EC volumes.
+// Status thresholds are based on EC recovery capability:
+// - Critical: More than DataShardsCount missing (data is unrecoverable)
+// - Degraded: More than half of DataShardsCount missing (high risk)
+// - Incomplete: More than half of ParityShardsCount missing (reduced redundancy)
+// - Minor Issues: Few shards missing (still fully recoverable)
templ displayEcVolumeStatus(volume dash.EcVolumeWithShards) {
if volume.IsComplete {
} else {
if len(volume.MissingShards) > erasure_coding.DataShardsCount {
+ // Unrecoverable: more shards missing than EC can reconstruct
} else if len(volume.MissingShards) > (erasure_coding.DataShardsCount/2) {
+ // High risk: approaching unrecoverable state
} else if len(volume.MissingShards) > (erasure_coding.ParityShardsCount/2) {
+ // Reduced redundancy but still recoverable
} else {
+ // Minor: few shards missing, fully recoverable with good margin
}
}
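// A plain-Go sketch of the tiering the new doc comment describes (classifyEcVolume is a
// hypothetical helper; the real rendering lives in the displayEcVolumeStatus template above):
func classifyEcVolume(missing int, isComplete bool) string {
    switch {
    case isComplete:
        return "Complete"
    case missing > erasure_coding.DataShardsCount:
        return "Critical" // more missing than EC can ever reconstruct
    case missing > erasure_coding.DataShardsCount/2:
        return "Degraded" // high risk of becoming unrecoverable
    case missing > erasure_coding.ParityShardsCount/2:
        return "Incomplete" // reduced redundancy
    default:
        return "Minor Issues" // few shards missing, still recoverable
    }
}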
diff --git a/weed/admin/view/app/cluster_ec_volumes_templ.go b/weed/admin/view/app/cluster_ec_volumes_templ.go
index 3220c057f..11edd73d7 100644
--- a/weed/admin/view/app/cluster_ec_volumes_templ.go
+++ b/weed/admin/view/app/cluster_ec_volumes_templ.go
@@ -12,6 +12,7 @@ import (
"fmt"
"github.com/seaweedfs/seaweedfs/weed/admin/dash"
"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
+ "sort"
"strings"
)
@@ -36,434 +37,415 @@ func ClusterEcVolumes(data dash.ClusterEcVolumesData) templ.Component {
templ_7745c5c3_Var1 = templ.NopComponent
}
ctx = templ.ClearChildren(ctx)
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "
EC Volumes (")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, " EC Volumes")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- var templ_7745c5c3_Var2 string
- templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalVolumes))
+ if data.Collection != "" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "
")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.Collection == "default" {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "
Collection: default ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ } else {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "
Collection: ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var2 string
+ templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(data.Collection)
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `cluster_ec_volumes.templ`, Line: 25, Col: 90}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, " ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "
Clear Filter")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "
Total Volumes ")
+ if data.PageSize == 100 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, " selected=\"selected\"")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, ">100 per page Refresh
Total Volumes ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var3 string
templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalVolumes))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 39, Col: 86}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `cluster_ec_volumes.templ`, Line: 58, Col: 82}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, " EC encoded volumes
Total Shards ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "
Total Shards ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var4 string
templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalShards))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 55, Col: 85}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `cluster_ec_volumes.templ`, Line: 73, Col: 81}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, " Distributed shards
Complete Volumes ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "
Healthy Volumes ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var5 string
templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.CompleteVolumes))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 71, Col: 89}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `cluster_ec_volumes.templ`, Line: 88, Col: 85}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, " All shards present
Incomplete Volumes ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, " All ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var6 string
- templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.IncompleteVolumes))
+ templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", erasure_coding.TotalShardsCount))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 87, Col: 91}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `cluster_ec_volumes.templ`, Line: 89, Col: 91}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "Missing shards
EC Storage Note: EC volumes use erasure coding (")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, " shards present
Degraded Volumes ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var7 string
- templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d+%d", erasure_coding.DataShardsCount, erasure_coding.ParityShardsCount))
+ templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.IncompleteVolumes))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 103, Col: 131}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `cluster_ec_volumes.templ`, Line: 104, Col: 87}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, ") which stores data across ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, " Incomplete/Critical
EC Storage Note: EC volumes use erasure coding (")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var8 string
- templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", erasure_coding.TotalShardsCount))
+ templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d+%d", erasure_coding.DataShardsCount, erasure_coding.ParityShardsCount))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 103, Col: 212}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `cluster_ec_volumes.templ`, Line: 120, Col: 127}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, " shards with redundancy. Physical storage is approximately ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, ") which stores data across ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var9 string
- templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.1fx", float64(erasure_coding.TotalShardsCount)/float64(erasure_coding.DataShardsCount)))
+ templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", erasure_coding.TotalShardsCount))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 104, Col: 150}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `cluster_ec_volumes.templ`, Line: 120, Col: 208}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, " the original logical data size due to ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, " shards with redundancy. Physical storage is approximately ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var10 string
- templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", erasure_coding.ParityShardsCount))
+ templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.1fx", float64(erasure_coding.TotalShardsCount)/float64(erasure_coding.DataShardsCount)))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 104, Col: 244}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `cluster_ec_volumes.templ`, Line: 121, Col: 146}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, " parity shards.
Showing ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, " the original logical data size due to ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var11 string
- templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", (data.Page-1)*data.PageSize+1))
+ templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", erasure_coding.ParityShardsCount))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 111, Col: 79}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `cluster_ec_volumes.templ`, Line: 121, Col: 240}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, " to ")
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- var templ_7745c5c3_Var12 string
- templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", func() int {
- end := data.Page * data.PageSize
- if end > data.TotalVolumes {
- return data.TotalVolumes
- }
- return end
- }()))
- if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 117, Col: 24}
- }
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12))
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, " of ")
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- var templ_7745c5c3_Var13 string
- templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalVolumes))
- if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 117, Col: 66}
- }
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13))
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, " volumes Show: 5 10 25 50 100 per page
")
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- if data.Collection != "" {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "
")
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- if data.Collection == "default" {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "
Collection: default ")
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- } else {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, "
Collection: ")
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- var templ_7745c5c3_Var14 string
- templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(data.Collection)
- if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 138, Col: 91}
- }
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14))
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, " ")
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- }
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "
Clear Filter ")
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- }
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 29, "
Volume ID ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, " parity shards.Volume ID ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if data.SortBy == "volume_id" {
if data.SortOrder == "asc" {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, " ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, " ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, " ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, " ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
} else {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, " ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 29, " ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, " ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, "")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if data.ShowCollectionColumn {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "Collection ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, " Collection ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if data.SortBy == "collection" {
if data.SortOrder == "asc" {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, " ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, " ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, " ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, " ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
} else {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 37, " ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, " ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 38, " ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 39, "Shard Count ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, " Shard Count ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if data.SortBy == "total_shards" {
if data.SortOrder == "asc" {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 40, " ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 37, " ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 41, " ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 38, " ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
} else {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 42, " ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 39, " ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 43, " Shard Size Shard Locations Status ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 40, " Shard Size Shard Locations Status ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if data.SortBy == "completeness" {
if data.SortOrder == "asc" {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 44, " ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 41, " ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 45, " ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 42, " ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
} else {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 46, " ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 43, " ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 47, " ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 44, "")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if data.ShowDataCenterColumn {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 48, "Data Centers ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 45, "Data Centers ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 49, "Actions ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 46, "Actions ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
for _, volume := range data.EcVolumes {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 50, "")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 47, "")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- var templ_7745c5c3_Var15 string
- templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", volume.VolumeID))
+ var templ_7745c5c3_Var12 string
+ templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", volume.VolumeID))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 219, Col: 75}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `cluster_ec_volumes.templ`, Line: 199, Col: 85}
}
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15))
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 51, " ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 48, "")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if data.ShowCollectionColumn {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 52, "")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 49, " ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if volume.Collection != "" {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 53, "")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 50, "")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- var templ_7745c5c3_Var16 string
- templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(volume.Collection)
+ var templ_7745c5c3_Var14 string
+ templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(volume.Collection)
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 225, Col: 101}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `cluster_ec_volumes.templ`, Line: 205, Col: 97}
}
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16))
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 54, " ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 52, " ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 55, "default ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 53, "default ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 56, " ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 55, "")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 57, "")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 56, " ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- var templ_7745c5c3_Var17 string
- templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d/14", volume.TotalShards))
+ var templ_7745c5c3_Var16 string
+ templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d/%d", volume.TotalShards, erasure_coding.TotalShardsCount))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 235, Col: 104}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `cluster_ec_volumes.templ`, Line: 215, Col: 133}
}
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17))
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 58, " ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 57, " ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
@@ -471,7 +453,7 @@ func ClusterEcVolumes(data dash.ClusterEcVolumesData) templ.Component {
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 59, " ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 58, " ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
@@ -479,7 +461,7 @@ func ClusterEcVolumes(data dash.ClusterEcVolumesData) templ.Component {
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 60, " ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 59, " ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
@@ -487,296 +469,297 @@ func ClusterEcVolumes(data dash.ClusterEcVolumesData) templ.Component {
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 61, " ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 60, "")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if data.ShowDataCenterColumn {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 62, "")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 61, " ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
for i, dc := range volume.DataCenters {
if i > 0 {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 63, ", ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 62, ", ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 64, " ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 63, " ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- var templ_7745c5c3_Var18 string
- templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(dc)
+ var templ_7745c5c3_Var17 string
+ templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(dc)
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 252, Col: 85}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `cluster_ec_volumes.templ`, Line: 232, Col: 81}
}
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18))
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 65, " ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 64, " ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 66, " ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 65, "")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 67, " ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 67, "\" title=\"View EC volume details\"> ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if !volume.IsComplete {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 69, " ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 69, "\" title=\"Repair missing shards\"> ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 71, "
")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 70, " ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 72, "
")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 71, "
")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if data.TotalPages > 1 {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 73, "
")
- if templ_7745c5c3_Err != nil {
- return templ_7745c5c3_Err
- }
- return nil
- })
-}
-
-// displayShardLocationsHTML renders shard locations as proper HTML
-func displayShardLocationsHTML(shardLocations map[int]string) templ.Component {
- return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
- templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
- if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil {
- return templ_7745c5c3_CtxErr
- }
- templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
- if !templ_7745c5c3_IsBuffer {
- defer func() {
- templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
- if templ_7745c5c3_Err == nil {
- templ_7745c5c3_Err = templ_7745c5c3_BufErr
+ var templ_7745c5c3_Var23 string
+ templ_7745c5c3_Var23, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.Page))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `cluster_ec_volumes.templ`, Line: 297, Col: 73}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var23))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 83, "")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ if data.Page < data.TotalPages {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 84, "
No shards")
+ var templ_7745c5c3_Var24 string
+ templ_7745c5c3_Var24, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.Page+1))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `cluster_ec_volumes.templ`, Line: 302, Col: 122}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var24))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 85, "\">")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ var templ_7745c5c3_Var25 string
+ templ_7745c5c3_Var25, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.Page+1))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `cluster_ec_volumes.templ`, Line: 302, Col: 156}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var25))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 86, " ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 87, "")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- } else {
- for i, serverInfo := range groupShardsByServer(shardLocations) {
- if i > 0 {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 89, "
")
+ if data.Page < data.TotalPages-2 {
+ if data.Page < data.TotalPages-3 {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 88, "
... ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 90, "
")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 90, "\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- var templ_7745c5c3_Var29 string
- templ_7745c5c3_Var29, templ_7745c5c3_Err = templ.JoinStringErrs(serverInfo.Server)
+ var templ_7745c5c3_Var27 string
+ templ_7745c5c3_Var27, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalPages))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 392, Col: 24}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `cluster_ec_volumes.templ`, Line: 314, Col: 164}
}
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var29))
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var27))
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 91, " ")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ if data.Page < data.TotalPages {
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 92, ": ")
+ var templ_7745c5c3_Var28 string
+ templ_7745c5c3_Var28, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.Page+1))
+ if templ_7745c5c3_Err != nil {
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `cluster_ec_volumes.templ`, Line: 320, Col: 122}
+ }
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var28))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- var templ_7745c5c3_Var30 string
- templ_7745c5c3_Var30, templ_7745c5c3_Err = templ.JoinStringErrs(serverInfo.ShardRanges)
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 93, "\">
")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 95, "")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
+ }
+ }
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 96, "")
+ if templ_7745c5c3_Err != nil {
+ return templ_7745c5c3_Err
}
return nil
})
@@ -799,13 +782,13 @@ func displayShardSizes(shardSizes map[int]int64) templ.Component {
}()
}
ctx = templ.InitializeContext(ctx)
- templ_7745c5c3_Var31 := templ.GetChildren(ctx)
- if templ_7745c5c3_Var31 == nil {
- templ_7745c5c3_Var31 = templ.NopComponent
+ templ_7745c5c3_Var30 := templ.GetChildren(ctx)
+ if templ_7745c5c3_Var30 == nil {
+ templ_7745c5c3_Var30 = templ.NopComponent
}
ctx = templ.ClearChildren(ctx)
if len(shardSizes) == 0 {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 93, "
- ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 97, "
- ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
@@ -836,44 +819,44 @@ func renderShardSizesContent(shardSizes map[int]int64) templ.Component {
}()
}
ctx = templ.InitializeContext(ctx)
- templ_7745c5c3_Var32 := templ.GetChildren(ctx)
- if templ_7745c5c3_Var32 == nil {
- templ_7745c5c3_Var32 = templ.NopComponent
+ templ_7745c5c3_Var31 := templ.GetChildren(ctx)
+ if templ_7745c5c3_Var31 == nil {
+ templ_7745c5c3_Var31 = templ.NopComponent
}
ctx = templ.ClearChildren(ctx)
if areAllShardSizesSame(shardSizes) {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 94, "
")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 98, " ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- var templ_7745c5c3_Var33 string
- templ_7745c5c3_Var33, templ_7745c5c3_Err = templ.JoinStringErrs(getCommonShardSize(shardSizes))
+ var templ_7745c5c3_Var32 string
+ templ_7745c5c3_Var32, templ_7745c5c3_Err = templ.JoinStringErrs(getCommonShardSize(shardSizes))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 412, Col: 60}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `cluster_ec_volumes.templ`, Line: 423, Col: 60}
}
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var33))
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var32))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 95, " ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 99, " ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 96, "
")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 100, "
")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- var templ_7745c5c3_Var34 string
- templ_7745c5c3_Var34, templ_7745c5c3_Err = templ.JoinStringErrs(formatIndividualShardSizes(shardSizes))
+ var templ_7745c5c3_Var33 string
+ templ_7745c5c3_Var33, templ_7745c5c3_Err = templ.JoinStringErrs(formatIndividualShardSizes(shardSizes))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 416, Col: 43}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `cluster_ec_volumes.templ`, Line: 427, Col: 43}
}
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var34))
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var33))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 97, "
")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 101, "
")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
@@ -903,13 +886,7 @@ func groupShardsByServer(shardLocations map[int]string) []ServerShardInfo {
var serverInfos []ServerShardInfo
for server, shards := range serverShards {
// Sort shards for each server
- for i := 0; i < len(shards); i++ {
- for j := i + 1; j < len(shards); j++ {
- if shards[i] > shards[j] {
- shards[i], shards[j] = shards[j], shards[i]
- }
- }
- }
+ sort.Ints(shards)
// Format shard ranges compactly
shardRanges := formatShardRanges(shards)
@@ -920,56 +897,9 @@ func groupShardsByServer(shardLocations map[int]string) []ServerShardInfo {
}
// Sort by server name
- for i := 0; i < len(serverInfos); i++ {
- for j := i + 1; j < len(serverInfos); j++ {
- if serverInfos[i].Server > serverInfos[j].Server {
- serverInfos[i], serverInfos[j] = serverInfos[j], serverInfos[i]
- }
- }
- }
-
- return serverInfos
-}
-
-// groupShardsByServerWithSizes groups shards by server and formats ranges with sizes
-func groupShardsByServerWithSizes(shardLocations map[int]string, shardSizes map[int]int64) []ServerShardInfo {
- if len(shardLocations) == 0 {
- return []ServerShardInfo{}
- }
-
- // Group shards by server
- serverShards := make(map[string][]int)
- for shardId, server := range shardLocations {
- serverShards[server] = append(serverShards[server], shardId)
- }
-
- var serverInfos []ServerShardInfo
- for server, shards := range serverShards {
- // Sort shards for each server
- for i := 0; i < len(shards); i++ {
- for j := i + 1; j < len(shards); j++ {
- if shards[i] > shards[j] {
- shards[i], shards[j] = shards[j], shards[i]
- }
- }
- }
-
- // Format shard ranges compactly with sizes
- shardRanges := formatShardRangesWithSizes(shards, shardSizes)
- serverInfos = append(serverInfos, ServerShardInfo{
- Server: server,
- ShardRanges: shardRanges,
- })
- }
-
- // Sort by server name
- for i := 0; i < len(serverInfos); i++ {
- for j := i + 1; j < len(serverInfos); j++ {
- if serverInfos[i].Server > serverInfos[j].Server {
- serverInfos[i], serverInfos[j] = serverInfos[j], serverInfos[i]
- }
- }
- }
+ sort.Slice(serverInfos, func(i, j int) bool {
+ return serverInfos[i].Server < serverInfos[j].Server
+ })
return serverInfos
}
@@ -1008,72 +938,6 @@ func formatShardRanges(shards []int) string {
return strings.Join(ranges, ",")
}
-// Helper function to format shard ranges with sizes (e.g., "0(1.2MB),1-3(2.5MB),7(800KB)")
-func formatShardRangesWithSizes(shards []int, shardSizes map[int]int64) string {
- if len(shards) == 0 {
- return ""
- }
-
- var ranges []string
- start := shards[0]
- end := shards[0]
- var totalSize int64
-
- for i := 1; i < len(shards); i++ {
- if shards[i] == end+1 {
- end = shards[i]
- totalSize += shardSizes[shards[i]]
- } else {
- // Add current range with size
- if start == end {
- size := shardSizes[start]
- if size > 0 {
- ranges = append(ranges, fmt.Sprintf("%d(%s)", start, bytesToHumanReadable(size)))
- } else {
- ranges = append(ranges, fmt.Sprintf("%d", start))
- }
- } else {
- // Calculate total size for the range
- rangeSize := shardSizes[start]
- for j := start + 1; j <= end; j++ {
- rangeSize += shardSizes[j]
- }
- if rangeSize > 0 {
- ranges = append(ranges, fmt.Sprintf("%d-%d(%s)", start, end, bytesToHumanReadable(rangeSize)))
- } else {
- ranges = append(ranges, fmt.Sprintf("%d-%d", start, end))
- }
- }
- start = shards[i]
- end = shards[i]
- totalSize = shardSizes[shards[i]]
- }
- }
-
- // Add the last range
- if start == end {
- size := shardSizes[start]
- if size > 0 {
- ranges = append(ranges, fmt.Sprintf("%d(%s)", start, bytesToHumanReadable(size)))
- } else {
- ranges = append(ranges, fmt.Sprintf("%d", start))
- }
- } else {
- // Calculate total size for the range
- rangeSize := shardSizes[start]
- for j := start + 1; j <= end; j++ {
- rangeSize += shardSizes[j]
- }
- if rangeSize > 0 {
- ranges = append(ranges, fmt.Sprintf("%d-%d(%s)", start, end, bytesToHumanReadable(rangeSize)))
- } else {
- ranges = append(ranges, fmt.Sprintf("%d-%d", start, end))
- }
- }
-
- return strings.Join(ranges, ",")
-}
-
// Helper function to convert bytes to human readable format
func bytesToHumanReadable(bytes int64) string {
const unit = 1024
@@ -1144,13 +1008,7 @@ func formatIndividualShardSizes(shardSizes map[int]int64) string {
var groupStrs []string
for size, shardIds := range sizeGroups {
// Sort shard IDs
- for i := 0; i < len(shardIds); i++ {
- for j := i + 1; j < len(shardIds); j++ {
- if shardIds[i] > shardIds[j] {
- shardIds[i], shardIds[j] = shardIds[j], shardIds[i]
- }
- }
- }
+ sort.Ints(shardIds)
var idRanges []string
if len(shardIds) <= erasure_coding.ParityShardsCount {
@@ -1188,25 +1046,25 @@ func displayVolumeDistribution(volume dash.EcVolumeWithShards) templ.Component {
}()
}
ctx = templ.InitializeContext(ctx)
- templ_7745c5c3_Var35 := templ.GetChildren(ctx)
- if templ_7745c5c3_Var35 == nil {
- templ_7745c5c3_Var35 = templ.NopComponent
+ templ_7745c5c3_Var34 := templ.GetChildren(ctx)
+ if templ_7745c5c3_Var34 == nil {
+ templ_7745c5c3_Var34 = templ.NopComponent
}
ctx = templ.ClearChildren(ctx)
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 98, "
")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 102, "
")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- var templ_7745c5c3_Var36 string
- templ_7745c5c3_Var36, templ_7745c5c3_Err = templ.JoinStringErrs(calculateVolumeDistributionSummary(volume))
+ var templ_7745c5c3_Var35 string
+ templ_7745c5c3_Var35, templ_7745c5c3_Err = templ.JoinStringErrs(calculateVolumeDistributionSummary(volume))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 714, Col: 52}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `cluster_ec_volumes.templ`, Line: 600, Col: 52}
}
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var36))
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var35))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 99, "
")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 103, "
")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
@@ -1214,7 +1072,12 @@ func displayVolumeDistribution(volume dash.EcVolumeWithShards) templ.Component {
})
}
-// displayEcVolumeStatus shows an improved status display for EC volumes
+// displayEcVolumeStatus shows an improved status display for EC volumes.
+// Status thresholds are based on EC recovery capability:
+// - Critical: More than DataShardsCount missing (data is unrecoverable)
+// - Degraded: More than half of DataShardsCount missing (high risk)
+// - Incomplete: More than half of ParityShardsCount missing (reduced redundancy)
+// - Minor Issues: Few shards missing (still fully recoverable)
func displayEcVolumeStatus(volume dash.EcVolumeWithShards) templ.Component {
return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
@@ -1231,86 +1094,86 @@ func displayEcVolumeStatus(volume dash.EcVolumeWithShards) templ.Component {
}()
}
ctx = templ.InitializeContext(ctx)
- templ_7745c5c3_Var37 := templ.GetChildren(ctx)
- if templ_7745c5c3_Var37 == nil {
- templ_7745c5c3_Var37 = templ.NopComponent
+ templ_7745c5c3_Var36 := templ.GetChildren(ctx)
+ if templ_7745c5c3_Var36 == nil {
+ templ_7745c5c3_Var36 = templ.NopComponent
}
ctx = templ.ClearChildren(ctx)
if volume.IsComplete {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 100, "
Complete")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 104, "
Complete")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else {
if len(volume.MissingShards) > erasure_coding.DataShardsCount {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 101, "
Critical (")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 105, " Critical (")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- var templ_7745c5c3_Var38 string
- templ_7745c5c3_Var38, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(volume.MissingShards)))
+ var templ_7745c5c3_Var37 string
+ templ_7745c5c3_Var37, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(volume.MissingShards)))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 724, Col: 130}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `cluster_ec_volumes.templ`, Line: 616, Col: 130}
}
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var38))
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var37))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 102, " missing) ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 106, " missing)")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else if len(volume.MissingShards) > (erasure_coding.DataShardsCount / 2) {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 103, "
Degraded (")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 107, " Degraded (")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- var templ_7745c5c3_Var39 string
- templ_7745c5c3_Var39, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(volume.MissingShards)))
+ var templ_7745c5c3_Var38 string
+ templ_7745c5c3_Var38, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(volume.MissingShards)))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 726, Col: 146}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `cluster_ec_volumes.templ`, Line: 619, Col: 146}
}
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var39))
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var38))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 104, " missing) ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 108, " missing)")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else if len(volume.MissingShards) > (erasure_coding.ParityShardsCount / 2) {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 105, "
Incomplete (")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 109, " Incomplete (")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- var templ_7745c5c3_Var40 string
- templ_7745c5c3_Var40, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(volume.MissingShards)))
+ var templ_7745c5c3_Var39 string
+ templ_7745c5c3_Var39, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(volume.MissingShards)))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 728, Col: 139}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `cluster_ec_volumes.templ`, Line: 622, Col: 139}
}
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var40))
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var39))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 106, " missing) ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 110, " missing)")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else {
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 107, "
Minor Issues (")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 111, " Minor Issues (")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- var templ_7745c5c3_Var41 string
- templ_7745c5c3_Var41, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(volume.MissingShards)))
+ var templ_7745c5c3_Var40 string
+ templ_7745c5c3_Var40, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(volume.MissingShards)))
if templ_7745c5c3_Err != nil {
- return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 730, Col: 138}
+ return templ.Error{Err: templ_7745c5c3_Err, FileName: `cluster_ec_volumes.templ`, Line: 625, Col: 138}
}
- _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var41))
+ _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var40))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
- templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 108, " missing) ")
+ templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 112, " missing)")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}