
ec is mostly working now

* distribution of ec shards needs improvement
* need configuration to enable ec
Branch: worker-execute-ec-tasks
chrislu committed 5 months ago · parent commit 3b2c75d2bd
Changed files (17, changed-line counts in parentheses):

  1. weed/admin/dash/ec_shard_management.go (339)
  2. weed/admin/dash/types.go (45)
  3. weed/admin/handlers/cluster_handlers.go (50)
  4. weed/admin/view/app/cluster_ec_shards.templ (10)
  5. weed/admin/view/app/cluster_ec_shards_templ.go (165)
  6. weed/admin/view/app/cluster_ec_volumes.templ (490)
  7. weed/admin/view/app/cluster_ec_volumes_templ.go (845)
  8. weed/admin/view/app/ec_volume_details.templ (2)
  9. weed/admin/view/app/ec_volume_details_templ.go (2)
  10. weed/admin/view/layout/layout.templ (2)
  11. weed/admin/view/layout/layout_templ.go (2)
  12. weed/pb/volume_server.proto (23)
  13. weed/pb/volume_server_pb/volume_server.pb.go (966)
  14. weed/pb/volume_server_pb/volume_server_grpc.pb.go (43)
  15. weed/server/volume_grpc_copy.go (117)
  16. weed/server/volume_server_handlers_admin.go (5)
  17. weed/worker/tasks/erasure_coding/ec.go (638)
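
For context when reading the diffs below: SeaweedFS erasure coding uses a 10+4 Reed-Solomon scheme, so erasure_coding.TotalShardsCount is 14, and a volume is "complete" when all 14 shards exist somewhere in the cluster (hence counts like "12/14" in the UI). A minimal sketch of those constants, reproduced here for illustration (the real ones live in weed/storage/erasure_coding):

package main

import "fmt"

// Illustrative copies of the erasure coding constants referenced throughout
// this commit; mirrors weed/storage/erasure_coding.
const (
	DataShardsCount   = 10
	ParityShardsCount = 4
	TotalShardsCount  = DataShardsCount + ParityShardsCount // 14
)

func main() {
	// A volume stays readable as long as any 10 of its 14 shards survive;
	// "complete" in the admin UI means all 14 are present.
	fmt.Printf("%d data + %d parity = %d shards\n",
		DataShardsCount, ParityShardsCount, TotalShardsCount)
}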

weed/admin/dash/ec_shard_management.go (339)

@@ -27,7 +27,7 @@ func (s *AdminServer) GetClusterEcShards(page int, pageSize int, sortBy string,
}
var ecShards []EcShardWithInfo
shardsPerVolume := make(map[uint32]int)
volumeShardsMap := make(map[uint32]map[int]bool) // volumeId -> set of shards present
volumesWithAllShards := 0
volumesWithMissingShards := 0
@@ -45,15 +45,22 @@ func (s *AdminServer) GetClusterEcShards(page int, pageSize int, sortBy string,
for _, diskInfo := range node.DiskInfos {
// Process EC shard information
for _, ecShardInfo := range diskInfo.EcShardInfos {
// Count shards per volume
shardsPerVolume[ecShardInfo.Id] += getShardCount(ecShardInfo.EcIndexBits)
volumeId := ecShardInfo.Id
// Initialize volume shards map if needed
if volumeShardsMap[volumeId] == nil {
volumeShardsMap[volumeId] = make(map[int]bool)
}
// Create individual shard entries for each shard this server has
shardBits := ecShardInfo.EcIndexBits
for shardId := 0; shardId < erasure_coding.TotalShardsCount; shardId++ {
if (shardBits & (1 << uint(shardId))) != 0 {
// Mark this shard as present for this volume
volumeShardsMap[volumeId][shardId] = true
ecShard := EcShardWithInfo{
VolumeID: ecShardInfo.Id,
VolumeID: volumeId,
ShardID: uint32(shardId),
Collection: ecShardInfo.Collection,
Size: 0, // EC shards don't have individual size in the API response
@@ -82,24 +89,37 @@ func (s *AdminServer) GetClusterEcShards(page int, pageSize int, sortBy string,
return nil, err
}
// Calculate completeness statistics
for volumeId, shardCount := range shardsPerVolume {
if shardCount == erasure_coding.TotalShardsCount {
// Calculate volume-level completeness (across all servers)
volumeCompleteness := make(map[uint32]bool)
volumeMissingShards := make(map[uint32][]int)
for volumeId, shardsPresent := range volumeShardsMap {
var missingShards []int
shardCount := len(shardsPresent)
// Find which shards are missing for this volume across ALL servers
for shardId := 0; shardId < erasure_coding.TotalShardsCount; shardId++ {
if !shardsPresent[shardId] {
missingShards = append(missingShards, shardId)
}
}
isComplete := (shardCount == erasure_coding.TotalShardsCount)
volumeCompleteness[volumeId] = isComplete
volumeMissingShards[volumeId] = missingShards
if isComplete {
volumesWithAllShards++
} else {
volumesWithMissingShards++
}
}
// Update completeness info for each shard
for i := range ecShards {
if ecShards[i].VolumeID == volumeId {
ecShards[i].IsComplete = (shardCount == erasure_coding.TotalShardsCount)
if !ecShards[i].IsComplete {
// Calculate missing shards
ecShards[i].MissingShards = getMissingShards(ecShards[i].EcIndexBits)
}
}
}
// Update completeness info for each shard based on volume-level completeness
for i := range ecShards {
volumeId := ecShards[i].VolumeID
ecShards[i].IsComplete = volumeCompleteness[volumeId]
ecShards[i].MissingShards = volumeMissingShards[volumeId]
}
// Filter by collection if specified
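
Editorial note on the hunk above: completeness is now derived from the union of shard bits reported by every server, not from a single server's EcIndexBits. A toy illustration (hypothetical bitmasks) of why the union matters:

package main

import "fmt"

const TotalShardsCount = 14

func main() {
	// Hypothetical EcIndexBits reported by two servers for the same volume:
	// server A holds shards 0-6, server B holds shards 7-13.
	serverBits := []uint32{0b0000000_1111111, 0b1111111_0000000}

	var union uint32
	for _, b := range serverBits {
		union |= b
	}

	present := 0
	for id := 0; id < TotalShardsCount; id++ {
		if union&(1<<uint(id)) != 0 {
			present++
		}
	}
	// Per-server the volume looks half-missing (7/14), but cluster-wide
	// it is complete.
	fmt.Println(present, present == TotalShardsCount) // 14 true
}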
@@ -149,7 +169,7 @@ func (s *AdminServer) GetClusterEcShards(page int, pageSize int, sortBy string,
data := &ClusterEcShardsData{
EcShards: paginatedShards,
TotalShards: totalShards,
TotalVolumes: len(shardsPerVolume),
TotalVolumes: len(volumeShardsMap),
LastUpdated: time.Now(),
// Pagination
@@ -175,11 +195,16 @@ func (s *AdminServer) GetClusterEcShards(page int, pageSize int, sortBy string,
FilterCollection: collection,
// EC specific statistics
ShardsPerVolume: shardsPerVolume,
ShardsPerVolume: make(map[uint32]int), // This will be recalculated below
VolumesWithAllShards: volumesWithAllShards,
VolumesWithMissingShards: volumesWithMissingShards,
}
// Recalculate ShardsPerVolume for the response
for volumeId, shardsPresent := range volumeShardsMap {
data.ShardsPerVolume[volumeId] = len(shardsPresent)
}
// Set single values when only one exists
if len(dataCenters) == 1 {
for dc := range dataCenters {
@@ -203,6 +228,238 @@ func (s *AdminServer) GetClusterEcShards(page int, pageSize int, sortBy string,
return data, nil
}
// GetClusterEcVolumes retrieves cluster EC volumes data grouped by volume ID with shard locations
func (s *AdminServer) GetClusterEcVolumes(page int, pageSize int, sortBy string, sortOrder string, collection string) (*ClusterEcVolumesData, error) {
// Set defaults
if page < 1 {
page = 1
}
if pageSize < 1 || pageSize > 1000 {
pageSize = 100
}
if sortBy == "" {
sortBy = "volume_id"
}
if sortOrder == "" {
sortOrder = "asc"
}
volumeData := make(map[uint32]*EcVolumeWithShards)
totalShards := 0
// Get detailed EC shard information via gRPC
err := s.WithMasterClient(func(client master_pb.SeaweedClient) error {
resp, err := client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
if err != nil {
return err
}
if resp.TopologyInfo != nil {
for _, dc := range resp.TopologyInfo.DataCenterInfos {
for _, rack := range dc.RackInfos {
for _, node := range rack.DataNodeInfos {
for _, diskInfo := range node.DiskInfos {
// Process EC shard information
for _, ecShardInfo := range diskInfo.EcShardInfos {
volumeId := ecShardInfo.Id
// Initialize volume data if needed
if volumeData[volumeId] == nil {
volumeData[volumeId] = &EcVolumeWithShards{
VolumeID: volumeId,
Collection: ecShardInfo.Collection,
TotalShards: 0,
IsComplete: false,
MissingShards: []int{},
ShardLocations: make(map[int]string),
DataCenters: []string{},
Servers: []string{},
}
}
volume := volumeData[volumeId]
// Track data centers and servers
dcExists := false
for _, existingDc := range volume.DataCenters {
if existingDc == dc.Id {
dcExists = true
break
}
}
if !dcExists {
volume.DataCenters = append(volume.DataCenters, dc.Id)
}
serverExists := false
for _, existingServer := range volume.Servers {
if existingServer == node.Id {
serverExists = true
break
}
}
if !serverExists {
volume.Servers = append(volume.Servers, node.Id)
}
// Process each shard this server has for this volume
shardBits := ecShardInfo.EcIndexBits
for shardId := 0; shardId < erasure_coding.TotalShardsCount; shardId++ {
if (shardBits & (1 << uint(shardId))) != 0 {
// Record shard location
volume.ShardLocations[shardId] = node.Id
totalShards++
}
}
}
}
}
}
}
}
return nil
})
if err != nil {
return nil, err
}
// Calculate completeness for each volume
completeVolumes := 0
incompleteVolumes := 0
for _, volume := range volumeData {
volume.TotalShards = len(volume.ShardLocations)
// Find missing shards
var missingShards []int
for shardId := 0; shardId < erasure_coding.TotalShardsCount; shardId++ {
if _, exists := volume.ShardLocations[shardId]; !exists {
missingShards = append(missingShards, shardId)
}
}
volume.MissingShards = missingShards
volume.IsComplete = (len(missingShards) == 0)
if volume.IsComplete {
completeVolumes++
} else {
incompleteVolumes++
}
}
// Convert map to slice
var ecVolumes []EcVolumeWithShards
for _, volume := range volumeData {
// Filter by collection if specified
if collection == "" || volume.Collection == collection {
ecVolumes = append(ecVolumes, *volume)
}
}
// Sort the results
sortEcVolumes(ecVolumes, sortBy, sortOrder)
// Calculate statistics for conditional display
dataCenters := make(map[string]bool)
collections := make(map[string]bool)
for _, volume := range ecVolumes {
for _, dc := range volume.DataCenters {
dataCenters[dc] = true
}
if volume.Collection != "" {
collections[volume.Collection] = true
}
}
// Pagination
totalVolumes := len(ecVolumes)
totalPages := (totalVolumes + pageSize - 1) / pageSize
startIndex := (page - 1) * pageSize
endIndex := startIndex + pageSize
if endIndex > totalVolumes {
endIndex = totalVolumes
}
if startIndex >= totalVolumes {
startIndex = 0
endIndex = 0
}
paginatedVolumes := ecVolumes[startIndex:endIndex]
// Build response
data := &ClusterEcVolumesData{
EcVolumes: paginatedVolumes,
TotalVolumes: totalVolumes,
LastUpdated: time.Now(),
// Pagination
Page: page,
PageSize: pageSize,
TotalPages: totalPages,
// Sorting
SortBy: sortBy,
SortOrder: sortOrder,
// Filtering
Collection: collection,
// Conditional display flags
ShowDataCenterColumn: len(dataCenters) > 1,
ShowRackColumn: false, // We don't track racks in this view for simplicity
ShowCollectionColumn: len(collections) > 1,
// Statistics
CompleteVolumes: completeVolumes,
IncompleteVolumes: incompleteVolumes,
TotalShards: totalShards,
}
return data, nil
}
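
The pagination above computes the page count with integer ceiling division; a quick worked check with illustrative numbers:

package main

import "fmt"

func main() {
	totalVolumes, pageSize := 101, 10
	// (n + size - 1) / size rounds up in integer arithmetic.
	totalPages := (totalVolumes + pageSize - 1) / pageSize
	fmt.Println(totalPages) // 11: ten full pages plus a final page of one
}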
// sortEcVolumes sorts EC volumes based on the specified field and order
func sortEcVolumes(volumes []EcVolumeWithShards, sortBy string, sortOrder string) {
sort.Slice(volumes, func(i, j int) bool {
var less bool
switch sortBy {
case "volume_id":
less = volumes[i].VolumeID < volumes[j].VolumeID
case "collection":
if volumes[i].Collection == volumes[j].Collection {
less = volumes[i].VolumeID < volumes[j].VolumeID
} else {
less = volumes[i].Collection < volumes[j].Collection
}
case "total_shards":
if volumes[i].TotalShards == volumes[j].TotalShards {
less = volumes[i].VolumeID < volumes[j].VolumeID
} else {
less = volumes[i].TotalShards < volumes[j].TotalShards
}
case "completeness":
// Complete volumes first, then by volume ID
if volumes[i].IsComplete == volumes[j].IsComplete {
less = volumes[i].VolumeID < volumes[j].VolumeID
} else {
less = volumes[i].IsComplete && !volumes[j].IsComplete
}
default:
less = volumes[i].VolumeID < volumes[j].VolumeID
}
if sortOrder == "desc" {
return !less
}
return less
})
}
// getShardCount returns the number of shards represented by the bitmap
func getShardCount(ecIndexBits uint32) int {
count := 0
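
The rest of getShardCount is cut off by the diff context here; it counts the set bits of the EcIndexBits bitmask. A standalone sketch of the same idea (using math/bits popcount instead of a loop, assumed equivalent in behavior):

package main

import (
	"fmt"
	"math/bits"
)

const TotalShardsCount = 14

// shardCount mirrors what getShardCount does: one set bit per present shard.
func shardCount(ecIndexBits uint32) int {
	return bits.OnesCount32(ecIndexBits)
}

// shardIds enumerates the present shard IDs, like the loops in the handlers above.
func shardIds(ecIndexBits uint32) []int {
	var ids []int
	for id := 0; id < TotalShardsCount; id++ {
		if ecIndexBits&(1<<uint(id)) != 0 {
			ids = append(ids, id)
		}
	}
	return ids
}

func main() {
	mask := uint32(0b101011) // shards 0, 1, 3, 5 present
	fmt.Println(shardCount(mask), shardIds(mask)) // 4 [0 1 3 5]
}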
@@ -230,44 +487,28 @@ func sortEcShards(shards []EcShardWithInfo, sortBy string, sortOrder string) {
sort.Slice(shards, func(i, j int) bool {
var less bool
switch sortBy {
case "volume_id":
if shards[i].VolumeID == shards[j].VolumeID {
less = shards[i].ShardID < shards[j].ShardID
} else {
less = shards[i].VolumeID < shards[j].VolumeID
}
case "shard_id":
if shards[i].ShardID == shards[j].ShardID {
less = shards[i].VolumeID < shards[j].VolumeID
} else {
less = shards[i].ShardID < shards[j].ShardID
}
case "collection":
if shards[i].Collection == shards[j].Collection {
less = shards[i].VolumeID < shards[j].VolumeID
} else {
less = shards[i].Collection < shards[j].Collection
}
less = shards[i].ShardID < shards[j].ShardID
case "server":
if shards[i].Server == shards[j].Server {
less = shards[i].VolumeID < shards[j].VolumeID
less = shards[i].ShardID < shards[j].ShardID // Secondary sort by shard ID
} else {
less = shards[i].Server < shards[j].Server
}
case "datacenter":
case "data_center":
if shards[i].DataCenter == shards[j].DataCenter {
less = shards[i].VolumeID < shards[j].VolumeID
less = shards[i].ShardID < shards[j].ShardID // Secondary sort by shard ID
} else {
less = shards[i].DataCenter < shards[j].DataCenter
}
case "rack":
if shards[i].Rack == shards[j].Rack {
less = shards[i].VolumeID < shards[j].VolumeID
less = shards[i].ShardID < shards[j].ShardID // Secondary sort by shard ID
} else {
less = shards[i].Rack < shards[j].Rack
}
default:
less = shards[i].VolumeID < shards[j].VolumeID
less = shards[i].ShardID < shards[j].ShardID
}
if sortOrder == "desc" {
@@ -278,7 +519,15 @@ func sortEcShards(shards []EcShardWithInfo, sortBy string, sortOrder string) {
}
// GetEcVolumeDetails retrieves detailed information about a specific EC volume
func (s *AdminServer) GetEcVolumeDetails(volumeID uint32) (*EcVolumeDetailsData, error) {
func (s *AdminServer) GetEcVolumeDetails(volumeID uint32, sortBy string, sortOrder string) (*EcVolumeDetailsData, error) {
// Set defaults
if sortBy == "" {
sortBy = "shard_id"
}
if sortOrder == "" {
sortOrder = "asc"
}
var shards []EcShardWithInfo
var collection string
dataCenters := make(map[string]bool)
@@ -364,10 +613,8 @@ func (s *AdminServer) GetEcVolumeDetails(volumeID uint32) (*EcVolumeDetailsData,
shards[i].MissingShards = missingShards
}
// Sort shards by ID
sort.Slice(shards, func(i, j int) bool {
return shards[i].ShardID < shards[j].ShardID
})
// Sort shards based on parameters
sortEcShards(shards, sortBy, sortOrder)
// Convert maps to slices
var dcList []string
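
One subtlety in the sorting helpers in this file: inverting the comparator with `return !less` makes it report true for equal elements in both argument orders, which violates the strict-weak-ordering contract sort.Slice assumes (equal keys do occur here, e.g. the same ShardID under two volumes). A sketch of a contract-safe descending sort, swapping operands instead of negating the result (illustrative, not the repository's code):

package main

import (
	"fmt"
	"sort"
)

type shard struct{ VolumeID, ShardID uint32 }

// sortShards keeps the comparator a valid strict weak ordering in both
// directions: for descending order, swap the operands rather than negate,
// so equal elements still compare "not less" either way.
func sortShards(ss []shard, order string) {
	sort.Slice(ss, func(i, j int) bool {
		a, b := ss[i], ss[j]
		if order == "desc" {
			a, b = b, a
		}
		if a.ShardID != b.ShardID {
			return a.ShardID < b.ShardID
		}
		return a.VolumeID < b.VolumeID
	})
}

func main() {
	ss := []shard{{2, 3}, {1, 3}, {1, 0}}
	sortShards(ss, "desc")
	fmt.Println(ss) // [{2 3} {1 3} {1 0}]
}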

weed/admin/dash/types.go (45)

@@ -450,3 +450,48 @@ type MaintenanceWorkersData struct {
}
// Maintenance system types are now in weed/admin/maintenance package
// EcVolumeWithShards represents an EC volume with its shard distribution
type EcVolumeWithShards struct {
VolumeID uint32 `json:"volume_id"`
Collection string `json:"collection"`
TotalShards int `json:"total_shards"`
IsComplete bool `json:"is_complete"`
MissingShards []int `json:"missing_shards"`
ShardLocations map[int]string `json:"shard_locations"` // shardId -> server
DataCenters []string `json:"data_centers"`
Servers []string `json:"servers"`
ModifiedTime int64 `json:"modified_time"`
}
// ClusterEcVolumesData represents the response for clustered EC volumes view
type ClusterEcVolumesData struct {
EcVolumes []EcVolumeWithShards `json:"ec_volumes"`
TotalVolumes int `json:"total_volumes"`
LastUpdated time.Time `json:"last_updated"`
// Pagination
Page int `json:"page"`
PageSize int `json:"page_size"`
TotalPages int `json:"total_pages"`
// Sorting
SortBy string `json:"sort_by"`
SortOrder string `json:"sort_order"`
// Filtering
Collection string `json:"collection"`
// Conditional display flags
ShowDataCenterColumn bool `json:"show_datacenter_column"`
ShowRackColumn bool `json:"show_rack_column"`
ShowCollectionColumn bool `json:"show_collection_column"`
// Statistics
CompleteVolumes int `json:"complete_volumes"`
IncompleteVolumes int `json:"incomplete_volumes"`
TotalShards int `json:"total_shards"`
// User context
Username string `json:"username"`
}
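
The json tags above define the wire format consumed by the EC volumes page. A trimmed, self-contained sketch of what one EcVolumeWithShards entry serializes to (field subset and values are illustrative):

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed copy of EcVolumeWithShards, just enough to show the JSON shape.
type EcVolumeWithShards struct {
	VolumeID       uint32         `json:"volume_id"`
	TotalShards    int            `json:"total_shards"`
	IsComplete     bool           `json:"is_complete"`
	MissingShards  []int          `json:"missing_shards"`
	ShardLocations map[int]string `json:"shard_locations"` // shardId -> server
}

func main() {
	v := EcVolumeWithShards{
		VolumeID:       42,
		TotalShards:    13,
		MissingShards:  []int{7},
		ShardLocations: map[int]string{0: "10.0.0.1:8080", 8: "10.0.0.2:8080"},
	}
	out, _ := json.MarshalIndent(v, "", "  ")
	fmt.Println(string(out)) // integer map keys become JSON strings: "0", "8"
}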

weed/admin/handlers/cluster_handlers.go (50)

@@ -161,31 +161,19 @@ func (h *ClusterHandlers) ShowClusterCollections(c *gin.Context) {
}
}
// ShowClusterEcShards renders the cluster EC shards page
// ShowClusterEcShards handles the cluster EC volumes page (grouped by volume)
func (h *ClusterHandlers) ShowClusterEcShards(c *gin.Context) {
// Get pagination and sorting parameters from query string
page := 1
if p := c.Query("page"); p != "" {
if parsed, err := strconv.Atoi(p); err == nil && parsed > 0 {
page = parsed
}
}
pageSize := 100
if ps := c.Query("pageSize"); ps != "" {
if parsed, err := strconv.Atoi(ps); err == nil && parsed > 0 && parsed <= 1000 {
pageSize = parsed
}
}
sortBy := c.DefaultQuery("sortBy", "volume_id")
sortOrder := c.DefaultQuery("sortOrder", "asc")
collection := c.Query("collection") // Optional collection filter
// Get cluster EC shards data
ecShardsData, err := h.adminServer.GetClusterEcShards(page, pageSize, sortBy, sortOrder, collection)
// Parse query parameters
page, _ := strconv.Atoi(c.DefaultQuery("page", "1"))
pageSize, _ := strconv.Atoi(c.DefaultQuery("page_size", "10"))
sortBy := c.DefaultQuery("sort_by", "volume_id")
sortOrder := c.DefaultQuery("sort_order", "asc")
collection := c.DefaultQuery("collection", "")
// Get data from admin server
data, err := h.adminServer.GetClusterEcVolumes(page, pageSize, sortBy, sortOrder, collection)
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get cluster EC shards: " + err.Error()})
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
@@ -194,15 +182,15 @@ func (h *ClusterHandlers) ShowClusterEcShards(c *gin.Context) {
if username == "" {
username = "admin"
}
ecShardsData.Username = username
data.Username = username
// Render HTML template
// Render template
c.Header("Content-Type", "text/html")
ecShardsComponent := app.ClusterEcShards(*ecShardsData)
layoutComponent := layout.Layout(c, ecShardsComponent)
ecVolumesComponent := app.ClusterEcVolumes(*data)
layoutComponent := layout.Layout(c, ecVolumesComponent)
err = layoutComponent.Render(c.Request.Context(), c.Writer)
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()})
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
}
@@ -222,8 +210,12 @@ func (h *ClusterHandlers) ShowEcVolumeDetails(c *gin.Context) {
return
}
// Parse sorting parameters
sortBy := c.DefaultQuery("sort_by", "shard_id")
sortOrder := c.DefaultQuery("sort_order", "asc")
// Get EC volume details
ecVolumeDetails, err := h.adminServer.GetEcVolumeDetails(uint32(volumeID))
ecVolumeDetails, err := h.adminServer.GetEcVolumeDetails(uint32(volumeID), sortBy, sortOrder)
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get EC volume details: " + err.Error()})
return
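
A note on the compact parsing in this file: `page, _ := strconv.Atoi(...)` turns a malformed query value into 0 and leans on GetClusterEcVolumes to clamp it back to a default. A small helper sketch (hypothetical, not in the repository) that folds the fallback into the handler instead:

package handlers

import (
	"strconv"

	"github.com/gin-gonic/gin"
)

// intQuery parses an integer query parameter, falling back to def when the
// value is missing, malformed, or below min.
func intQuery(c *gin.Context, key string, def, min int) int {
	v, err := strconv.Atoi(c.DefaultQuery(key, strconv.Itoa(def)))
	if err != nil || v < min {
		return def
	}
	return v
}

// Usage in ShowClusterEcShards would then look like:
//   page := intQuery(c, "page", 1, 1)
//   pageSize := intQuery(c, "page_size", 10, 1)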

weed/admin/view/app/cluster_ec_shards.templ (10)

@@ -198,7 +198,7 @@ templ ClusterEcShards(data dash.ClusterEcShardsData) {
</a>
</th>
}
<th class="text-dark">Status</th>
<th class="text-dark">Volume & Shard Status</th>
<th class="text-dark">Actions</th>
</tr>
</thead>
@@ -238,14 +238,18 @@ templ ClusterEcShards(data dash.ClusterEcShardsData) {
<td>
if shard.IsComplete {
<span class="badge bg-success">
<i class="fas fa-check me-1"></i>Complete
<i class="fas fa-check me-1"></i>Volume Complete
</span>
} else {
<span class="badge bg-warning">
<i class="fas fa-exclamation-triangle me-1"></i>
Missing {fmt.Sprintf("%d", len(shard.MissingShards))} shards
Volume Missing {fmt.Sprintf("%d", len(shard.MissingShards))} shards
</span>
}
<br/>
<small class="text-muted">
<i class="fas fa-database me-1"></i>Shard {fmt.Sprintf("%d", shard.ShardID)} present
</small>
</td>
<td>
<div class="btn-group" role="group">

weed/admin/view/app/cluster_ec_shards_templ.go (165)

@@ -307,7 +307,7 @@ func ClusterEcShards(data dash.ClusterEcShardsData) templ.Component {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 45, "<th class=\"text-dark\">Status</th><th class=\"text-dark\">Actions</th></tr></thead> <tbody>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 45, "<th class=\"text-dark\">Volume & Shard Status</th><th class=\"text-dark\">Actions</th></tr></thead> <tbody>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
@@ -436,19 +436,19 @@ func ClusterEcShards(data dash.ClusterEcShardsData) templ.Component {
return templ_7745c5c3_Err
}
if shard.IsComplete {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 61, "<span class=\"badge bg-success\"><i class=\"fas fa-check me-1\"></i>Complete</span>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 61, "<span class=\"badge bg-success\"><i class=\"fas fa-check me-1\"></i>Volume Complete</span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 62, "<span class=\"badge bg-warning\"><i class=\"fas fa-exclamation-triangle me-1\"></i> Missing ")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 62, "<span class=\"badge bg-warning\"><i class=\"fas fa-exclamation-triangle me-1\"></i> Volume Missing ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var13 string
templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(shard.MissingShards)))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 246, Col: 88}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 246, Col: 95}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13))
if templ_7745c5c3_Err != nil {
@@ -459,243 +459,256 @@ func ClusterEcShards(data dash.ClusterEcShardsData) templ.Component {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 64, "</td><td><div class=\"btn-group\" role=\"group\"><button type=\"button\" class=\"btn btn-sm btn-outline-primary\" onclick=\"showShardDetails(event)\" data-volume-id=\"")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 64, "<br><small class=\"text-muted\"><i class=\"fas fa-database me-1\"></i>Shard ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var14 string
templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", shard.VolumeID))
templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", shard.ShardID))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 254, Col: 90}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 251, Col: 107}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 65, "\" title=\"View EC volume details\"><i class=\"fas fa-info-circle\"></i></button> ")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 65, " present</small></td><td><div class=\"btn-group\" role=\"group\"><button type=\"button\" class=\"btn btn-sm btn-outline-primary\" onclick=\"showShardDetails(event)\" data-volume-id=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var15 string
templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", shard.VolumeID))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 258, Col: 90}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 66, "\" title=\"View EC volume details\"><i class=\"fas fa-info-circle\"></i></button> ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if !shard.IsComplete {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 66, "<button type=\"button\" class=\"btn btn-sm btn-outline-warning\" onclick=\"repairVolume(event)\" data-volume-id=\"")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 67, "<button type=\"button\" class=\"btn btn-sm btn-outline-warning\" onclick=\"repairVolume(event)\" data-volume-id=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var15 string
templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", shard.VolumeID))
var templ_7745c5c3_Var16 string
templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", shard.VolumeID))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 261, Col: 94}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 265, Col: 94}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15))
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 67, "\" title=\"Repair missing shards\"><i class=\"fas fa-wrench\"></i></button>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 68, "\" title=\"Repair missing shards\"><i class=\"fas fa-wrench\"></i></button>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 68, "</div></td></tr>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 69, "</div></td></tr>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 69, "</tbody></table></div><!-- Pagination -->")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 70, "</tbody></table></div><!-- Pagination -->")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if data.TotalPages > 1 {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 70, "<nav aria-label=\"EC Shards pagination\"><ul class=\"pagination justify-content-center\">")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 71, "<nav aria-label=\"EC Shards pagination\"><ul class=\"pagination justify-content-center\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if data.CurrentPage > 1 {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 71, "<li class=\"page-item\"><a class=\"page-link\" href=\"#\" onclick=\"goToPage(event)\" data-page=\"")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 72, "<li class=\"page-item\"><a class=\"page-link\" href=\"#\" onclick=\"goToPage(event)\" data-page=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var16 string
templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.CurrentPage-1))
var templ_7745c5c3_Var17 string
templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.CurrentPage-1))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 280, Col: 129}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 284, Col: 129}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16))
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 72, "\"><i class=\"fas fa-chevron-left\"></i></a></li>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 73, "\"><i class=\"fas fa-chevron-left\"></i></a></li>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 73, "<!-- First page -->")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 74, "<!-- First page -->")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if data.CurrentPage > 3 {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 74, "<li class=\"page-item\"><a class=\"page-link\" href=\"#\" onclick=\"goToPage(1)\">1</a></li>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 75, "<li class=\"page-item\"><a class=\"page-link\" href=\"#\" onclick=\"goToPage(1)\">1</a></li>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if data.CurrentPage > 4 {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 75, "<li class=\"page-item disabled\"><span class=\"page-link\">...</span></li>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 76, "<li class=\"page-item disabled\"><span class=\"page-link\">...</span></li>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 76, "<!-- Current page and neighbors -->")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 77, "<!-- Current page and neighbors -->")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if data.CurrentPage > 1 && data.CurrentPage-1 >= 1 {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 77, "<li class=\"page-item\"><a class=\"page-link\" href=\"#\" onclick=\"goToPage(event)\" data-page=\"")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 78, "<li class=\"page-item\"><a class=\"page-link\" href=\"#\" onclick=\"goToPage(event)\" data-page=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var17 string
templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.CurrentPage-1))
var templ_7745c5c3_Var18 string
templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.CurrentPage-1))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 301, Col: 129}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 305, Col: 129}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17))
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 78, "\">")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 79, "\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var18 string
templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.CurrentPage-1))
var templ_7745c5c3_Var19 string
templ_7745c5c3_Var19, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.CurrentPage-1))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 301, Col: 170}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 305, Col: 170}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18))
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var19))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 79, "</a></li>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 80, "</a></li>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 80, "<li class=\"page-item active\"><span class=\"page-link\">")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 81, "<li class=\"page-item active\"><span class=\"page-link\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var19 string
templ_7745c5c3_Var19, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.CurrentPage))
var templ_7745c5c3_Var20 string
templ_7745c5c3_Var20, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.CurrentPage))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 306, Col: 80}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 310, Col: 80}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var19))
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var20))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 81, "</span></li>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 82, "</span></li>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if data.CurrentPage < data.TotalPages && data.CurrentPage+1 <= data.TotalPages {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 82, "<li class=\"page-item\"><a class=\"page-link\" href=\"#\" onclick=\"goToPage(event)\" data-page=\"")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 83, "<li class=\"page-item\"><a class=\"page-link\" href=\"#\" onclick=\"goToPage(event)\" data-page=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var20 string
templ_7745c5c3_Var20, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.CurrentPage+1))
var templ_7745c5c3_Var21 string
templ_7745c5c3_Var21, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.CurrentPage+1))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 311, Col: 129}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 315, Col: 129}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var20))
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var21))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 83, "\">")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 84, "\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var21 string
templ_7745c5c3_Var21, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.CurrentPage+1))
var templ_7745c5c3_Var22 string
templ_7745c5c3_Var22, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.CurrentPage+1))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 311, Col: 170}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 315, Col: 170}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var21))
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var22))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 84, "</a></li>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 85, "</a></li>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 85, "<!-- Last page -->")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 86, "<!-- Last page -->")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if data.CurrentPage < data.TotalPages-2 {
if data.CurrentPage < data.TotalPages-3 {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 86, "<li class=\"page-item disabled\"><span class=\"page-link\">...</span></li>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 87, "<li class=\"page-item disabled\"><span class=\"page-link\">...</span></li>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 87, " <li class=\"page-item\"><a class=\"page-link\" href=\"#\" onclick=\"goToPage(event)\" data-page=\"")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 88, " <li class=\"page-item\"><a class=\"page-link\" href=\"#\" onclick=\"goToPage(event)\" data-page=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var22 string
templ_7745c5c3_Var22, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalPages))
var templ_7745c5c3_Var23 string
templ_7745c5c3_Var23, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalPages))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 323, Col: 126}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 327, Col: 126}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var22))
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var23))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 88, "\">")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 89, "\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var23 string
templ_7745c5c3_Var23, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalPages))
var templ_7745c5c3_Var24 string
templ_7745c5c3_Var24, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalPages))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 323, Col: 164}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 327, Col: 164}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var23))
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var24))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 89, "</a></li>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 90, "</a></li>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
if data.CurrentPage < data.TotalPages {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 90, "<li class=\"page-item\"><a class=\"page-link\" href=\"#\" onclick=\"goToPage(event)\" data-page=\"")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 91, "<li class=\"page-item\"><a class=\"page-link\" href=\"#\" onclick=\"goToPage(event)\" data-page=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var24 string
templ_7745c5c3_Var24, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.CurrentPage+1))
var templ_7745c5c3_Var25 string
templ_7745c5c3_Var25, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.CurrentPage+1))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 329, Col: 129}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 333, Col: 129}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var24))
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var25))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 91, "\"><i class=\"fas fa-chevron-right\"></i></a></li>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 92, "\"><i class=\"fas fa-chevron-right\"></i></a></li>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 92, "</ul></nav>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 93, "</ul></nav>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 93, "<!-- JavaScript --><script>\n function sortBy(field) {\n const currentSort = \"{data.SortBy}\";\n const currentOrder = \"{data.SortOrder}\";\n let newOrder = 'asc';\n \n if (currentSort === field && currentOrder === 'asc') {\n newOrder = 'desc';\n }\n \n updateUrl({\n sortBy: field,\n sortOrder: newOrder,\n page: 1\n });\n }\n\n function goToPage(event) {\n // Get data from the link element (not any child elements)\n const link = event.target.closest('a');\n const page = link.getAttribute('data-page');\n updateUrl({ page: page });\n }\n\n function changePageSize() {\n const pageSize = document.getElementById('pageSizeSelect').value;\n updateUrl({ pageSize: pageSize, page: 1 });\n }\n\n function updateUrl(params) {\n const url = new URL(window.location);\n Object.keys(params).forEach(key => {\n if (params[key]) {\n url.searchParams.set(key, params[key]);\n } else {\n url.searchParams.delete(key);\n }\n });\n window.location.href = url.toString();\n }\n\n function exportEcShards() {\n const url = new URL('/api/cluster/ec-shards/export', window.location.origin);\n const params = new URLSearchParams(window.location.search);\n params.forEach((value, key) => {\n url.searchParams.set(key, value);\n });\n window.open(url.toString(), '_blank');\n }\n\n function showShardDetails(event) {\n // Get data from the button element (not the icon inside it)\n const button = event.target.closest('button');\n const volumeId = button.getAttribute('data-volume-id');\n \n // Navigate to the EC volume details page\n window.location.href = `/cluster/ec-volumes/${volumeId}`;\n }\n\n function repairVolume(event) {\n // Get data from the button element (not the icon inside it)\n const button = event.target.closest('button');\n const volumeId = button.getAttribute('data-volume-id');\n if (confirm(`Are you sure you want to repair missing shards for volume ${volumeId}?`)) {\n fetch(`/api/cluster/volumes/${volumeId}/repair`, {\n method: 'POST',\n headers: {\n 'Content-Type': 'application/json',\n }\n })\n .then(response => response.json())\n .then(data => {\n if (data.success) {\n alert('Repair initiated successfully');\n location.reload();\n } else {\n alert('Failed to initiate repair: ' + data.error);\n }\n })\n .catch(error => {\n alert('Error: ' + error.message);\n });\n }\n }\n </script>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 94, "<!-- JavaScript --><script>\n function sortBy(field) {\n const currentSort = \"{data.SortBy}\";\n const currentOrder = \"{data.SortOrder}\";\n let newOrder = 'asc';\n \n if (currentSort === field && currentOrder === 'asc') {\n newOrder = 'desc';\n }\n \n updateUrl({\n sortBy: field,\n sortOrder: newOrder,\n page: 1\n });\n }\n\n function goToPage(event) {\n // Get data from the link element (not any child elements)\n const link = event.target.closest('a');\n const page = link.getAttribute('data-page');\n updateUrl({ page: page });\n }\n\n function changePageSize() {\n const pageSize = document.getElementById('pageSizeSelect').value;\n updateUrl({ pageSize: pageSize, page: 1 });\n }\n\n function updateUrl(params) {\n const url = new URL(window.location);\n Object.keys(params).forEach(key => {\n if (params[key]) {\n url.searchParams.set(key, params[key]);\n } else {\n url.searchParams.delete(key);\n }\n });\n window.location.href = url.toString();\n }\n\n function exportEcShards() {\n const url = new URL('/api/cluster/ec-shards/export', window.location.origin);\n const params = new URLSearchParams(window.location.search);\n params.forEach((value, key) => {\n url.searchParams.set(key, value);\n });\n window.open(url.toString(), '_blank');\n }\n\n function showShardDetails(event) {\n // Get data from the button element (not the icon inside it)\n const button = event.target.closest('button');\n const volumeId = button.getAttribute('data-volume-id');\n \n // Navigate to the EC volume details page\n window.location.href = `/cluster/ec-volumes/${volumeId}`;\n }\n\n function repairVolume(event) {\n // Get data from the button element (not the icon inside it)\n const button = event.target.closest('button');\n const volumeId = button.getAttribute('data-volume-id');\n if (confirm(`Are you sure you want to repair missing shards for volume ${volumeId}?`)) {\n fetch(`/api/cluster/volumes/${volumeId}/repair`, {\n method: 'POST',\n headers: {\n 'Content-Type': 'application/json',\n }\n })\n .then(response => response.json())\n .then(data => {\n if (data.success) {\n alert('Repair initiated successfully');\n location.reload();\n } else {\n alert('Failed to initiate repair: ' + data.error);\n }\n })\n .catch(error => {\n alert('Error: ' + error.message);\n });\n }\n }\n </script>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}

weed/admin/view/app/cluster_ec_volumes.templ (490)

@@ -0,0 +1,490 @@
package app
import (
"fmt"
"strings"
"github.com/seaweedfs/seaweedfs/weed/admin/dash"
)
templ ClusterEcVolumes(data dash.ClusterEcVolumesData) {
<!DOCTYPE html>
<html lang="en">
<head>
<title>EC Volumes - SeaweedFS</title>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<link href="https://cdn.jsdelivr.net/npm/bootstrap@5.3.0/dist/css/bootstrap.min.css" rel="stylesheet">
<link href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.4.0/css/all.min.css" rel="stylesheet">
</head>
<body>
<div class="container-fluid">
<div class="row">
<div class="col-12">
<h2 class="mb-4">
<i class="fas fa-database me-2"></i>EC Volumes
<small class="text-muted">({fmt.Sprintf("%d", data.TotalVolumes)} volumes)</small>
</h2>
</div>
</div>
<!-- Statistics Cards -->
<div class="row mb-4">
<div class="col-md-3">
<div class="card text-bg-primary">
<div class="card-body">
<div class="d-flex justify-content-between">
<div>
<h6 class="card-title">Total Volumes</h6>
<h4 class="mb-0">{fmt.Sprintf("%d", data.TotalVolumes)}</h4>
<small>EC encoded volumes</small>
</div>
<div class="align-self-center">
<i class="fas fa-cubes fa-2x"></i>
</div>
</div>
</div>
</div>
</div>
<div class="col-md-3">
<div class="card text-bg-info">
<div class="card-body">
<div class="d-flex justify-content-between">
<div>
<h6 class="card-title">Total Shards</h6>
<h4 class="mb-0">{fmt.Sprintf("%d", data.TotalShards)}</h4>
<small>Distributed shards</small>
</div>
<div class="align-self-center">
<i class="fas fa-puzzle-piece fa-2x"></i>
</div>
</div>
</div>
</div>
</div>
<div class="col-md-3">
<div class="card text-bg-success">
<div class="card-body">
<div class="d-flex justify-content-between">
<div>
<h6 class="card-title">Complete Volumes</h6>
<h4 class="mb-0">{fmt.Sprintf("%d", data.CompleteVolumes)}</h4>
<small>All shards present</small>
</div>
<div class="align-self-center">
<i class="fas fa-check-circle fa-2x"></i>
</div>
</div>
</div>
</div>
</div>
<div class="col-md-3">
<div class="card text-bg-warning">
<div class="card-body">
<div class="d-flex justify-content-between">
<div>
<h6 class="card-title">Incomplete Volumes</h6>
<h4 class="mb-0">{fmt.Sprintf("%d", data.IncompleteVolumes)}</h4>
<small>Missing shards</small>
</div>
<div class="align-self-center">
<i class="fas fa-exclamation-triangle fa-2x"></i>
</div>
</div>
</div>
</div>
</div>
</div>
<!-- Volumes Table -->
<div class="d-flex justify-content-between align-items-center mb-3">
<div class="d-flex align-items-center">
<span class="me-3">
Showing {fmt.Sprintf("%d", (data.Page-1)*data.PageSize + 1)} to {fmt.Sprintf("%d", func() int {
end := data.Page * data.PageSize
if end > data.TotalVolumes {
return data.TotalVolumes
}
return end
}())} of {fmt.Sprintf("%d", data.TotalVolumes)} volumes
</span>
<div class="d-flex align-items-center">
<label for="pageSize" class="form-label me-2 mb-0">Show:</label>
<select id="pageSize" class="form-select form-select-sm" style="width: auto;" onchange="changePageSize(this.value)">
<option value="5" if data.PageSize == 5 { selected }>5</option>
<option value="10" if data.PageSize == 10 { selected }>10</option>
<option value="25" if data.PageSize == 25 { selected }>25</option>
<option value="50" if data.PageSize == 50 { selected }>50</option>
<option value="100" if data.PageSize == 100 { selected }>100</option>
</select>
<span class="ms-2">per page</span>
</div>
</div>
if data.Collection != "" {
<div>
<span class="badge bg-info">Collection: {data.Collection}</span>
<a href="/cluster/ec-shards" class="btn btn-sm btn-outline-secondary ms-2">Clear Filter</a>
</div>
}
</div>
<div class="table-responsive">
<table class="table table-striped table-hover" id="ecVolumesTable">
<thead>
<tr>
<th>
<a href="#" onclick="sortBy('volume_id')" class="text-dark text-decoration-none">
Volume ID
if data.SortBy == "volume_id" {
if data.SortOrder == "asc" {
<i class="fas fa-sort-up ms-1"></i>
} else {
<i class="fas fa-sort-down ms-1"></i>
}
} else {
<i class="fas fa-sort ms-1 text-muted"></i>
}
</a>
</th>
if data.ShowCollectionColumn {
<th>
<a href="#" onclick="sortBy('collection')" class="text-dark text-decoration-none">
Collection
if data.SortBy == "collection" {
if data.SortOrder == "asc" {
<i class="fas fa-sort-up ms-1"></i>
} else {
<i class="fas fa-sort-down ms-1"></i>
}
} else {
<i class="fas fa-sort ms-1 text-muted"></i>
}
</a>
</th>
}
<th>
<a href="#" onclick="sortBy('total_shards')" class="text-dark text-decoration-none">
Shard Count
if data.SortBy == "total_shards" {
if data.SortOrder == "asc" {
<i class="fas fa-sort-up ms-1"></i>
} else {
<i class="fas fa-sort-down ms-1"></i>
}
} else {
<i class="fas fa-sort ms-1 text-muted"></i>
}
</a>
</th>
<th class="text-dark">Shard Locations</th>
<th>
<a href="#" onclick="sortBy('completeness')" class="text-dark text-decoration-none">
Status
if data.SortBy == "completeness" {
if data.SortOrder == "asc" {
<i class="fas fa-sort-up ms-1"></i>
} else {
<i class="fas fa-sort-down ms-1"></i>
}
} else {
<i class="fas fa-sort ms-1 text-muted"></i>
}
</a>
</th>
if data.ShowDataCenterColumn {
<th class="text-dark">Data Centers</th>
}
<th class="text-dark">Actions</th>
</tr>
</thead>
<tbody>
for _, volume := range data.EcVolumes {
<tr>
<td>
<strong>{fmt.Sprintf("%d", volume.VolumeID)}</strong>
</td>
if data.ShowCollectionColumn {
<td>
if volume.Collection != "" {
<span class="badge bg-outline-info">{volume.Collection}</span>
} else {
<span class="text-muted">default</span>
}
</td>
}
<td>
<span class="badge bg-primary">{fmt.Sprintf("%d/14", volume.TotalShards)}</span>
</td>
<td>
<div class="shard-locations" style="max-width: 400px;">
@displayShardLocationsHTML(volume.ShardLocations)
</div>
</td>
<td>
if volume.IsComplete {
<span class="badge bg-success">
<i class="fas fa-check me-1"></i>Complete
</span>
} else {
<span class="badge bg-warning">
<i class="fas fa-exclamation-triangle me-1"></i>
Missing {fmt.Sprintf("%d", len(volume.MissingShards))} shards
</span>
if len(volume.MissingShards) > 0 {
<br/>
<small class="text-muted">
Missing: {formatMissingShards(volume.MissingShards)}
</small>
}
}
</td>
if data.ShowDataCenterColumn {
<td>
for i, dc := range volume.DataCenters {
if i > 0 {
<span>, </span>
}
<span class="badge bg-primary text-white">{dc}</span>
}
</td>
}
<td>
<div class="btn-group" role="group">
<button type="button" class="btn btn-sm btn-outline-primary"
onclick="showVolumeDetails(event)"
data-volume-id={ fmt.Sprintf("%d", volume.VolumeID) }
title="View EC volume details">
<i class="fas fa-info-circle"></i>
</button>
if !volume.IsComplete {
<button type="button" class="btn btn-sm btn-outline-warning"
onclick="repairVolume(event)"
data-volume-id={ fmt.Sprintf("%d", volume.VolumeID) }
title="Repair missing shards">
<i class="fas fa-wrench"></i>
</button>
}
</div>
</td>
</tr>
}
</tbody>
</table>
</div>
<!-- Pagination -->
if data.TotalPages > 1 {
<nav aria-label="EC Volumes pagination">
<ul class="pagination justify-content-center">
if data.Page > 1 {
<li class="page-item">
<a class="page-link" href="#" onclick="goToPage(event)" data-page="1">First</a>
</li>
<li class="page-item">
<a class="page-link" href="#" onclick="goToPage(event)" data-page={ fmt.Sprintf("%d", data.Page-1) }>Previous</a>
</li>
}
for i := 1; i <= data.TotalPages; i++ {
if i == data.Page {
<li class="page-item active">
<span class="page-link">{fmt.Sprintf("%d", i)}</span>
</li>
} else if i <= 3 || i > data.TotalPages-3 || (i >= data.Page-2 && i <= data.Page+2) {
<li class="page-item">
<a class="page-link" href="#" onclick="goToPage(event)" data-page={ fmt.Sprintf("%d", i) }>{fmt.Sprintf("%d", i)}</a>
</li>
} else if i == 4 && data.Page > 6 {
<li class="page-item disabled">
<span class="page-link">...</span>
</li>
} else if i == data.TotalPages-3 && data.Page < data.TotalPages-5 {
<li class="page-item disabled">
<span class="page-link">...</span>
</li>
}
}
if data.Page < data.TotalPages {
<li class="page-item">
<a class="page-link" href="#" onclick="goToPage(event)" data-page={ fmt.Sprintf("%d", data.Page+1) }>Next</a>
</li>
<li class="page-item">
<a class="page-link" href="#" onclick="goToPage(event)" data-page={ fmt.Sprintf("%d", data.TotalPages) }>Last</a>
</li>
}
</ul>
</nav>
}
</div>
<script src="https://cdn.jsdelivr.net/npm/bootstrap@5.3.0/dist/js/bootstrap.bundle.min.js"></script>
<script>
// Sorting functionality
function sortBy(field) {
const currentSort = new URLSearchParams(window.location.search).get('sort_by');
const currentOrder = new URLSearchParams(window.location.search).get('sort_order') || 'asc';
let newOrder = 'asc';
if (currentSort === field && currentOrder === 'asc') {
newOrder = 'desc';
}
const url = new URL(window.location);
url.searchParams.set('sort_by', field);
url.searchParams.set('sort_order', newOrder);
url.searchParams.set('page', '1'); // Reset to first page
window.location.href = url.toString();
}
// Pagination functionality
function goToPage(event) {
event.preventDefault();
const page = event.target.closest('a').getAttribute('data-page');
const url = new URL(window.location);
url.searchParams.set('page', page);
window.location.href = url.toString();
}
// Page size functionality
function changePageSize(newPageSize) {
const url = new URL(window.location);
url.searchParams.set('page_size', newPageSize);
url.searchParams.set('page', '1'); // Reset to first page when changing page size
window.location.href = url.toString();
}
// Volume details
function showVolumeDetails(event) {
const volumeId = event.target.closest('button').getAttribute('data-volume-id');
window.location.href = `/cluster/ec-volumes/${volumeId}`;
}
// Repair volume
function repairVolume(event) {
const volumeId = event.target.closest('button').getAttribute('data-volume-id');
if (confirm(`Are you sure you want to repair missing shards for volume ${volumeId}?`)) {
// TODO: Implement repair functionality
alert('Repair functionality will be implemented soon.');
}
}
</script>
</body>
</html>
}
// displayShardLocationsHTML renders shard locations as proper HTML
templ displayShardLocationsHTML(shardLocations map[int]string) {
if len(shardLocations) == 0 {
<span class="text-muted">No shards</span>
} else {
for i, serverInfo := range groupShardsByServer(shardLocations) {
if i > 0 {
<br/>
}
<strong>
<a href={ templ.URL("/cluster/volume-servers/" + serverInfo.Server) } class="text-primary text-decoration-none">
{ serverInfo.Server }
</a>:
</strong> { serverInfo.ShardRanges }
}
}
}
// ServerShardInfo represents server and its shard ranges
type ServerShardInfo struct {
Server string
ShardRanges string
}
// groupShardsByServer groups shards by server and formats ranges
func groupShardsByServer(shardLocations map[int]string) []ServerShardInfo {
if len(shardLocations) == 0 {
return []ServerShardInfo{}
}
// Group shards by server
serverShards := make(map[string][]int)
for shardId, server := range shardLocations {
serverShards[server] = append(serverShards[server], shardId)
}
var serverInfos []ServerShardInfo
for server, shards := range serverShards {
// Sort shards for each server
for i := 0; i < len(shards); i++ {
for j := i + 1; j < len(shards); j++ {
if shards[i] > shards[j] {
shards[i], shards[j] = shards[j], shards[i]
}
}
}
// Format shard ranges compactly
shardRanges := formatShardRanges(shards)
serverInfos = append(serverInfos, ServerShardInfo{
Server: server,
ShardRanges: shardRanges,
})
}
// Sort by server name
for i := 0; i < len(serverInfos); i++ {
for j := i + 1; j < len(serverInfos); j++ {
if serverInfos[i].Server > serverInfos[j].Server {
serverInfos[i], serverInfos[j] = serverInfos[j], serverInfos[i]
}
}
}
return serverInfos
}
// Helper function to format shard ranges compactly (e.g., "0-3,7,9-11")
func formatShardRanges(shards []int) string {
if len(shards) == 0 {
return ""
}
var ranges []string
start := shards[0]
end := shards[0]
for i := 1; i < len(shards); i++ {
if shards[i] == end+1 {
end = shards[i]
} else {
if start == end {
ranges = append(ranges, fmt.Sprintf("%d", start))
} else {
ranges = append(ranges, fmt.Sprintf("%d-%d", start, end))
}
start = shards[i]
end = shards[i]
}
}
// Add the last range
if start == end {
ranges = append(ranges, fmt.Sprintf("%d", start))
} else {
ranges = append(ranges, fmt.Sprintf("%d-%d", start, end))
}
return strings.Join(ranges, ",")
}
// Helper function to format missing shards
func formatMissingShards(missingShards []int) string {
if len(missingShards) == 0 {
return ""
}
var shardStrs []string
for _, shard := range missingShards {
shardStrs = append(shardStrs, fmt.Sprintf("%d", shard))
}
return strings.Join(shardStrs, ", ")
}
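
A standalone check of the range formatter's output style, under the assumption the input is already sorted ascending (groupShardsByServer sorts before calling it):

package main

import (
	"fmt"
	"strings"
)

// Condensed copy of formatShardRanges above, for a quick standalone run.
func formatShardRanges(shards []int) string {
	if len(shards) == 0 {
		return ""
	}
	var ranges []string
	start, end := shards[0], shards[0]
	flush := func() {
		if start == end {
			ranges = append(ranges, fmt.Sprintf("%d", start))
		} else {
			ranges = append(ranges, fmt.Sprintf("%d-%d", start, end))
		}
	}
	for _, s := range shards[1:] {
		if s == end+1 {
			end = s
			continue
		}
		flush()
		start, end = s, s
	}
	flush()
	return strings.Join(ranges, ",")
}

func main() {
	fmt.Println(formatShardRanges([]int{0, 1, 2, 3, 7, 9, 10, 11})) // 0-3,7,9-11
}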

weed/admin/view/app/cluster_ec_volumes_templ.go (845)

@ -0,0 +1,845 @@
// Code generated by templ - DO NOT EDIT.
// templ: version: v0.3.906
package app
//lint:file-ignore SA4006 This context is only used if a nested component is present.
import "github.com/a-h/templ"
import templruntime "github.com/a-h/templ/runtime"
import (
"fmt"
"github.com/seaweedfs/seaweedfs/weed/admin/dash"
"strings"
)
func ClusterEcVolumes(data dash.ClusterEcVolumesData) templ.Component {
return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil {
return templ_7745c5c3_CtxErr
}
templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
if !templ_7745c5c3_IsBuffer {
defer func() {
templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
if templ_7745c5c3_Err == nil {
templ_7745c5c3_Err = templ_7745c5c3_BufErr
}
}()
}
ctx = templ.InitializeContext(ctx)
templ_7745c5c3_Var1 := templ.GetChildren(ctx)
if templ_7745c5c3_Var1 == nil {
templ_7745c5c3_Var1 = templ.NopComponent
}
ctx = templ.ClearChildren(ctx)
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "<!doctype html><html lang=\"en\"><head><title>EC Volumes - SeaweedFS</title><meta charset=\"utf-8\"><meta name=\"viewport\" content=\"width=device-width, initial-scale=1\"><link href=\"https://cdn.jsdelivr.net/npm/bootstrap@5.3.0/dist/css/bootstrap.min.css\" rel=\"stylesheet\"><link href=\"https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.4.0/css/all.min.css\" rel=\"stylesheet\"></head><body><div class=\"container-fluid\"><div class=\"row\"><div class=\"col-12\"><h2 class=\"mb-4\"><i class=\"fas fa-database me-2\"></i>EC Volumes <small class=\"text-muted\">(")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var2 string
templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalVolumes))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 25, Col: 84}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, " volumes)</small></h2></div></div><!-- Statistics Cards --><div class=\"row mb-4\"><div class=\"col-md-3\"><div class=\"card text-bg-primary\"><div class=\"card-body\"><div class=\"d-flex justify-content-between\"><div><h6 class=\"card-title\">Total Volumes</h6><h4 class=\"mb-0\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var3 string
templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalVolumes))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 38, Col: 86}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "</h4><small>EC encoded volumes</small></div><div class=\"align-self-center\"><i class=\"fas fa-cubes fa-2x\"></i></div></div></div></div></div><div class=\"col-md-3\"><div class=\"card text-bg-info\"><div class=\"card-body\"><div class=\"d-flex justify-content-between\"><div><h6 class=\"card-title\">Total Shards</h6><h4 class=\"mb-0\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var4 string
templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalShards))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 54, Col: 85}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "</h4><small>Distributed shards</small></div><div class=\"align-self-center\"><i class=\"fas fa-puzzle-piece fa-2x\"></i></div></div></div></div></div><div class=\"col-md-3\"><div class=\"card text-bg-success\"><div class=\"card-body\"><div class=\"d-flex justify-content-between\"><div><h6 class=\"card-title\">Complete Volumes</h6><h4 class=\"mb-0\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var5 string
templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.CompleteVolumes))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 70, Col: 89}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "</h4><small>All shards present</small></div><div class=\"align-self-center\"><i class=\"fas fa-check-circle fa-2x\"></i></div></div></div></div></div><div class=\"col-md-3\"><div class=\"card text-bg-warning\"><div class=\"card-body\"><div class=\"d-flex justify-content-between\"><div><h6 class=\"card-title\">Incomplete Volumes</h6><h4 class=\"mb-0\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var6 string
templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.IncompleteVolumes))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 86, Col: 91}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "</h4><small>Missing shards</small></div><div class=\"align-self-center\"><i class=\"fas fa-exclamation-triangle fa-2x\"></i></div></div></div></div></div></div><!-- Volumes Table --><div class=\"d-flex justify-content-between align-items-center mb-3\"><div class=\"d-flex align-items-center\"><span class=\"me-3\">Showing ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var7 string
templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", (data.Page-1)*data.PageSize+1))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 102, Col: 79}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, " to ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var8 string
templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", func() int {
end := data.Page * data.PageSize
if end > data.TotalVolumes {
return data.TotalVolumes
}
return end
}()))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 108, Col: 24}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, " of ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var9 string
templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalVolumes))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 108, Col: 66}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, " volumes</span><div class=\"d-flex align-items-center\"><label for=\"pageSize\" class=\"form-label me-2 mb-0\">Show:</label> <select id=\"pageSize\" class=\"form-select form-select-sm\" style=\"width: auto;\" onchange=\"changePageSize(this.value)\"><option value=\"5\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if data.PageSize == 5 {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, " selected")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, ">5</option> <option value=\"10\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if data.PageSize == 10 {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, " selected")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, ">10</option> <option value=\"25\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if data.PageSize == 25 {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, " selected")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, ">25</option> <option value=\"50\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if data.PageSize == 50 {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, " selected")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, ">50</option> <option value=\"100\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if data.PageSize == 100 {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, " selected")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, ">100</option></select> <span class=\"ms-2\">per page</span></div></div>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if data.Collection != "" {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, "<div><span class=\"badge bg-info\">Collection: ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var10 string
templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(data.Collection)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 126, Col: 76}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "</span> <a href=\"/cluster/ec-shards\" class=\"btn btn-sm btn-outline-secondary ms-2\">Clear Filter</a></div>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, "</div><div class=\"table-responsive\"><table class=\"table table-striped table-hover\" id=\"ecVolumesTable\"><thead><tr><th><a href=\"#\" onclick=\"sortBy('volume_id')\" class=\"text-dark text-decoration-none\">Volume ID ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if data.SortBy == "volume_id" {
if data.SortOrder == "asc" {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "<i class=\"fas fa-sort-up ms-1\"></i>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "<i class=\"fas fa-sort-down ms-1\"></i>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
} else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "<i class=\"fas fa-sort ms-1 text-muted\"></i>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, "</a></th>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if data.ShowCollectionColumn {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, "<th><a href=\"#\" onclick=\"sortBy('collection')\" class=\"text-dark text-decoration-none\">Collection ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if data.SortBy == "collection" {
if data.SortOrder == "asc" {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "<i class=\"fas fa-sort-up ms-1\"></i>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 29, "<i class=\"fas fa-sort-down ms-1\"></i>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
} else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, "<i class=\"fas fa-sort ms-1 text-muted\"></i>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, "</a></th>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, "<th><a href=\"#\" onclick=\"sortBy('total_shards')\" class=\"text-dark text-decoration-none\">Shard Count ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if data.SortBy == "total_shards" {
if data.SortOrder == "asc" {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "<i class=\"fas fa-sort-up ms-1\"></i>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "<i class=\"fas fa-sort-down ms-1\"></i>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
} else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "<i class=\"fas fa-sort ms-1 text-muted\"></i>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "</a></th><th class=\"text-dark\">Shard Locations</th><th><a href=\"#\" onclick=\"sortBy('completeness')\" class=\"text-dark text-decoration-none\">Status ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if data.SortBy == "completeness" {
if data.SortOrder == "asc" {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 37, "<i class=\"fas fa-sort-up ms-1\"></i>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 38, "<i class=\"fas fa-sort-down ms-1\"></i>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
} else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 39, "<i class=\"fas fa-sort ms-1 text-muted\"></i>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 40, "</a></th>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if data.ShowDataCenterColumn {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 41, "<th class=\"text-dark\">Data Centers</th>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 42, "<th class=\"text-dark\">Actions</th></tr></thead> <tbody>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
for _, volume := range data.EcVolumes {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 43, "<tr><td><strong>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var11 string
templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", volume.VolumeID))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 205, Col: 75}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 44, "</strong></td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if data.ShowCollectionColumn {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 45, "<td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if volume.Collection != "" {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 46, "<span class=\"badge bg-outline-info\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var12 string
templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(volume.Collection)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 210, Col: 94}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 47, "</span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 48, "<span class=\"text-muted\">default</span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 49, "</td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 50, "<td><span class=\"badge bg-primary\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var13 string
templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d/14", volume.TotalShards))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 217, Col: 104}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 51, "</span></td><td><div class=\"shard-locations\" style=\"max-width: 400px;\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = displayShardLocationsHTML(volume.ShardLocations).Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 52, "</div></td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if volume.IsComplete {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 53, "<span class=\"badge bg-success\"><i class=\"fas fa-check me-1\"></i>Complete</span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 54, "<span class=\"badge bg-warning\"><i class=\"fas fa-exclamation-triangle me-1\"></i> Missing ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var14 string
templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(volume.MissingShards)))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 232, Col: 93}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 55, " shards</span> ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if len(volume.MissingShards) > 0 {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 56, "<br><small class=\"text-muted\">Missing: ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var15 string
templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(formatMissingShards(volume.MissingShards))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 237, Col: 95}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 57, "</small>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 58, "</td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if data.ShowDataCenterColumn {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 59, "<td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
for i, dc := range volume.DataCenters {
if i > 0 {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 60, "<span>, </span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 61, " <span class=\"badge bg-primary text-white\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var16 string
templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(dc)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 248, Col: 85}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 62, "</span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 63, "</td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 64, "<td><div class=\"btn-group\" role=\"group\"><button type=\"button\" class=\"btn btn-sm btn-outline-primary\" onclick=\"showVolumeDetails(event)\" data-volume-id=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var17 string
templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", volume.VolumeID))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 256, Col: 95}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 65, "\" title=\"View EC volume details\"><i class=\"fas fa-info-circle\"></i></button> ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if !volume.IsComplete {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 66, "<button type=\"button\" class=\"btn btn-sm btn-outline-warning\" onclick=\"repairVolume(event)\" data-volume-id=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var18 string
templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", volume.VolumeID))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 263, Col: 99}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 67, "\" title=\"Repair missing shards\"><i class=\"fas fa-wrench\"></i></button>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 68, "</div></td></tr>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 69, "</tbody></table></div><!-- Pagination -->")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if data.TotalPages > 1 {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 70, "<nav aria-label=\"EC Volumes pagination\"><ul class=\"pagination justify-content-center\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if data.Page > 1 {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 71, "<li class=\"page-item\"><a class=\"page-link\" href=\"#\" onclick=\"goToPage(event)\" data-page=\"1\">First</a></li><li class=\"page-item\"><a class=\"page-link\" href=\"#\" onclick=\"goToPage(event)\" data-page=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var19 string
templ_7745c5c3_Var19, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.Page-1))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 285, Col: 126}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var19))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 72, "\">Previous</a></li>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
for i := 1; i <= data.TotalPages; i++ {
if i == data.Page {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 73, "<li class=\"page-item active\"><span class=\"page-link\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var20 string
templ_7745c5c3_Var20, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", i))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 292, Col: 77}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var20))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 74, "</span></li>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else if i <= 3 || i > data.TotalPages-3 || (i >= data.Page-2 && i <= data.Page+2) {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 75, "<li class=\"page-item\"><a class=\"page-link\" href=\"#\" onclick=\"goToPage(event)\" data-page=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var21 string
templ_7745c5c3_Var21, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", i))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 296, Col: 120}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var21))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 76, "\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var22 string
templ_7745c5c3_Var22, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", i))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 296, Col: 144}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var22))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 77, "</a></li>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else if i == 4 && data.Page > 6 {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 78, "<li class=\"page-item disabled\"><span class=\"page-link\">...</span></li>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else if i == data.TotalPages-3 && data.Page < data.TotalPages-5 {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 79, "<li class=\"page-item disabled\"><span class=\"page-link\">...</span></li>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
}
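// Worked example of the windowing above: with TotalPages=20 and Page=10 this
// loop renders 1 2 3 ... 8 9 [10] 11 12 ... 18 19 20; pages outside the window
// emit nothing, and the two trailing else-if branches supply the ellipsis entries.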
if data.Page < data.TotalPages {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 80, "<li class=\"page-item\"><a class=\"page-link\" href=\"#\" onclick=\"goToPage(event)\" data-page=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var23 string
templ_7745c5c3_Var23, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.Page+1))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 311, Col: 126}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var23))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 81, "\">Next</a></li><li class=\"page-item\"><a class=\"page-link\" href=\"#\" onclick=\"goToPage(event)\" data-page=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var24 string
templ_7745c5c3_Var24, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalPages))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 314, Col: 130}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var24))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 82, "\">Last</a></li>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 83, "</ul></nav>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 84, "</div><script src=\"https://cdn.jsdelivr.net/npm/bootstrap@5.3.0/dist/js/bootstrap.bundle.min.js\"></script><script>\n // Sorting functionality\n function sortBy(field) {\n const currentSort = new URLSearchParams(window.location.search).get('sort_by');\n const currentOrder = new URLSearchParams(window.location.search).get('sort_order') || 'asc';\n \n let newOrder = 'asc';\n if (currentSort === field && currentOrder === 'asc') {\n newOrder = 'desc';\n }\n \n const url = new URL(window.location);\n url.searchParams.set('sort_by', field);\n url.searchParams.set('sort_order', newOrder);\n url.searchParams.set('page', '1'); // Reset to first page\n window.location.href = url.toString();\n }\n\n // Pagination functionality\n function goToPage(event) {\n event.preventDefault();\n const page = event.target.closest('a').getAttribute('data-page');\n const url = new URL(window.location);\n url.searchParams.set('page', page);\n window.location.href = url.toString();\n }\n\n // Page size functionality\n function changePageSize(newPageSize) {\n const url = new URL(window.location);\n url.searchParams.set('page_size', newPageSize);\n url.searchParams.set('page', '1'); // Reset to first page when changing page size\n window.location.href = url.toString();\n }\n\n // Volume details\n function showVolumeDetails(event) {\n const volumeId = event.target.closest('button').getAttribute('data-volume-id');\n window.location.href = `/cluster/ec-volumes/${volumeId}`;\n }\n\n // Repair volume\n function repairVolume(event) {\n const volumeId = event.target.closest('button').getAttribute('data-volume-id');\n if (confirm(`Are you sure you want to repair missing shards for volume ${volumeId}?`)) {\n // TODO: Implement repair functionality\n alert('Repair functionality will be implemented soon.');\n }\n }\n </script></body></html>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
return nil
})
}
// displayShardLocationsHTML renders shard locations as proper HTML
func displayShardLocationsHTML(shardLocations map[int]string) templ.Component {
return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil {
return templ_7745c5c3_CtxErr
}
templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
if !templ_7745c5c3_IsBuffer {
defer func() {
templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
if templ_7745c5c3_Err == nil {
templ_7745c5c3_Err = templ_7745c5c3_BufErr
}
}()
}
ctx = templ.InitializeContext(ctx)
templ_7745c5c3_Var25 := templ.GetChildren(ctx)
if templ_7745c5c3_Var25 == nil {
templ_7745c5c3_Var25 = templ.NopComponent
}
ctx = templ.ClearChildren(ctx)
if len(shardLocations) == 0 {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 85, "<span class=\"text-muted\">No shards</span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else {
for i, serverInfo := range groupShardsByServer(shardLocations) {
if i > 0 {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 86, "<br>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 87, " <strong><a href=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var26 templ.SafeURL
templ_7745c5c3_Var26, templ_7745c5c3_Err = templ.JoinURLErrs(templ.URL("/cluster/volume-servers/" + serverInfo.Server))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 387, Col: 71}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var26))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 88, "\" class=\"text-primary text-decoration-none\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var27 string
templ_7745c5c3_Var27, templ_7745c5c3_Err = templ.JoinStringErrs(serverInfo.Server)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 388, Col: 24}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var27))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 89, "</a>:</strong> ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var28 string
templ_7745c5c3_Var28, templ_7745c5c3_Err = templ.JoinStringErrs(serverInfo.ShardRanges)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 390, Col: 37}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var28))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
}
return nil
})
}
// ServerShardInfo represents a server and its shard ranges
type ServerShardInfo struct {
Server string
ShardRanges string
}
// groupShardsByServer groups shards by server and formats ranges
func groupShardsByServer(shardLocations map[int]string) []ServerShardInfo {
if len(shardLocations) == 0 {
return []ServerShardInfo{}
}
// Group shards by server
serverShards := make(map[string][]int)
for shardId, server := range shardLocations {
serverShards[server] = append(serverShards[server], shardId)
}
var serverInfos []ServerShardInfo
for server, shards := range serverShards {
// Sort shards for each server
for i := 0; i < len(shards); i++ {
for j := i + 1; j < len(shards); j++ {
if shards[i] > shards[j] {
shards[i], shards[j] = shards[j], shards[i]
}
}
}
// Format shard ranges compactly
shardRanges := formatShardRanges(shards)
serverInfos = append(serverInfos, ServerShardInfo{
Server: server,
ShardRanges: shardRanges,
})
}
// Sort by server name
for i := 0; i < len(serverInfos); i++ {
for j := i + 1; j < len(serverInfos); j++ {
if serverInfos[i].Server > serverInfos[j].Server {
serverInfos[i], serverInfos[j] = serverInfos[j], serverInfos[i]
}
}
}
return serverInfos
}
// Helper function to format shard ranges compactly (e.g., "0-3,7,9-11")
func formatShardRanges(shards []int) string {
if len(shards) == 0 {
return ""
}
var ranges []string
start := shards[0]
end := shards[0]
for i := 1; i < len(shards); i++ {
if shards[i] == end+1 {
end = shards[i]
} else {
if start == end {
ranges = append(ranges, fmt.Sprintf("%d", start))
} else {
ranges = append(ranges, fmt.Sprintf("%d-%d", start, end))
}
start = shards[i]
end = shards[i]
}
}
// Add the last range
if start == end {
ranges = append(ranges, fmt.Sprintf("%d", start))
} else {
ranges = append(ranges, fmt.Sprintf("%d-%d", start, end))
}
return strings.Join(ranges, ",")
}
// Helper function to format missing shards
func formatMissingShards(missingShards []int) string {
if len(missingShards) == 0 {
return ""
}
var shardStrs []string
for _, shard := range missingShards {
shardStrs = append(shardStrs, fmt.Sprintf("%d", shard))
}
return strings.Join(shardStrs, ", ")
}
var _ = templruntime.GeneratedTemplate

2
weed/admin/view/app/ec_volume_details.templ

@ -14,7 +14,7 @@ templ EcVolumeDetails(data dash.EcVolumeDetailsData) {
<nav aria-label="breadcrumb">
<ol class="breadcrumb">
<li class="breadcrumb-item"><a href="/admin" class="text-decoration-none">Dashboard</a></li>
<li class="breadcrumb-item"><a href="/cluster/ec-shards" class="text-decoration-none">EC Shards</a></li>
<li class="breadcrumb-item"><a href="/cluster/ec-shards" class="text-decoration-none">EC Volumes</a></li>
<li class="breadcrumb-item active" aria-current="page">Volume {fmt.Sprintf("%d", data.VolumeID)}</li>
</ol>
</nav>

2
weed/admin/view/app/ec_volume_details_templ.go

@ -34,7 +34,7 @@ func EcVolumeDetails(data dash.EcVolumeDetailsData) templ.Component {
templ_7745c5c3_Var1 = templ.NopComponent
}
ctx = templ.ClearChildren(ctx)
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "<div class=\"d-flex justify-content-between flex-wrap flex-md-nowrap align-items-center pt-3 pb-2 mb-3 border-bottom\"><div><h1 class=\"h2\"><i class=\"fas fa-th-large me-2\"></i>EC Volume Details</h1><nav aria-label=\"breadcrumb\"><ol class=\"breadcrumb\"><li class=\"breadcrumb-item\"><a href=\"/admin\" class=\"text-decoration-none\">Dashboard</a></li><li class=\"breadcrumb-item\"><a href=\"/cluster/ec-shards\" class=\"text-decoration-none\">EC Shards</a></li><li class=\"breadcrumb-item active\" aria-current=\"page\">Volume ")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "<div class=\"d-flex justify-content-between flex-wrap flex-md-nowrap align-items-center pt-3 pb-2 mb-3 border-bottom\"><div><h1 class=\"h2\"><i class=\"fas fa-th-large me-2\"></i>EC Volume Details</h1><nav aria-label=\"breadcrumb\"><ol class=\"breadcrumb\"><li class=\"breadcrumb-item\"><a href=\"/admin\" class=\"text-decoration-none\">Dashboard</a></li><li class=\"breadcrumb-item\"><a href=\"/cluster/ec-shards\" class=\"text-decoration-none\">EC Volumes</a></li><li class=\"breadcrumb-item active\" aria-current=\"page\">Volume ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}

2
weed/admin/view/layout/layout.templ

@ -113,7 +113,7 @@ templ Layout(c *gin.Context, content templ.Component) {
</li>
<li class="nav-item">
<a class="nav-link py-2" href="/cluster/ec-shards">
<i class="fas fa-th-large me-2"></i>EC Shards
<i class="fas fa-th-large me-2"></i>EC Volumes
</a>
</li>
<li class="nav-item">

2
weed/admin/view/layout/layout_templ.go

@ -62,7 +62,7 @@ func Layout(c *gin.Context, content templ.Component) templ.Component {
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "</a><ul class=\"dropdown-menu\"><li><a class=\"dropdown-item\" href=\"/logout\"><i class=\"fas fa-sign-out-alt me-2\"></i>Logout</a></li></ul></li></ul></div></div></header><div class=\"row g-0\"><!-- Sidebar --><div class=\"col-md-3 col-lg-2 d-md-block bg-light sidebar collapse\"><div class=\"position-sticky pt-3\"><h6 class=\"sidebar-heading px-3 mt-4 mb-1 text-muted\"><span>MAIN</span></h6><ul class=\"nav flex-column\"><li class=\"nav-item\"><a class=\"nav-link\" href=\"/admin\"><i class=\"fas fa-tachometer-alt me-2\"></i>Dashboard</a></li><li class=\"nav-item\"><a class=\"nav-link collapsed\" href=\"#\" data-bs-toggle=\"collapse\" data-bs-target=\"#clusterSubmenu\" aria-expanded=\"false\" aria-controls=\"clusterSubmenu\"><i class=\"fas fa-sitemap me-2\"></i>Cluster <i class=\"fas fa-chevron-down ms-auto\"></i></a><div class=\"collapse\" id=\"clusterSubmenu\"><ul class=\"nav flex-column ms-3\"><li class=\"nav-item\"><a class=\"nav-link py-2\" href=\"/cluster/masters\"><i class=\"fas fa-crown me-2\"></i>Masters</a></li><li class=\"nav-item\"><a class=\"nav-link py-2\" href=\"/cluster/volume-servers\"><i class=\"fas fa-server me-2\"></i>Volume Servers</a></li><li class=\"nav-item\"><a class=\"nav-link py-2\" href=\"/cluster/filers\"><i class=\"fas fa-folder-open me-2\"></i>Filers</a></li><li class=\"nav-item\"><a class=\"nav-link py-2\" href=\"/cluster/volumes\"><i class=\"fas fa-database me-2\"></i>Volumes</a></li><li class=\"nav-item\"><a class=\"nav-link py-2\" href=\"/cluster/ec-shards\"><i class=\"fas fa-th-large me-2\"></i>EC Shards</a></li><li class=\"nav-item\"><a class=\"nav-link py-2\" href=\"/cluster/collections\"><i class=\"fas fa-layer-group me-2\"></i>Collections</a></li></ul></div></li></ul><h6 class=\"sidebar-heading px-3 mt-4 mb-1 text-muted\"><span>MANAGEMENT</span></h6><ul class=\"nav flex-column\"><li class=\"nav-item\"><a class=\"nav-link\" href=\"/files\"><i class=\"fas fa-folder me-2\"></i>File Browser</a></li><li class=\"nav-item\"><a class=\"nav-link collapsed\" href=\"#\" data-bs-toggle=\"collapse\" data-bs-target=\"#objectStoreSubmenu\" aria-expanded=\"false\" aria-controls=\"objectStoreSubmenu\"><i class=\"fas fa-cloud me-2\"></i>Object Store <i class=\"fas fa-chevron-down ms-auto\"></i></a><div class=\"collapse\" id=\"objectStoreSubmenu\"><ul class=\"nav flex-column ms-3\"><li class=\"nav-item\"><a class=\"nav-link py-2\" href=\"/object-store/buckets\"><i class=\"fas fa-cube me-2\"></i>Buckets</a></li><li class=\"nav-item\"><a class=\"nav-link py-2\" href=\"/object-store/users\"><i class=\"fas fa-users me-2\"></i>Users</a></li><li class=\"nav-item\"><a class=\"nav-link py-2\" href=\"/object-store/policies\"><i class=\"fas fa-shield-alt me-2\"></i>Policies</a></li></ul></div></li><li class=\"nav-item\">")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "</a><ul class=\"dropdown-menu\"><li><a class=\"dropdown-item\" href=\"/logout\"><i class=\"fas fa-sign-out-alt me-2\"></i>Logout</a></li></ul></li></ul></div></div></header><div class=\"row g-0\"><!-- Sidebar --><div class=\"col-md-3 col-lg-2 d-md-block bg-light sidebar collapse\"><div class=\"position-sticky pt-3\"><h6 class=\"sidebar-heading px-3 mt-4 mb-1 text-muted\"><span>MAIN</span></h6><ul class=\"nav flex-column\"><li class=\"nav-item\"><a class=\"nav-link\" href=\"/admin\"><i class=\"fas fa-tachometer-alt me-2\"></i>Dashboard</a></li><li class=\"nav-item\"><a class=\"nav-link collapsed\" href=\"#\" data-bs-toggle=\"collapse\" data-bs-target=\"#clusterSubmenu\" aria-expanded=\"false\" aria-controls=\"clusterSubmenu\"><i class=\"fas fa-sitemap me-2\"></i>Cluster <i class=\"fas fa-chevron-down ms-auto\"></i></a><div class=\"collapse\" id=\"clusterSubmenu\"><ul class=\"nav flex-column ms-3\"><li class=\"nav-item\"><a class=\"nav-link py-2\" href=\"/cluster/masters\"><i class=\"fas fa-crown me-2\"></i>Masters</a></li><li class=\"nav-item\"><a class=\"nav-link py-2\" href=\"/cluster/volume-servers\"><i class=\"fas fa-server me-2\"></i>Volume Servers</a></li><li class=\"nav-item\"><a class=\"nav-link py-2\" href=\"/cluster/filers\"><i class=\"fas fa-folder-open me-2\"></i>Filers</a></li><li class=\"nav-item\"><a class=\"nav-link py-2\" href=\"/cluster/volumes\"><i class=\"fas fa-database me-2\"></i>Volumes</a></li><li class=\"nav-item\"><a class=\"nav-link py-2\" href=\"/cluster/ec-shards\"><i class=\"fas fa-th-large me-2\"></i>EC Volumes</a></li><li class=\"nav-item\"><a class=\"nav-link py-2\" href=\"/cluster/collections\"><i class=\"fas fa-layer-group me-2\"></i>Collections</a></li></ul></div></li></ul><h6 class=\"sidebar-heading px-3 mt-4 mb-1 text-muted\"><span>MANAGEMENT</span></h6><ul class=\"nav flex-column\"><li class=\"nav-item\"><a class=\"nav-link\" href=\"/files\"><i class=\"fas fa-folder me-2\"></i>File Browser</a></li><li class=\"nav-item\"><a class=\"nav-link collapsed\" href=\"#\" data-bs-toggle=\"collapse\" data-bs-target=\"#objectStoreSubmenu\" aria-expanded=\"false\" aria-controls=\"objectStoreSubmenu\"><i class=\"fas fa-cloud me-2\"></i>Object Store <i class=\"fas fa-chevron-down ms-auto\"></i></a><div class=\"collapse\" id=\"objectStoreSubmenu\"><ul class=\"nav flex-column ms-3\"><li class=\"nav-item\"><a class=\"nav-link py-2\" href=\"/object-store/buckets\"><i class=\"fas fa-cube me-2\"></i>Buckets</a></li><li class=\"nav-item\"><a class=\"nav-link py-2\" href=\"/object-store/users\"><i class=\"fas fa-users me-2\"></i>Users</a></li><li class=\"nav-item\"><a class=\"nav-link py-2\" href=\"/object-store/policies\"><i class=\"fas fa-shield-alt me-2\"></i>Policies</a></li></ul></div></li><li class=\"nav-item\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}

23
weed/pb/volume_server.proto

@ -53,6 +53,8 @@ service VolumeServer {
}
rpc CopyFile (CopyFileRequest) returns (stream CopyFileResponse) {
}
rpc ReceiveFile (stream ReceiveFileRequest) returns (ReceiveFileResponse) {
}
rpc ReadNeedleBlob (ReadNeedleBlobRequest) returns (ReadNeedleBlobResponse) {
}
@ -285,6 +287,27 @@ message CopyFileResponse {
int64 modified_ts_ns = 2;
}
message ReceiveFileRequest {
oneof data {
ReceiveFileInfo info = 1;
bytes file_content = 2;
}
}
message ReceiveFileInfo {
uint32 volume_id = 1;
string ext = 2;
string collection = 3;
bool is_ec_volume = 4;
uint32 shard_id = 5;
uint64 file_size = 6;
}
message ReceiveFileResponse {
uint64 bytes_written = 1;
string error = 2;
}
message ReadNeedleBlobRequest {
uint32 volume_id = 1;
int64 offset = 3; // actual offset
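The new ReceiveFile RPC above is client-streaming: the first message must carry a ReceiveFileInfo, each later message carries a file_content chunk, and the server replies once with bytes_written or an error string. A minimal sender sketch against the generated Go client (the helper name and 64 KiB chunk size are illustrative, not part of this commit):

// Sketch only; assumes the generated volume_server_pb package plus
// "context", "fmt", and "io" imports.
func sendFileToVolumeServer(ctx context.Context, client volume_server_pb.VolumeServerClient,
	info *volume_server_pb.ReceiveFileInfo, r io.Reader) (uint64, error) {
	stream, err := client.ReceiveFile(ctx)
	if err != nil {
		return 0, err
	}
	// The metadata message must come first; the server rejects content otherwise.
	if err := stream.Send(&volume_server_pb.ReceiveFileRequest{
		Data: &volume_server_pb.ReceiveFileRequest_Info{Info: info},
	}); err != nil {
		return 0, err
	}
	buf := make([]byte, 64*1024)
	for {
		n, readErr := r.Read(buf)
		if n > 0 {
			if err := stream.Send(&volume_server_pb.ReceiveFileRequest{
				Data: &volume_server_pb.ReceiveFileRequest_FileContent{FileContent: buf[:n]},
			}); err != nil {
				return 0, err
			}
		}
		if readErr == io.EOF {
			break
		}
		if readErr != nil {
			return 0, readErr
		}
	}
	resp, err := stream.CloseAndRecv()
	if err != nil {
		return 0, err
	}
	if resp.Error != "" {
		return resp.BytesWritten, fmt.Errorf("ReceiveFile: %s", resp.Error)
	}
	return resp.BytesWritten, nil
}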

966
weed/pb/volume_server_pb/volume_server.pb.go
File diff suppressed because it is too large

43
weed/pb/volume_server_pb/volume_server_grpc.pb.go

@ -38,6 +38,7 @@ const (
VolumeServer_VolumeCopy_FullMethodName = "/volume_server_pb.VolumeServer/VolumeCopy"
VolumeServer_ReadVolumeFileStatus_FullMethodName = "/volume_server_pb.VolumeServer/ReadVolumeFileStatus"
VolumeServer_CopyFile_FullMethodName = "/volume_server_pb.VolumeServer/CopyFile"
VolumeServer_ReceiveFile_FullMethodName = "/volume_server_pb.VolumeServer/ReceiveFile"
VolumeServer_ReadNeedleBlob_FullMethodName = "/volume_server_pb.VolumeServer/ReadNeedleBlob"
VolumeServer_ReadNeedleMeta_FullMethodName = "/volume_server_pb.VolumeServer/ReadNeedleMeta"
VolumeServer_WriteNeedleBlob_FullMethodName = "/volume_server_pb.VolumeServer/WriteNeedleBlob"
@ -88,6 +89,7 @@ type VolumeServerClient interface {
VolumeCopy(ctx context.Context, in *VolumeCopyRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[VolumeCopyResponse], error)
ReadVolumeFileStatus(ctx context.Context, in *ReadVolumeFileStatusRequest, opts ...grpc.CallOption) (*ReadVolumeFileStatusResponse, error)
CopyFile(ctx context.Context, in *CopyFileRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[CopyFileResponse], error)
ReceiveFile(ctx context.Context, opts ...grpc.CallOption) (grpc.ClientStreamingClient[ReceiveFileRequest, ReceiveFileResponse], error)
ReadNeedleBlob(ctx context.Context, in *ReadNeedleBlobRequest, opts ...grpc.CallOption) (*ReadNeedleBlobResponse, error)
ReadNeedleMeta(ctx context.Context, in *ReadNeedleMetaRequest, opts ...grpc.CallOption) (*ReadNeedleMetaResponse, error)
WriteNeedleBlob(ctx context.Context, in *WriteNeedleBlobRequest, opts ...grpc.CallOption) (*WriteNeedleBlobResponse, error)
@ -351,6 +353,19 @@ func (c *volumeServerClient) CopyFile(ctx context.Context, in *CopyFileRequest,
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type VolumeServer_CopyFileClient = grpc.ServerStreamingClient[CopyFileResponse]
func (c *volumeServerClient) ReceiveFile(ctx context.Context, opts ...grpc.CallOption) (grpc.ClientStreamingClient[ReceiveFileRequest, ReceiveFileResponse], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[4], VolumeServer_ReceiveFile_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
x := &grpc.GenericClientStream[ReceiveFileRequest, ReceiveFileResponse]{ClientStream: stream}
return x, nil
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type VolumeServer_ReceiveFileClient = grpc.ClientStreamingClient[ReceiveFileRequest, ReceiveFileResponse]
func (c *volumeServerClient) ReadNeedleBlob(ctx context.Context, in *ReadNeedleBlobRequest, opts ...grpc.CallOption) (*ReadNeedleBlobResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(ReadNeedleBlobResponse)
@ -383,7 +398,7 @@ func (c *volumeServerClient) WriteNeedleBlob(ctx context.Context, in *WriteNeedl
func (c *volumeServerClient) ReadAllNeedles(ctx context.Context, in *ReadAllNeedlesRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[ReadAllNeedlesResponse], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[4], VolumeServer_ReadAllNeedles_FullMethodName, cOpts...)
stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[5], VolumeServer_ReadAllNeedles_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
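The remaining hunks in this file are mechanical renumbering: registering ReceiveFile as a new client-streaming entry in VolumeServer_ServiceDesc.Streams shifts every later stream index up by one, hence the Streams[4] to Streams[5] change just above and the analogous bumps through Streams[9] to Streams[10] below.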
@ -402,7 +417,7 @@ type VolumeServer_ReadAllNeedlesClient = grpc.ServerStreamingClient[ReadAllNeedl
func (c *volumeServerClient) VolumeTailSender(ctx context.Context, in *VolumeTailSenderRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[VolumeTailSenderResponse], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[5], VolumeServer_VolumeTailSender_FullMethodName, cOpts...)
stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[6], VolumeServer_VolumeTailSender_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
@ -491,7 +506,7 @@ func (c *volumeServerClient) VolumeEcShardsUnmount(ctx context.Context, in *Volu
func (c *volumeServerClient) VolumeEcShardRead(ctx context.Context, in *VolumeEcShardReadRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[VolumeEcShardReadResponse], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[6], VolumeServer_VolumeEcShardRead_FullMethodName, cOpts...)
stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[7], VolumeServer_VolumeEcShardRead_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
@ -530,7 +545,7 @@ func (c *volumeServerClient) VolumeEcShardsToVolume(ctx context.Context, in *Vol
func (c *volumeServerClient) VolumeTierMoveDatToRemote(ctx context.Context, in *VolumeTierMoveDatToRemoteRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[VolumeTierMoveDatToRemoteResponse], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[7], VolumeServer_VolumeTierMoveDatToRemote_FullMethodName, cOpts...)
stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[8], VolumeServer_VolumeTierMoveDatToRemote_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
@ -549,7 +564,7 @@ type VolumeServer_VolumeTierMoveDatToRemoteClient = grpc.ServerStreamingClient[V
func (c *volumeServerClient) VolumeTierMoveDatFromRemote(ctx context.Context, in *VolumeTierMoveDatFromRemoteRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[VolumeTierMoveDatFromRemoteResponse], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[8], VolumeServer_VolumeTierMoveDatFromRemote_FullMethodName, cOpts...)
stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[9], VolumeServer_VolumeTierMoveDatFromRemote_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
@ -598,7 +613,7 @@ func (c *volumeServerClient) FetchAndWriteNeedle(ctx context.Context, in *FetchA
func (c *volumeServerClient) Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[QueriedStripe], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[9], VolumeServer_Query_FullMethodName, cOpts...)
stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[10], VolumeServer_Query_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
@ -660,6 +675,7 @@ type VolumeServerServer interface {
VolumeCopy(*VolumeCopyRequest, grpc.ServerStreamingServer[VolumeCopyResponse]) error
ReadVolumeFileStatus(context.Context, *ReadVolumeFileStatusRequest) (*ReadVolumeFileStatusResponse, error)
CopyFile(*CopyFileRequest, grpc.ServerStreamingServer[CopyFileResponse]) error
ReceiveFile(grpc.ClientStreamingServer[ReceiveFileRequest, ReceiveFileResponse]) error
ReadNeedleBlob(context.Context, *ReadNeedleBlobRequest) (*ReadNeedleBlobResponse, error)
ReadNeedleMeta(context.Context, *ReadNeedleMetaRequest) (*ReadNeedleMetaResponse, error)
WriteNeedleBlob(context.Context, *WriteNeedleBlobRequest) (*WriteNeedleBlobResponse, error)
@ -754,6 +770,9 @@ func (UnimplementedVolumeServerServer) ReadVolumeFileStatus(context.Context, *Re
func (UnimplementedVolumeServerServer) CopyFile(*CopyFileRequest, grpc.ServerStreamingServer[CopyFileResponse]) error {
return status.Errorf(codes.Unimplemented, "method CopyFile not implemented")
}
func (UnimplementedVolumeServerServer) ReceiveFile(grpc.ClientStreamingServer[ReceiveFileRequest, ReceiveFileResponse]) error {
return status.Errorf(codes.Unimplemented, "method ReceiveFile not implemented")
}
func (UnimplementedVolumeServerServer) ReadNeedleBlob(context.Context, *ReadNeedleBlobRequest) (*ReadNeedleBlobResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ReadNeedleBlob not implemented")
}
@ -1158,6 +1177,13 @@ func _VolumeServer_CopyFile_Handler(srv interface{}, stream grpc.ServerStream) e
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type VolumeServer_CopyFileServer = grpc.ServerStreamingServer[CopyFileResponse]
func _VolumeServer_ReceiveFile_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(VolumeServerServer).ReceiveFile(&grpc.GenericServerStream[ReceiveFileRequest, ReceiveFileResponse]{ServerStream: stream})
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type VolumeServer_ReceiveFileServer = grpc.ClientStreamingServer[ReceiveFileRequest, ReceiveFileResponse]
func _VolumeServer_ReadNeedleBlob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ReadNeedleBlobRequest)
if err := dec(in); err != nil {
@ -1687,6 +1713,11 @@ var VolumeServer_ServiceDesc = grpc.ServiceDesc{
Handler: _VolumeServer_CopyFile_Handler,
ServerStreams: true,
},
{
StreamName: "ReceiveFile",
Handler: _VolumeServer_ReceiveFile_Handler,
ClientStreams: true,
},
{
StreamName: "ReadAllNeedles",
Handler: _VolumeServer_ReadAllNeedles_Handler,

117
weed/server/volume_grpc_copy.go

@ -402,3 +402,120 @@ func (vs *VolumeServer) CopyFile(req *volume_server_pb.CopyFileRequest, stream v
return nil
}
// ReceiveFile receives a file stream from the client and writes it to storage
func (vs *VolumeServer) ReceiveFile(stream volume_server_pb.VolumeServer_ReceiveFileServer) error {
var fileInfo *volume_server_pb.ReceiveFileInfo
var targetFile *os.File
var filePath string
var bytesWritten uint64
defer func() {
if targetFile != nil {
targetFile.Close()
}
}()
for {
req, err := stream.Recv()
if err == io.EOF {
// Stream completed successfully
if targetFile != nil {
targetFile.Sync()
glog.V(1).Infof("Successfully received file %s (%d bytes)", filePath, bytesWritten)
}
return stream.SendAndClose(&volume_server_pb.ReceiveFileResponse{
BytesWritten: bytesWritten,
})
}
if err != nil {
// Clean up on error
if targetFile != nil {
targetFile.Close()
os.Remove(filePath)
}
glog.Errorf("Failed to receive stream: %v", err)
return fmt.Errorf("failed to receive stream: %v", err)
}
switch data := req.Data.(type) {
case *volume_server_pb.ReceiveFileRequest_Info:
// First message contains file info
fileInfo = data.Info
glog.V(1).Infof("ReceiveFile: volume %d, ext %s, collection %s, shard %d, size %d",
fileInfo.VolumeId, fileInfo.Ext, fileInfo.Collection, fileInfo.ShardId, fileInfo.FileSize)
// Create file path based on file info
if fileInfo.IsEcVolume {
// Find storage location for EC shard
var targetLocation *storage.DiskLocation
for _, location := range vs.store.Locations {
if location.DiskType == types.HardDriveType {
targetLocation = location
break
}
}
if targetLocation == nil && len(vs.store.Locations) > 0 {
targetLocation = vs.store.Locations[0] // Fall back to first available location
}
if targetLocation == nil {
glog.Errorf("ReceiveFile: no storage location available")
return stream.SendAndClose(&volume_server_pb.ReceiveFileResponse{
Error: "no storage location available",
})
}
// Create EC shard file path
baseFileName := erasure_coding.EcShardBaseFileName(fileInfo.Collection, int(fileInfo.VolumeId))
filePath = util.Join(targetLocation.Directory, baseFileName+fileInfo.Ext)
} else {
// Regular volume file
v := vs.store.GetVolume(needle.VolumeId(fileInfo.VolumeId))
if v == nil {
glog.Errorf("ReceiveFile: volume %d not found", fileInfo.VolumeId)
return stream.SendAndClose(&volume_server_pb.ReceiveFileResponse{
Error: fmt.Sprintf("volume %d not found", fileInfo.VolumeId),
})
}
filePath = v.FileName(fileInfo.Ext)
}
// Create target file
targetFile, err = os.Create(filePath)
if err != nil {
glog.Errorf("ReceiveFile: failed to create file %s: %v", filePath, err)
return stream.SendAndClose(&volume_server_pb.ReceiveFileResponse{
Error: fmt.Sprintf("failed to create file: %v", err),
})
}
glog.V(1).Infof("ReceiveFile: created target file %s", filePath)
case *volume_server_pb.ReceiveFileRequest_FileContent:
// Subsequent messages contain file content
if targetFile == nil {
glog.Errorf("ReceiveFile: file info must be sent first")
return stream.SendAndClose(&volume_server_pb.ReceiveFileResponse{
Error: "file info must be sent first",
})
}
n, err := targetFile.Write(data.FileContent)
if err != nil {
targetFile.Close()
os.Remove(filePath)
glog.Errorf("ReceiveFile: failed to write to file %s: %v", filePath, err)
return stream.SendAndClose(&volume_server_pb.ReceiveFileResponse{
Error: fmt.Sprintf("failed to write file: %v", err),
})
}
bytesWritten += uint64(n)
glog.V(2).Infof("ReceiveFile: wrote %d bytes to %s (total: %d)", n, filePath, bytesWritten)
default:
glog.Errorf("ReceiveFile: unknown message type")
return stream.SendAndClose(&volume_server_pb.ReceiveFileResponse{
Error: "unknown message type",
})
}
}
}
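Note the handler writes to the final path while the stream is still in flight, so a dropped connection can leave a truncated file behind (the error branch removes it, but a worker crash would not). A temp-file-plus-rename variant would make the write atomic; a minimal sketch of that pattern, not what this commit does:

// atomicWrite fills a temporary file in the target directory, fsyncs it, and
// renames it into place so a partial file never appears under the final name.
func atomicWrite(finalPath string, fill func(*os.File) error) error {
	tmp, err := os.CreateTemp(filepath.Dir(finalPath), filepath.Base(finalPath)+".tmp*")
	if err != nil {
		return err
	}
	defer os.Remove(tmp.Name()) // no-op after a successful rename
	if err := fill(tmp); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Sync(); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	return os.Rename(tmp.Name(), finalPath)
}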

5
weed/server/volume_server_handlers_admin.go

@ -1,11 +1,12 @@
package weed_server
import (
"net/http"
"path/filepath"
"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
"github.com/seaweedfs/seaweedfs/weed/stats"
"github.com/seaweedfs/seaweedfs/weed/topology"
"github.com/seaweedfs/seaweedfs/weed/util/version"
)

638
weed/worker/tasks/erasure_coding/ec.go

@ -5,24 +5,24 @@ import (
"fmt"
"io"
"math"
"os"
"path/filepath"
"sort"
"sync"
"time"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/operation"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
"github.com/seaweedfs/seaweedfs/weed/storage/needle"
"github.com/seaweedfs/seaweedfs/weed/storage/volume_info"
"github.com/seaweedfs/seaweedfs/weed/worker/tasks"
"github.com/seaweedfs/seaweedfs/weed/worker/types"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
)
@ -106,7 +106,7 @@ func (t *Task) SetDialOption(dialOpt grpc.DialOption) {
// Execute performs the EC operation following command_ec_encode.go pattern but with local processing
func (t *Task) Execute(params types.TaskParams) error {
glog.Infof("Starting erasure coding for volume %d from server %s (download → ec → distribute)", t.volumeID, t.sourceServer)
glog.V(1).Infof("Starting erasure coding for volume %d from server %s (download → ec → distribute)", t.volumeID, t.sourceServer)
// Extract parameters - use the actual collection from task params
t.collection = params.Collection
@ -127,121 +127,129 @@ func (t *Task) Execute(params types.TaskParams) error {
if err := os.MkdirAll(taskWorkDir, 0755); err != nil {
return fmt.Errorf("failed to create task working directory %s: %v", taskWorkDir, err)
}
glog.V(1).Infof("Created task working directory: %s", taskWorkDir)
glog.V(1).Infof("WORKFLOW: Created working directory: %s", taskWorkDir)
// Defer cleanup of working directory
// Ensure cleanup of working directory
defer func() {
if err := os.RemoveAll(taskWorkDir); err != nil {
glog.Warningf("Failed to cleanup task working directory %s: %v", taskWorkDir, err)
glog.Warningf("Failed to cleanup working directory %s: %v", taskWorkDir, err)
} else {
glog.V(1).Infof("Cleaned up task working directory: %s", taskWorkDir)
glog.V(1).Infof("WORKFLOW: Cleaned up working directory: %s", taskWorkDir)
}
}()
// Step 1: Collect volume locations from master BEFORE EC encoding starts (following command_ec_encode.go pattern)
glog.V(1).Infof("WORKFLOW STEP 1: Collecting volume locations from master")
volumeId := needle.VolumeId(t.volumeID)
t.SetProgress(5.0)
volumeLocations, err := t.collectVolumeLocations(volumeId)
if err != nil {
return fmt.Errorf("failed to collect volume locations before EC encoding: %v", err)
}
glog.V(1).Infof("WORKFLOW: Found volume %d on %d servers: %v", t.volumeID, len(volumeLocations), volumeLocations)
// Step 2: Mark volume readonly on all replica servers (following command_ec_encode.go)
t.SetProgress(10.0)
glog.V(1).Infof("WORKFLOW STEP 2: Marking volume %d readonly on all replica servers", t.volumeID)
err = t.markVolumeReadonlyOnAllReplicas(volumeId, volumeLocations)
if err != nil {
return fmt.Errorf("failed to mark volume readonly: %v", err)
}
glog.V(1).Infof("WORKFLOW: Volume %d marked readonly on all replicas", t.volumeID)
// Step 3: Copy volume data to local worker (use task-specific directory)
t.SetProgress(20.0)
glog.V(1).Infof("WORKFLOW STEP 3: Downloading volume %d data to worker", t.volumeID)
err = t.copyVolumeDataLocally(taskWorkDir)
if err != nil {
return fmt.Errorf("failed to copy volume data locally: %v", err)
}
glog.V(1).Infof("WORKFLOW: Volume %d data downloaded successfully", t.volumeID)
// Step 4: Perform local EC encoding on the worker
t.SetProgress(40.0)
glog.V(1).Infof("WORKFLOW STEP 4: Performing local EC encoding for volume %d", t.volumeID)
shardFiles, err := t.performLocalECEncoding(taskWorkDir)
if err != nil {
return fmt.Errorf("failed to generate EC shards locally: %v", err)
}
glog.V(1).Infof("WORKFLOW: Generated %d EC shards for volume %d", len(shardFiles), t.volumeID)
// Step 5: Distribute shards across servers (following command_ec_encode.go balance logic)
t.SetProgress(60.0)
glog.V(1).Infof("WORKFLOW STEP 5: Distributing EC shards across cluster")
err = t.distributeEcShardsAcrossServers(shardFiles, taskWorkDir)
if err != nil {
return fmt.Errorf("failed to distribute EC shards: %v", err)
}
glog.V(1).Infof("WORKFLOW: EC shards distributed and mounted successfully")
// Step 6: Delete original volume from ALL replica locations (following command_ec_encode.go pattern)
t.SetProgress(90.0)
glog.V(1).Infof("WORKFLOW STEP 6: Deleting original volume %d from all replica servers", t.volumeID)
err = t.deleteVolumeFromAllLocations(volumeId, volumeLocations)
if err != nil {
glog.Warningf("Failed to delete original volume %d from all locations: %v (may need manual cleanup)", t.volumeID, err)
// This is not a critical failure - the EC encoding itself succeeded
} else {
glog.V(1).Infof("WORKFLOW: Original volume %d deleted from all replicas", t.volumeID)
}
// Step 7: Final success
t.SetProgress(100.0)
glog.V(1).Infof("WORKFLOW COMPLETE: Successfully completed erasure coding for volume %d", t.volumeID)
return nil
}
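Condensed, Execute is a linear pipeline where every step is fatal except the final source-volume deletion, which only warns because the EC copy is already durable. A sketch of that shape, with placeholder step closures standing in for the methods above:

// Placeholder step closures; in the real task these are the methods above.
var collect, markReadonly, download, encode, distribute, deleteSource func() error

steps := []struct {
	name  string
	run   func() error
	fatal bool
}{
	{"collect locations", collect, true},
	{"mark readonly", markReadonly, true},
	{"download volume", download, true},
	{"encode shards", encode, true},
	{"distribute shards", distribute, true},
	{"delete source volume", deleteSource, false}, // soft-fail: log and continue
}
for _, s := range steps {
	if err := s.run(); err != nil {
		if s.fatal {
			return err
		}
		glog.Warningf("%s failed: %v (may need manual cleanup)", s.name, err)
	}
}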
// copyVolumeDataLocally downloads .dat and .idx files from the source server to the local working directory
func (t *Task) copyVolumeDataLocally(workDir string) error {
t.currentStep = "copying_volume_data"
t.SetProgress(5.0)
glog.V(1).Infof("Copying volume %d data from %s to local disk", t.volumeID, t.sourceServer)
ctx := context.Background()
t.currentStep = "copying"
t.SetProgress(10.0)
glog.V(1).Infof("Copying volume %d data from server %s to local directory %s", t.volumeID, t.sourceServer, workDir)
// Connect to source volume server
grpcAddress := pb.ServerToGrpcAddress(string(t.sourceServer))
conn, err := grpc.NewClient(grpcAddress, t.grpcDialOpt)
if err != nil {
glog.Errorf("COPY ERROR: Failed to connect to source server %s: %v", t.sourceServer, err)
return fmt.Errorf("failed to connect to source server %s: %v", t.sourceServer, err)
}
defer conn.Close()
glog.V(1).Infof("COPY GRPC: Connected to source server %s", t.sourceServer)
client := volume_server_pb.NewVolumeServerClient(conn)
ctx := context.Background()
// Copy .dat file
datFile := filepath.Join(workDir, fmt.Sprintf("%d.dat", t.volumeID))
if err := t.copyVolumeFile(client, ctx, t.volumeID, ".dat", datFile, statusResp.VolumeSize); err != nil {
glog.V(1).Infof("COPY START: Downloading .dat file for volume %d to %s", t.volumeID, datFile)
err = t.copyVolumeFile(client, ctx, t.volumeID, ".dat", datFile, 0)
if err != nil {
glog.Errorf("COPY ERROR: Failed to copy .dat file: %v", err)
return fmt.Errorf("failed to copy .dat file: %v", err)
}
// Verify .dat file was copied
if datInfo, err := os.Stat(datFile); err != nil {
glog.Errorf("COPY ERROR: .dat file not found after copy: %v", err)
return fmt.Errorf(".dat file not found after copy: %v", err)
} else {
glog.V(1).Infof("COPY SUCCESS: .dat file copied successfully (%d bytes)", datInfo.Size())
}
// Copy .idx file
idxFile := filepath.Join(workDir, fmt.Sprintf("%d.idx", t.volumeID))
if err := t.copyVolumeFile(client, ctx, t.volumeID, ".idx", idxFile, 0); err != nil {
glog.V(1).Infof("COPY START: Downloading .idx file for volume %d to %s", t.volumeID, idxFile)
err = t.copyVolumeFile(client, ctx, t.volumeID, ".idx", idxFile, 0)
if err != nil {
glog.Errorf("COPY ERROR: Failed to copy .idx file: %v", err)
return fmt.Errorf("failed to copy .idx file: %v", err)
}
// Verify .idx file was copied
if idxInfo, err := os.Stat(idxFile); err != nil {
glog.Errorf("COPY ERROR: .idx file not found after copy: %v", err)
return fmt.Errorf(".idx file not found after copy: %v", err)
} else {
glog.V(1).Infof("COPY SUCCESS: .idx file copied successfully (%d bytes)", idxInfo.Size())
}
t.SetProgress(15.0)
glog.V(1).Infof("Successfully copied volume %d files to %s", t.volumeID, workDir)
glog.V(1).Infof("COPY COMPLETED: Successfully copied volume %d files (.dat and .idx) to %s", t.volumeID, workDir)
return nil
}
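Beyond the os.Stat existence checks above, the downloaded .idx file allows a cheap structural check: assuming the standard 16-byte index entry (8-byte needle id, 4-byte offset, 4-byte size), its length must be a multiple of 16. A sketch:

// checkIdxAligned rejects an .idx download whose size cannot be a whole
// number of 16-byte entries. Assumes the standard SeaweedFS entry layout.
func checkIdxAligned(path string) error {
	info, err := os.Stat(path)
	if err != nil {
		return err
	}
	if info.Size()%16 != 0 {
		return fmt.Errorf("idx file %s has size %d, not a multiple of 16", path, info.Size())
	}
	return nil
}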
@ -249,7 +257,7 @@ func (t *Task) copyVolumeDataLocally(workDir string) error {
func (t *Task) copyVolumeFile(client volume_server_pb.VolumeServerClient, ctx context.Context,
volumeID uint32, extension string, localPath string, expectedSize uint64) error {
glog.V(2).Infof("Starting to copy volume %d%s from source server", volumeID, extension)
glog.V(2).Infof("FILE COPY START: Copying volume %d%s from source server", volumeID, extension)
// Stream volume file data using CopyFile API with proper parameters (following shell implementation)
stream, err := client.CopyFile(ctx, &volume_server_pb.CopyFileRequest{
@ -262,48 +270,53 @@ func (t *Task) copyVolumeFile(client volume_server_pb.VolumeServerClient, ctx co
IgnoreSourceFileNotFound: false, // Fail if source file not found
})
if err != nil {
glog.Errorf("FILE COPY ERROR: Failed to start file copy stream for %s: %v", extension, err)
return fmt.Errorf("failed to start file copy stream: %v", err)
}
glog.V(2).Infof("FILE COPY GRPC: Created copy stream for %s", extension)
// Create local file
file, err := os.Create(localPath)
if err != nil {
glog.Errorf("FILE COPY ERROR: Failed to create local file %s: %v", localPath, err)
return fmt.Errorf("failed to create local file %s: %v", localPath, err)
}
defer file.Close()
glog.V(2).Infof("FILE COPY LOCAL: Created local file %s", localPath)
// Copy data with progress tracking
var totalBytes int64
chunkCount := 0
for {
resp, err := stream.Recv()
if err == io.EOF {
glog.V(2).Infof("FILE COPY COMPLETE: Finished streaming %s (%d bytes in %d chunks)", extension, totalBytes, chunkCount)
break
}
if err != nil {
return fmt.Errorf("failed to receive file data: %v", err)
glog.Errorf("FILE COPY ERROR: Failed to receive data for %s: %v", extension, err)
return fmt.Errorf("failed to receive stream data: %v", err)
}
if len(resp.FileContent) > 0 {
n, err := file.Write(resp.FileContent)
if err != nil {
glog.Errorf("FILE COPY ERROR: Failed to write to local file %s: %v", localPath, err)
return fmt.Errorf("failed to write to local file: %v", err)
}
totalBytes += int64(n)
chunkCount++
glog.V(3).Infof("FILE COPY CHUNK: %s chunk %d written (%d bytes, total: %d)", extension, chunkCount, n, totalBytes)
}
}
// Sync to disk
err = file.Sync()
if err != nil {
glog.Warningf("FILE COPY WARNING: Failed to sync %s to disk: %v", localPath, err)
}
glog.V(2).Infof("FILE COPY SUCCESS: Successfully copied %s (%d bytes total)", extension, totalBytes)
return nil
}
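The receive loop writes chunks straight to disk and nothing verifies the bytes against the source. One possible extension, not in this commit, is hashing while writing and comparing the digest with a source-provided checksum:

// writeWithDigest writes each chunk to dst while feeding a running SHA-256,
// returning the hex digest and byte count for comparison with the source.
// Imports: crypto/sha256, encoding/hex, io, os.
func writeWithDigest(dst *os.File, chunks <-chan []byte) (string, int64, error) {
	h := sha256.New()
	w := io.MultiWriter(dst, h)
	var total int64
	for chunk := range chunks {
		n, err := w.Write(chunk)
		if err != nil {
			return "", total, err
		}
		total += int64(n)
	}
	return hex.EncodeToString(h.Sum(nil)), total, nil
}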
@ -370,16 +383,45 @@ func (t *Task) performLocalECEncoding(workDir string) ([]string, error) {
glog.V(1).Infof("Starting EC encoding with base filename: %s", baseFileName)
// Generate EC shards using SeaweedFS erasure coding library
glog.V(1).Infof("Starting EC shard generation for volume %d", t.volumeID)
err = erasure_coding.WriteEcFiles(baseFileName)
if err != nil {
return nil, fmt.Errorf("failed to write EC files: %v", err)
}
glog.V(1).Infof("Completed EC shard generation for volume %d", t.volumeID)
// Generate .ecx file from .idx file
glog.V(1).Infof("Creating .ecx index file for volume %d", t.volumeID)
err = erasure_coding.WriteSortedFileFromIdx(baseFileName, ".ecx")
if err != nil {
return nil, fmt.Errorf("failed to write .ecx file: %v", err)
}
glog.V(1).Infof("Successfully created .ecx index file for volume %d", t.volumeID)
// Create .ecj file (EC journal file) - initially empty for new EC volumes
ecjFile := baseFileName + ".ecj"
glog.V(1).Infof("Creating .ecj journal file: %s", ecjFile)
ecjFileHandle, err := os.Create(ecjFile)
if err != nil {
return nil, fmt.Errorf("failed to create .ecj file: %v", err)
}
ecjFileHandle.Close()
glog.V(1).Infof("Successfully created .ecj journal file: %s", ecjFile)
// Create .vif file (volume info file) with basic volume information
vifFile := baseFileName + ".vif"
glog.V(1).Infof("Creating .vif volume info file: %s", vifFile)
volumeInfo := &volume_server_pb.VolumeInfo{
Version: 3, // needle.Version3
DatFileSize: datInfo.Size(), // int64
}
// Save volume info to .vif file using the standard SeaweedFS function
err = volume_info.SaveVolumeInfo(vifFile, volumeInfo)
if err != nil {
return nil, fmt.Errorf("failed to create .vif file: %v", err)
}
glog.V(1).Infof("Successfully created .vif volume info file: %s", vifFile)
// Prepare list of generated shard files
shardFiles := make([]string, t.totalShards)
@ -387,12 +429,24 @@ func (t *Task) performLocalECEncoding(workDir string) ([]string, error) {
shardFiles[i] = filepath.Join(workDir, fmt.Sprintf("%d.ec%02d", t.volumeID, i))
}
// Verify that ALL shards were created and log each one
glog.V(1).Infof("Verifying all %d EC shards were created for volume %d", t.totalShards, t.volumeID)
for i, shardFile := range shardFiles {
if info, err := os.Stat(shardFile); err != nil {
glog.Warningf("Shard %d file %s not found: %v", i, shardFile, err)
glog.Errorf("MISSING SHARD: Shard %d file %s not found: %v", i, shardFile, err)
return nil, fmt.Errorf("shard %d was not created: %v", i, err)
} else {
glog.V(1).Infof("SHARD CREATED: Shard %d: %s (%d bytes)", i, shardFile, info.Size())
}
}
// Verify auxiliary files were created
auxFiles := []string{baseFileName + ".ecx", baseFileName + ".ecj", baseFileName + ".vif"}
for _, auxFile := range auxFiles {
if info, err := os.Stat(auxFile); err != nil {
glog.Errorf("MISSING AUX FILE: %s not found: %v", auxFile, err)
} else {
glog.V(2).Infof("Created shard %d: %s (%d bytes)", i, shardFile, info.Size())
glog.V(1).Infof("AUX FILE CREATED: %s (%d bytes)", auxFile, info.Size())
}
}
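For context on the verification above: SeaweedFS erasure coding uses 10 data plus 4 parity Reed-Solomon shards, so TotalShardsCount is 14 and any 10 surviving shards can rebuild the volume. Expressed directly:

const (
	dataShards   = 10 // erasure_coding.DataShardsCount
	parityShards = 4  // erasure_coding.ParityShardsCount
	totalShards  = dataShards + parityShards // 14, matches TotalShardsCount
)

// isRecoverable reports whether enough shards survive to rebuild the volume.
func isRecoverable(availableShards int) bool {
	return availableShards >= dataShards
}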
@ -816,7 +870,7 @@ func (t *Task) collectVolumeLocations(volumeId needle.VolumeId) ([]pb.ServerAddr
}
// markVolumeReadonlyOnAllReplicas marks volume as readonly on all replicas (following command_ec_encode.go pattern)
func (t *Task) markVolumeReadonlyOnAllReplicas(volumeId needle.VolumeId, locations []pb.ServerAddress) error {
// Use parallel processing like command_ec_encode.go
var wg sync.WaitGroup
errorChan := make(chan error, len(locations))
@ -836,10 +890,10 @@ func (t *Task) markVolumeReadonlyOnAllReplicas(locations []pb.ServerAddress) err
client := volume_server_pb.NewVolumeServerClient(conn)
_, err = client.VolumeMarkReadonly(context.Background(), &volume_server_pb.VolumeMarkReadonlyRequest{
VolumeId: uint32(volumeId),
})
if err != nil {
errorChan <- fmt.Errorf("failed to mark volume %d readonly on %s: %v", t.volumeID, addr, err)
errorChan <- fmt.Errorf("failed to mark volume %d readonly on %s: %v", volumeId, addr, err)
}
}(location)
}
@ -857,80 +911,268 @@ func (t *Task) markVolumeReadonlyOnAllReplicas(locations []pb.ServerAddress) err
return nil
}
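The readonly, delete, and upload paths all repeat this fan-out-and-collect-first-error shape. Its generic form, using plain string addresses to stay self-contained:

// forEachAddress applies fn to every address in parallel and surfaces the
// first error, mirroring the command_ec_encode.go pattern used above.
func forEachAddress(addrs []string, fn func(addr string) error) error {
	var wg sync.WaitGroup
	errCh := make(chan error, len(addrs))
	for _, addr := range addrs {
		wg.Add(1)
		go func(addr string) {
			defer wg.Done()
			if err := fn(addr); err != nil {
				errCh <- err
			}
		}(addr)
	}
	wg.Wait()
	close(errCh)
	for err := range errCh {
		return err // first error wins
	}
	return nil
}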
// distributeEcShardsAcrossServers distributes EC shards across volume servers
func (t *Task) distributeEcShardsAcrossServers(shardFiles []string, taskWorkDir string) error {
t.currentStep = "distributing"
t.SetProgress(70.0)
glog.V(1).Infof("Distributing %d EC shards across servers", len(shardFiles))
// Get volume servers from topology
var volumeServers []pb.ServerAddress
err := operation.WithMasterServerClient(false, pb.ServerAddress(t.masterClient), t.grpcDialOpt, func(masterClient master_pb.SeaweedClient) error {
topologyResp, err := masterClient.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
if err != nil {
return err
}
// Extract unique volume server addresses
serverSet := make(map[string]bool)
for _, dc := range topologyResp.TopologyInfo.DataCenterInfos {
for _, rack := range dc.RackInfos {
for _, node := range rack.DataNodeInfos {
serverAddr := pb.NewServerAddressFromDataNode(node)
serverKey := string(serverAddr)
if !serverSet[serverKey] {
serverSet[serverKey] = true
volumeServers = append(volumeServers, serverAddr)
}
}
}
}
return nil
})
if err != nil {
return fmt.Errorf("failed to get volume servers: %v", err)
}
if len(volumeServers) == 0 {
return fmt.Errorf("no volume servers available for EC distribution")
}
// Distribute shards in round-robin fashion
shardTargets := make(map[int]pb.ServerAddress)
targetServers := make(map[string]bool) // Track unique target servers
for i, shardFile := range shardFiles {
targetServer := volumeServers[i%len(volumeServers)]
shardTargets[i] = targetServer
targetServers[string(targetServer)] = true
glog.V(1).Infof("Shard %d (%s) will go to server %s", i, shardFile, targetServer)
}
// Upload auxiliary files (.ecx, .ecj, .vif) to ALL servers that will receive shards
// These files are needed for EC volume mounting on each server
glog.V(1).Infof("Uploading auxiliary files to %d target servers", len(targetServers))
for serverAddr := range targetServers {
targetServer := pb.ServerAddress(serverAddr)
glog.V(1).Infof("Starting auxiliary file upload to server: %s", targetServer)
err = t.uploadAuxiliaryFiles(taskWorkDir, targetServer)
if err != nil {
return fmt.Errorf("failed to upload auxiliary files to %s: %v", targetServer, err)
}
glog.V(1).Infof("Completed auxiliary file upload to server: %s", targetServer)
}
// Upload all shards to their target servers
glog.V(1).Infof("Starting shard upload phase - uploading %d shards to %d servers", len(shardFiles), len(targetServers))
var wg sync.WaitGroup
errorChan := make(chan error, len(shardFiles))
for i, shardFile := range shardFiles {
wg.Add(1)
go func(shardId int, shardFile string, targetServer pb.ServerAddress) {
defer wg.Done()
glog.V(1).Infof("SHARD UPLOAD START: Uploading shard %d (%s) to server %s", shardId, shardFile, targetServer)
if err := t.uploadShardToTargetServer(shardFile, targetServer, uint32(shardId)); err != nil {
glog.Errorf("SHARD UPLOAD FAILED: Shard %d to %s failed: %v", shardId, targetServer, err)
errorChan <- fmt.Errorf("failed to upload shard %d to %s: %v", shardId, targetServer, err)
} else {
glog.V(1).Infof("SHARD UPLOAD SUCCESS: Shard %d successfully uploaded to %s", shardId, targetServer)
}
}(i, shardFile, shardTargets[i])
}
wg.Wait()
close(errorChan)
// Check for upload errors
for err := range errorChan {
return err // Return first error encountered
}
glog.V(1).Infof("All %d shards uploaded successfully", len(shardFiles))
// Mount all shards on their respective servers
glog.V(1).Infof("Starting shard mounting phase - mounting %d shards", len(shardTargets))
for shardId, targetServer := range shardTargets {
glog.V(1).Infof("SHARD MOUNT START: Mounting shard %d on server %s", shardId, targetServer)
err = t.mountShardOnServer(targetServer, uint32(shardId))
if err != nil {
glog.Errorf("SHARD MOUNT FAILED: Shard %d on %s failed: %v", shardId, targetServer, err)
return fmt.Errorf("failed to mount shard %d on %s: %v", shardId, targetServer, err)
}
glog.V(1).Infof("SHARD MOUNT SUCCESS: Shard %d successfully mounted on %s", shardId, targetServer)
}
t.SetProgress(90.0)
glog.V(1).Infof("Successfully distributed and mounted all EC shards")
// Log final distribution summary for debugging
glog.V(1).Infof("EC DISTRIBUTION SUMMARY for volume %d:", t.volumeID)
glog.V(1).Infof(" - Total shards created: %d", len(shardFiles))
glog.V(1).Infof(" - Target servers: %d", len(targetServers))
glog.V(1).Infof(" - Shard distribution:")
for shardId, targetServer := range shardTargets {
glog.V(1).Infof(" Shard %d → %s", shardId, targetServer)
}
glog.V(1).Infof(" - Auxiliary files (.ecx, .ecj, .vif) uploaded to all %d servers", len(targetServers))
glog.V(1).Infof("EC ENCODING COMPLETED for volume %d", t.volumeID)
return nil
}
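In isolation the placement rule above is just shardId % len(servers). With 14 shards on, say, 3 servers the split is 5/5/4, so a single server can hold more shards than the 4-shard parity budget tolerates losing; that is the distribution weakness the commit description calls out. A sketch:

// assignRoundRobin mirrors the placement rule above: shard i goes to
// servers[i % len(servers)], regardless of rack or disk placement.
func assignRoundRobin(shardCount int, servers []string) map[int]string {
	targets := make(map[int]string, shardCount)
	for shardId := 0; shardId < shardCount; shardId++ {
		targets[shardId] = servers[shardId%len(servers)]
	}
	return targets
}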
// uploadAuxiliaryFiles uploads the .ecx, .ecj, and .vif files needed for EC volume mounting
func (t *Task) uploadAuxiliaryFiles(workDir string, targetServer pb.ServerAddress) error {
baseFileName := filepath.Join(workDir, fmt.Sprintf("%d", t.volumeID))
// List of auxiliary files to upload
auxFiles := []struct {
ext string
desc string
}{
{".ecx", "index file"},
{".ecj", "journal file"},
{".vif", "volume info file"},
}
glog.V(1).Infof("Uploading auxiliary files for volume %d to server %s", t.volumeID, targetServer)
for _, auxFile := range auxFiles {
filePath := baseFileName + auxFile.ext
// Check if file exists (some may be optional)
if _, err := os.Stat(filePath); os.IsNotExist(err) {
glog.V(1).Infof("Auxiliary file %s does not exist, skipping", filePath)
continue
}
// Upload the auxiliary file
err := t.uploadAuxiliaryFile(filePath, targetServer, auxFile.ext)
if err != nil {
return fmt.Errorf("failed to upload %s: %v", auxFile.desc, err)
}
glog.V(1).Infof("Successfully uploaded %s (%s) for volume %d to %s", auxFile.desc, auxFile.ext, t.volumeID, targetServer)
}
glog.V(1).Infof("Completed uploading auxiliary files for volume %d to %s", t.volumeID, targetServer)
return nil
}
// uploadAuxiliaryFile uploads a single auxiliary file (.ecx, .ecj, .vif) to the target server
func (t *Task) uploadAuxiliaryFile(filePath string, targetServer pb.ServerAddress, ext string) error {
glog.V(1).Infof("AUX UPLOAD START: Uploading auxiliary file %s to %s", ext, targetServer)
// Open the auxiliary file
file, err := os.Open(filePath)
if err != nil {
glog.Errorf("AUX UPLOAD ERROR: Failed to open auxiliary file %s: %v", filePath, err)
return fmt.Errorf("failed to open auxiliary file %s: %v", filePath, err)
}
defer file.Close()
fileInfo, err := file.Stat()
if err != nil {
glog.Errorf("AUX UPLOAD ERROR: Failed to stat auxiliary file %s: %v", filePath, err)
return fmt.Errorf("failed to stat auxiliary file: %v", err)
}
glog.V(1).Infof("AUX UPLOAD DETAILS: File %s size: %d bytes", filePath, fileInfo.Size())
// Connect to target volume server
grpcAddress := pb.ServerToGrpcAddress(string(targetServer))
conn, err := grpc.NewClient(grpcAddress, t.grpcDialOpt)
if err != nil {
glog.Errorf("AUX UPLOAD ERROR: Failed to connect to %s: %v", targetServer, err)
return fmt.Errorf("failed to connect to %s: %v", targetServer, err)
}
defer conn.Close()
glog.V(2).Infof("AUX UPLOAD GRPC: Connected to %s for %s", targetServer, ext)
client := volume_server_pb.NewVolumeServerClient(conn)
ctx := context.Background()
// Create streaming client for auxiliary file upload
stream, err := client.ReceiveFile(ctx)
if err != nil {
glog.Errorf("AUX UPLOAD ERROR: Failed to create receive stream for %s: %v", ext, err)
return fmt.Errorf("failed to create receive stream: %v", err)
}
glog.V(2).Infof("AUX UPLOAD GRPC: Created stream for %s", ext)
// Send file info first
err = stream.Send(&volume_server_pb.ReceiveFileRequest{
Data: &volume_server_pb.ReceiveFileRequest_Info{
Info: &volume_server_pb.ReceiveFileInfo{
VolumeId: t.volumeID,
Ext: ext,
Collection: t.collection,
IsEcVolume: true,
ShardId: 0, // Not applicable for auxiliary files
FileSize: uint64(fileInfo.Size()),
},
},
})
if err != nil {
glog.Errorf("AUX UPLOAD ERROR: Failed to send auxiliary file info for %s: %v", ext, err)
return fmt.Errorf("failed to send auxiliary file info: %v", err)
}
glog.V(2).Infof("AUX UPLOAD GRPC: Sent file info for %s", ext)
// Stream file content in chunks
buffer := make([]byte, 64*1024) // 64KB chunks
totalSent := int64(0)
chunkCount := 0
for {
n, err := file.Read(buffer)
if err != nil && err != io.EOF {
glog.Errorf("AUX UPLOAD ERROR: Failed to read auxiliary file %s: %v", filePath, err)
return fmt.Errorf("failed to read auxiliary file: %v", err)
}
// Send data if we read any
if n > 0 {
err = stream.Send(&volume_server_pb.ReceiveFileRequest{
Data: &volume_server_pb.ReceiveFileRequest_FileContent{
FileContent: buffer[:n],
},
})
if err != nil {
glog.Errorf("AUX UPLOAD ERROR: Failed to send chunk %d for %s: %v", chunkCount, ext, err)
return fmt.Errorf("failed to send auxiliary file chunk: %v", err)
}
totalSent += int64(n)
chunkCount++
glog.V(3).Infof("AUX UPLOAD CHUNK: %s chunk %d sent (%d bytes, total: %d)", ext, chunkCount, n, totalSent)
}
// Break if we reached EOF
if err == io.EOF {
break
}
}
glog.V(2).Infof("AUX UPLOAD GRPC: Completed streaming %s (%d bytes in %d chunks)", ext, totalSent, chunkCount)
// Close stream and get response
resp, err := stream.CloseAndRecv()
if err != nil {
glog.Errorf("AUX UPLOAD ERROR: Failed to close stream for %s: %v", ext, err)
return fmt.Errorf("failed to close auxiliary file stream: %v", err)
}
glog.Infof("Successfully distributed %d EC shards across %d servers", len(shardFiles), len(availableServers))
if resp.Error != "" {
glog.Errorf("AUX UPLOAD ERROR: Server error uploading %s: %s", ext, resp.Error)
return fmt.Errorf("server error uploading auxiliary file: %s", resp.Error)
}
glog.V(1).Infof("AUX UPLOAD SUCCESS: %s (%d bytes) successfully uploaded to %s", ext, resp.BytesWritten, targetServer)
return nil
}
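uploadAuxiliaryFile here and uploadShardToTargetServer below share the same 64KB read-and-send loop, which could be factored into one helper. A sketch, assuming a generated client alias VolumeServer_ReceiveFileClient (the server-side alias appears earlier in this diff; the client-side name is an assumption):

// streamFileContent sends r to the server in 64KB content chunks and returns
// the byte count; the caller sends the info message and closes the stream.
func streamFileContent(stream volume_server_pb.VolumeServer_ReceiveFileClient, r io.Reader) (int64, error) {
	buf := make([]byte, 64*1024) // same 64KB chunk size as above
	var total int64
	for {
		n, readErr := r.Read(buf)
		if n > 0 {
			if err := stream.Send(&volume_server_pb.ReceiveFileRequest{
				Data: &volume_server_pb.ReceiveFileRequest_FileContent{FileContent: buf[:n]},
			}); err != nil {
				return total, err
			}
			total += int64(n)
		}
		if readErr == io.EOF {
			return total, nil
		}
		if readErr != nil {
			return total, readErr
		}
	}
}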
@ -993,84 +1235,156 @@ func (t *Task) deleteVolumeFromAllLocations(volumeId needle.VolumeId, locations
return nil
}
// uploadShardToTargetServer streams shard data to the target server using gRPC ReceiveFile
func (t *Task) uploadShardToTargetServer(shardFile string, targetServer pb.ServerAddress, shardId uint32) error {
glog.V(1).Infof("Uploading shard file %s (shard %d) to server %s", shardFile, shardId, targetServer)
glog.V(1).Infof("UPLOAD START: Streaming shard %d to server %s via gRPC", shardId, targetServer)
// Read the shard file
file, err := os.Open(shardFile)
if err != nil {
glog.Errorf("UPLOAD ERROR: Failed to open shard file %s: %v", shardFile, err)
return fmt.Errorf("failed to open shard file %s: %v", shardFile, err)
}
defer file.Close()
fileInfo, err := file.Stat()
if err != nil {
return fmt.Errorf("shard file %s not found: %v", shardFile, err)
glog.Errorf("UPLOAD ERROR: Failed to stat shard file %s: %v", shardFile, err)
return fmt.Errorf("failed to stat shard file: %v", err)
}
if fileInfo.Size() == 0 {
glog.Errorf("UPLOAD ERROR: Shard file %s is empty", shardFile)
return fmt.Errorf("shard file %s is empty", shardFile)
}
glog.V(1).Infof("UPLOAD DETAILS: Shard %d file %s size: %d bytes", shardId, shardFile, fileInfo.Size())
// Connect to target volume server
grpcAddress := pb.ServerToGrpcAddress(string(targetServer))
conn, err := grpc.NewClient(grpcAddress, t.grpcDialOpt)
if err != nil {
glog.Errorf("UPLOAD ERROR: Failed to connect to %s: %v", targetServer, err)
return fmt.Errorf("failed to connect to %s: %v", targetServer, err)
}
defer conn.Close()
glog.V(2).Infof("UPLOAD GRPC: Connected to %s for shard %d", targetServer, shardId)
client := volume_server_pb.NewVolumeServerClient(conn)
ctx := context.Background()
// Create streaming client
stream, err := client.ReceiveFile(ctx)
if err != nil {
return fmt.Errorf("failed to create upload request: %v", err)
glog.Errorf("UPLOAD ERROR: Failed to create receive stream for shard %d: %v", shardId, err)
return fmt.Errorf("failed to create receive stream: %v", err)
}
glog.V(2).Infof("UPLOAD GRPC: Created stream for shard %d to %s", shardId, targetServer)
// Send file info first
err = stream.Send(&volume_server_pb.ReceiveFileRequest{
Data: &volume_server_pb.ReceiveFileRequest_Info{
Info: &volume_server_pb.ReceiveFileInfo{
VolumeId: t.volumeID,
Ext: fmt.Sprintf(".ec%02d", shardId),
Collection: t.collection,
IsEcVolume: true,
ShardId: shardId,
FileSize: uint64(fileInfo.Size()),
},
},
})
if err != nil {
glog.Errorf("UPLOAD ERROR: Failed to send file info for shard %d: %v", shardId, err)
return fmt.Errorf("failed to send file info: %v", err)
}
glog.V(2).Infof("UPLOAD GRPC: Sent file info for shard %d", shardId)
// Stream file content in chunks
buffer := make([]byte, 64*1024) // 64KB chunks
totalSent := int64(0)
chunkCount := 0
for {
n, err := file.Read(buffer)
if err != nil && err != io.EOF {
glog.Errorf("UPLOAD ERROR: Failed to read shard file %s: %v", shardFile, err)
return fmt.Errorf("failed to read file: %v", err)
}
// Send data if we read any
if n > 0 {
err = stream.Send(&volume_server_pb.ReceiveFileRequest{
Data: &volume_server_pb.ReceiveFileRequest_FileContent{
FileContent: buffer[:n],
},
})
if err != nil {
glog.Errorf("UPLOAD ERROR: Failed to send chunk %d for shard %d: %v", chunkCount, shardId, err)
return fmt.Errorf("failed to send file chunk: %v", err)
}
totalSent += int64(n)
chunkCount++
glog.V(3).Infof("UPLOAD CHUNK: Shard %d chunk %d sent (%d bytes, total: %d)", shardId, chunkCount, n, totalSent)
}
// Break if we reached EOF
if err == io.EOF {
break
}
}
glog.V(2).Infof("UPLOAD GRPC: Completed streaming shard %d (%d bytes in %d chunks)", shardId, totalSent, chunkCount)
// Close stream and get response
resp, err := stream.CloseAndRecv()
if err != nil {
return fmt.Errorf("failed to upload shard: %v", err)
glog.Errorf("UPLOAD ERROR: Failed to close stream for shard %d: %v", shardId, err)
return fmt.Errorf("failed to close stream: %v", err)
}
if resp.Error != "" {
glog.Errorf("UPLOAD ERROR: Server error for shard %d: %s", shardId, resp.Error)
return fmt.Errorf("server error: %s", resp.Error)
}
glog.V(1).Infof("UPLOAD SUCCESS: Shard %d (%d bytes) successfully uploaded to %s", shardId, resp.BytesWritten, targetServer)
return nil
}
// uploadShardDataDirectly is no longer needed - kept for compatibility
func (t *Task) uploadShardDataDirectly(file *os.File, targetServer pb.ServerAddress, shardId uint32, fileSize int64) error {
// This method is deprecated in favor of gRPC streaming
return fmt.Errorf("uploadShardDataDirectly is deprecated - use gRPC ReceiveFile instead")
}
// mountShardOnServer mounts an EC shard on target server
func (t *Task) mountShardOnServer(targetServer pb.ServerAddress, shardId uint32) error {
glog.V(1).Infof("MOUNT START: Mounting shard %d on server %s", shardId, targetServer)
// Connect to target server
grpcAddress := pb.ServerToGrpcAddress(string(targetServer))
conn, err := grpc.NewClient(grpcAddress, t.grpcDialOpt)
if err != nil {
glog.Errorf("MOUNT ERROR: Failed to connect to %s for shard %d: %v", targetServer, shardId, err)
return fmt.Errorf("failed to connect to %s: %v", targetServer, err)
}
defer conn.Close()
glog.V(2).Infof("MOUNT GRPC: Connected to %s for shard %d", targetServer, shardId)
client := volume_server_pb.NewVolumeServerClient(conn)
ctx := context.Background()
// Mount the shard
_, err = client.VolumeEcShardsMount(ctx, &volume_server_pb.VolumeEcShardsMountRequest{
VolumeId: t.volumeID,
Collection: t.collection,
ShardIds: []uint32{shardId},
})
if err != nil {
glog.Errorf("MOUNT ERROR: Failed to mount shard %d on %s: %v", shardId, targetServer, err)
return fmt.Errorf("failed to mount shard %d: %v", shardId, err)
}
glog.V(1).Infof("Successfully mounted shard %d on server %s", shardId, targetServer)
glog.V(1).Infof("MOUNT SUCCESS: Shard %d successfully mounted on %s", shardId, targetServer)
return nil
}
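Since VolumeEcShardsMountRequest accepts a ShardIds slice, the mount phase could batch to one RPC per server instead of one per shard. A sketch built on the shardTargets map from the distribution step; mountShardsOnServer is a hypothetical batched variant:

// Group shard ids by their target server, then mount each group in one call.
shardsByServer := make(map[pb.ServerAddress][]uint32)
for shardId, server := range shardTargets {
	shardsByServer[server] = append(shardsByServer[server], uint32(shardId))
}
for server, shardIds := range shardsByServer {
	// mountShardsOnServer would issue a single VolumeEcShardsMount with all ids.
	if err := t.mountShardsOnServer(server, shardIds); err != nil {
		return err
	}
}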
