From 9d013ea9b8edbd6cf3030730a8a0ab02d00a47da Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 2 Aug 2025 02:16:49 -0700 Subject: [PATCH] Admin UI: include ec shard sizes into volume server info (#7071) * show ec shards on dashboard, show max in its own column * master collect shard size info * master send shard size via VolumeList * change to more efficient shard sizes slice * include ec shard sizes into volume server info * Eliminated Redundant gRPC Calls * much more efficient * Efficient Counting: bits.OnesCount32() uses CPU-optimized instructions to count set bits in O(1) * avoid extra volume list call * simplify * preserve existing shard sizes * avoid hard coded value * Update weed/storage/erasure_coding/ec_volume_info.go Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * Update weed/admin/dash/volume_management.go Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * Update ec_volume_info.go * address comments * avoid duplicated functions * Update weed/admin/dash/volume_management.go Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> * simplify * refactoring * fix compilation --------- Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> --- weed/admin/dash/admin_data.go | 28 +- weed/admin/dash/cluster_topology.go | 7 + weed/admin/dash/types.go | 16 + weed/admin/dash/volume_management.go | 184 ++++++++++- weed/admin/view/app/admin.templ | 60 +++- weed/admin/view/app/admin_templ.go | 297 +++++++++++------- .../view/app/cluster_volume_servers.templ | 40 ++- .../view/app/cluster_volume_servers_templ.go | 242 +++++++++----- weed/pb/master.proto | 1 + weed/pb/master_pb/master.pb.go | 14 +- weed/shell/command_volume_list.go | 24 +- weed/storage/erasure_coding/ec_encoder.go | 1 + .../erasure_coding/ec_shard_size_helper.go | 68 ++++ .../ec_shard_size_helper_test.go | 117 +++++++ weed/storage/erasure_coding/ec_volume.go | 3 + weed/storage/erasure_coding/ec_volume_info.go | 162 ++++++++-- weed/topology/topology_ec.go | 63 ++-- weed/worker/tasks/erasure_coding/detection.go | 11 +- weed/worker/tasks/erasure_coding/ec_task.go | 3 - 19 files changed, 1083 insertions(+), 258 deletions(-) create mode 100644 weed/storage/erasure_coding/ec_shard_size_helper.go create mode 100644 weed/storage/erasure_coding/ec_shard_size_helper_test.go diff --git a/weed/admin/dash/admin_data.go b/weed/admin/dash/admin_data.go index 7571bdf6a..b474437c4 100644 --- a/weed/admin/dash/admin_data.go +++ b/weed/admin/dash/admin_data.go @@ -23,6 +23,10 @@ type AdminData struct { MessageBrokers []MessageBrokerNode `json:"message_brokers"` DataCenters []DataCenter `json:"datacenters"` LastUpdated time.Time `json:"last_updated"` + + // EC shard totals for dashboard + TotalEcVolumes int `json:"total_ec_volumes"` // Total number of EC volumes across all servers + TotalEcShards int `json:"total_ec_shards"` // Total number of EC shards across all servers } // Object Store Users management structures @@ -98,6 +102,13 @@ func (s *AdminServer) GetAdminData(username string) (AdminData, error) { return AdminData{}, err } + // Get volume servers data with EC shard information + volumeServersData, err := s.GetClusterVolumeServers() + if err != nil { + glog.Errorf("Failed to get cluster volume servers: %v", err) + return AdminData{}, err + } + // Get master nodes status masterNodes := s.getMasterNodesStatus() @@ -122,6 +133,19 @@ func (s 
*AdminServer) GetAdminData(username string) (AdminData, error) { // Keep default value on error } + // Calculate EC shard totals + var totalEcVolumes, totalEcShards int + ecVolumeSet := make(map[uint32]bool) // To avoid counting the same EC volume multiple times + + for _, vs := range volumeServersData.VolumeServers { + totalEcShards += vs.EcShards + // Count unique EC volumes across all servers + for _, ecInfo := range vs.EcShardDetails { + ecVolumeSet[ecInfo.VolumeID] = true + } + } + totalEcVolumes = len(ecVolumeSet) + // Prepare admin data adminData := AdminData{ Username: username, @@ -130,11 +154,13 @@ func (s *AdminServer) GetAdminData(username string) (AdminData, error) { TotalSize: topology.TotalSize, VolumeSizeLimitMB: volumeSizeLimitMB, MasterNodes: masterNodes, - VolumeServers: topology.VolumeServers, + VolumeServers: volumeServersData.VolumeServers, FilerNodes: filerNodes, MessageBrokers: messageBrokers, DataCenters: topology.DataCenters, LastUpdated: topology.UpdatedAt, + TotalEcVolumes: totalEcVolumes, + TotalEcShards: totalEcShards, } return adminData, nil diff --git a/weed/admin/dash/cluster_topology.go b/weed/admin/dash/cluster_topology.go index 2bac7145e..8c25cc2ac 100644 --- a/weed/admin/dash/cluster_topology.go +++ b/weed/admin/dash/cluster_topology.go @@ -76,6 +76,13 @@ func (s *AdminServer) getTopologyViaGRPC(topology *ClusterTopology) error { totalSize += int64(volInfo.Size) totalFiles += int64(volInfo.FileCount) } + + // Sum up EC shard sizes + for _, ecShardInfo := range diskInfo.EcShardInfos { + for _, shardSize := range ecShardInfo.ShardSizes { + totalSize += shardSize + } + } } vs := VolumeServer{ diff --git a/weed/admin/dash/types.go b/weed/admin/dash/types.go index f098fad8c..18c46a48d 100644 --- a/weed/admin/dash/types.go +++ b/weed/admin/dash/types.go @@ -44,6 +44,22 @@ type VolumeServer struct { DiskUsage int64 `json:"disk_usage"` DiskCapacity int64 `json:"disk_capacity"` LastHeartbeat time.Time `json:"last_heartbeat"` + + // EC shard information + EcVolumes int `json:"ec_volumes"` // Number of EC volumes this server has shards for + EcShards int `json:"ec_shards"` // Total number of EC shards on this server + EcShardDetails []VolumeServerEcInfo `json:"ec_shard_details"` // Detailed EC shard information +} + +// VolumeServerEcInfo represents EC shard information for a specific volume on a server +type VolumeServerEcInfo struct { + VolumeID uint32 `json:"volume_id"` + Collection string `json:"collection"` + ShardCount int `json:"shard_count"` // Number of shards this server has for this volume + EcIndexBits uint32 `json:"ec_index_bits"` // Bitmap of which shards this server has + ShardNumbers []int `json:"shard_numbers"` // List of actual shard numbers this server has + ShardSizes map[int]int64 `json:"shard_sizes"` // Map from shard number to size in bytes + TotalSize int64 `json:"total_size"` // Total size of all shards on this server for this volume } // S3 Bucket management structures diff --git a/weed/admin/dash/volume_management.go b/weed/admin/dash/volume_management.go index 61f2ee691..5dabe2674 100644 --- a/weed/admin/dash/volume_management.go +++ b/weed/admin/dash/volume_management.go @@ -7,6 +7,7 @@ import ( "time" "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" + "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding" ) // GetClusterVolumes retrieves cluster volumes data with pagination, sorting, and filtering @@ -26,6 +27,7 @@ func (s *AdminServer) GetClusterVolumes(page int, pageSize int, sortBy string, s } var volumes 
[]VolumeWithTopology var totalSize int64 + var cachedTopologyInfo *master_pb.TopologyInfo // Get detailed volume information via gRPC err := s.WithMasterClient(func(client master_pb.SeaweedClient) error { @@ -34,11 +36,15 @@ func (s *AdminServer) GetClusterVolumes(page int, pageSize int, sortBy string, s return err } + // Cache the topology info for reuse + cachedTopologyInfo = resp.TopologyInfo + if resp.TopologyInfo != nil { for _, dc := range resp.TopologyInfo.DataCenterInfos { for _, rack := range dc.RackInfos { for _, node := range rack.DataNodeInfos { for _, diskInfo := range node.DiskInfos { + // Process regular volumes for _, volInfo := range diskInfo.VolumeInfos { volume := VolumeWithTopology{ VolumeInformationMessage: volInfo, @@ -49,6 +55,14 @@ func (s *AdminServer) GetClusterVolumes(page int, pageSize int, sortBy string, s volumes = append(volumes, volume) totalSize += int64(volInfo.Size) } + + // Process EC shards in the same loop + for _, ecShardInfo := range diskInfo.EcShardInfos { + // Add all shard sizes for this EC volume + for _, shardSize := range ecShardInfo.ShardSizes { + totalSize += shardSize + } + } } } } @@ -66,6 +80,8 @@ func (s *AdminServer) GetClusterVolumes(page int, pageSize int, sortBy string, s if collection != "" { var filteredVolumes []VolumeWithTopology var filteredTotalSize int64 + var filteredEcTotalSize int64 + for _, volume := range volumes { // Handle "default" collection filtering for empty collections volumeCollection := volume.Collection @@ -78,8 +94,36 @@ func (s *AdminServer) GetClusterVolumes(page int, pageSize int, sortBy string, s filteredTotalSize += int64(volume.Size) } } + + // Filter EC shard sizes by collection using already processed data + // This reuses the topology traversal done above (lines 43-71) to avoid a second pass + if cachedTopologyInfo != nil { + for _, dc := range cachedTopologyInfo.DataCenterInfos { + for _, rack := range dc.RackInfos { + for _, node := range rack.DataNodeInfos { + for _, diskInfo := range node.DiskInfos { + for _, ecShardInfo := range diskInfo.EcShardInfos { + // Handle "default" collection filtering for empty collections + ecCollection := ecShardInfo.Collection + if ecCollection == "" { + ecCollection = "default" + } + + if ecCollection == collection { + // Add all shard sizes for this EC volume + for _, shardSize := range ecShardInfo.ShardSizes { + filteredEcTotalSize += shardSize + } + } + } + } + } + } + } + } + volumes = filteredVolumes - totalSize = filteredTotalSize + totalSize = filteredTotalSize + filteredEcTotalSize } // Calculate unique data center, rack, disk type, collection, and version counts from filtered volumes @@ -370,23 +414,151 @@ func (s *AdminServer) VacuumVolume(volumeID int, server string) error { }) } -// GetClusterVolumeServers retrieves cluster volume servers data +// GetClusterVolumeServers retrieves cluster volume servers data including EC shard information func (s *AdminServer) GetClusterVolumeServers() (*ClusterVolumeServersData, error) { - topology, err := s.GetClusterTopology() + var volumeServerMap map[string]*VolumeServer + + // Make only ONE VolumeList call and use it for both topology building AND EC shard processing + err := s.WithMasterClient(func(client master_pb.SeaweedClient) error { + resp, err := client.VolumeList(context.Background(), &master_pb.VolumeListRequest{}) + if err != nil { + return err + } + + // Get volume size limit from response, default to 30GB if not set + volumeSizeLimitMB := resp.VolumeSizeLimitMb + if volumeSizeLimitMB == 0 { + 
volumeSizeLimitMB = 30000 // default to 30000MB (30GB) + } + + // Build basic topology from the VolumeList response (replaces GetClusterTopology call) + volumeServerMap = make(map[string]*VolumeServer) + + if resp.TopologyInfo != nil { + // Process topology to build basic volume server info (similar to cluster_topology.go logic) + for _, dc := range resp.TopologyInfo.DataCenterInfos { + for _, rack := range dc.RackInfos { + for _, node := range rack.DataNodeInfos { + // Initialize volume server if not exists + if volumeServerMap[node.Id] == nil { + volumeServerMap[node.Id] = &VolumeServer{ + Address: node.Id, + DataCenter: dc.Id, + Rack: rack.Id, + Volumes: 0, + DiskUsage: 0, + DiskCapacity: 0, + EcVolumes: 0, + EcShards: 0, + EcShardDetails: []VolumeServerEcInfo{}, + } + } + vs := volumeServerMap[node.Id] + + // Process EC shard information for this server at volume server level (not per-disk) + ecVolumeMap := make(map[uint32]*VolumeServerEcInfo) + // Temporary map to accumulate shard info across disks + ecShardAccumulator := make(map[uint32][]*master_pb.VolumeEcShardInformationMessage) + + // Process disk information + for _, diskInfo := range node.DiskInfos { + vs.DiskCapacity += int64(diskInfo.MaxVolumeCount) * int64(volumeSizeLimitMB) * 1024 * 1024 // Use actual volume size limit + + // Count regular volumes and calculate disk usage + for _, volInfo := range diskInfo.VolumeInfos { + vs.Volumes++ + vs.DiskUsage += int64(volInfo.Size) + } + + // Accumulate EC shard information across all disks for this volume server + for _, ecShardInfo := range diskInfo.EcShardInfos { + volumeId := ecShardInfo.Id + ecShardAccumulator[volumeId] = append(ecShardAccumulator[volumeId], ecShardInfo) + } + } + + // Process accumulated EC shard information per volume + for volumeId, ecShardInfos := range ecShardAccumulator { + if len(ecShardInfos) == 0 { + continue + } + + // Initialize EC volume info + ecInfo := &VolumeServerEcInfo{ + VolumeID: volumeId, + Collection: ecShardInfos[0].Collection, + ShardCount: 0, + EcIndexBits: 0, + ShardNumbers: []int{}, + ShardSizes: make(map[int]int64), + TotalSize: 0, + } + + // Merge EcIndexBits from all disks and collect shard sizes + allShardSizes := make(map[erasure_coding.ShardId]int64) + for _, ecShardInfo := range ecShardInfos { + ecInfo.EcIndexBits |= ecShardInfo.EcIndexBits + + // Collect shard sizes from this disk + shardBits := erasure_coding.ShardBits(ecShardInfo.EcIndexBits) + shardBits.EachSetIndex(func(shardId erasure_coding.ShardId) { + if size, found := erasure_coding.GetShardSize(ecShardInfo, shardId); found { + allShardSizes[shardId] = size + } + }) + } + + // Process final merged shard information + finalShardBits := erasure_coding.ShardBits(ecInfo.EcIndexBits) + finalShardBits.EachSetIndex(func(shardId erasure_coding.ShardId) { + ecInfo.ShardCount++ + ecInfo.ShardNumbers = append(ecInfo.ShardNumbers, int(shardId)) + vs.EcShards++ + + // Add shard size if available + if shardSize, exists := allShardSizes[shardId]; exists { + ecInfo.ShardSizes[int(shardId)] = shardSize + ecInfo.TotalSize += shardSize + vs.DiskUsage += shardSize // Add EC shard size to total disk usage + } + }) + + ecVolumeMap[volumeId] = ecInfo + } + + // Convert EC volume map to slice and update volume server (after processing all disks) + for _, ecInfo := range ecVolumeMap { + vs.EcShardDetails = append(vs.EcShardDetails, *ecInfo) + vs.EcVolumes++ + } + } + } + } + } + + return nil + }) + if err != nil { return nil, err } + // Convert map back to slice + var volumeServers 
[]VolumeServer + for _, vs := range volumeServerMap { + volumeServers = append(volumeServers, *vs) + } + var totalCapacity int64 var totalVolumes int - for _, vs := range topology.VolumeServers { + for _, vs := range volumeServers { totalCapacity += vs.DiskCapacity totalVolumes += vs.Volumes } return &ClusterVolumeServersData{ - VolumeServers: topology.VolumeServers, - TotalVolumeServers: len(topology.VolumeServers), + VolumeServers: volumeServers, + TotalVolumeServers: len(volumeServers), TotalVolumes: totalVolumes, TotalCapacity: totalCapacity, LastUpdated: time.Now(), diff --git a/weed/admin/view/app/admin.templ b/weed/admin/view/app/admin.templ index 2dd1a0ace..534c798bd 100644 --- a/weed/admin/view/app/admin.templ +++ b/weed/admin/view/app/admin.templ @@ -104,6 +104,53 @@ templ Admin(data dash.AdminData) { + +
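// The GetClusterVolumeServers aggregation above ORs EcIndexBits from every disk of a
// volume server and then counts and walks the set bits once per EC volume. A minimal
// standalone sketch of that bitmap merge and count, using hypothetical masks instead of
// real topology data:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// Hypothetical per-disk bitmaps for one EC volume on one volume server.
	diskA := uint32(0b000000000101) // shards 0 and 2
	diskB := uint32(0b000000100000) // shard 5
	merged := diskA | diskB         // server-level EcIndexBits, merged across disks

	fmt.Println("shards on server:", bits.OnesCount32(merged)) // 3, counted in O(1)
	for shardId := uint32(0); shardId < 14; shardId++ {        // 14 = 10 data + 4 parity shards
		if merged&(1<<shardId) != 0 {
			fmt.Println("holds shard", shardId)
		}
	}
}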
+
+
+
+
+
+
+ EC Volumes +
+
+ {fmt.Sprintf("%d", data.TotalEcVolumes)} +
+
+
+ +
+
+
+
+
+ +
+
+
+
+
+
+ EC Shards +
+
+ {fmt.Sprintf("%d", data.TotalEcShards)} +
+
+
+ +
+
+
+
+
+ + +
+
+
+
@@ -219,6 +266,7 @@ templ Admin(data dash.AdminData) { Data Center Rack Volumes + EC Shards Capacity @@ -242,12 +290,22 @@ templ Admin(data dash.AdminData) {
+ + if vs.EcShards > 0 { + {fmt.Sprintf("%d", vs.EcShards)} + if vs.EcVolumes > 0 { + ({fmt.Sprintf("%d vol", vs.EcVolumes)}) + } + } else { + - + } + {formatBytes(vs.DiskUsage)} / {formatBytes(vs.DiskCapacity)} } if len(data.VolumeServers) == 0 { - + No volume servers found diff --git a/weed/admin/view/app/admin_templ.go b/weed/admin/view/app/admin_templ.go index 558b4fafc..906c0fd1c 100644 --- a/weed/admin/view/app/admin_templ.go +++ b/weed/admin/view/app/admin_templ.go @@ -86,324 +86,397 @@ func Admin(data dash.AdminData) templ.Component { if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "
Master Nodes
") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "
EC Volumes
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var6 string + templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalEcVolumes)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 118, Col: 75} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "
EC Shards
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var7 string + templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalEcShards)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 138, Col: 74} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "
Master Nodes
AddressRole
") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } for _, master := range data.MasterNodes { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "
AddressRole
") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "
") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - var templ_7745c5c3_Var6 string - templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(master.Address) + var templ_7745c5c3_Var8 string + templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(master.Address) if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 128, Col: 63} + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 175, Col: 63} } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6)) + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8)) if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } if master.IsLeader { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "Leader") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "Leader") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "Follower") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "Follower") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "
Cluster
") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "
Cluster
") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - var templ_7745c5c3_Var7 string - templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(data.MasterNodes))) + var templ_7745c5c3_Var9 string + templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(data.MasterNodes))) if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 158, Col: 85} + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 205, Col: 85} } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7)) + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9)) if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "
Masters
") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "
Masters
") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - var templ_7745c5c3_Var8 string - templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(data.VolumeServers))) + var templ_7745c5c3_Var10 string + templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(data.VolumeServers))) if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 166, Col: 87} + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 213, Col: 87} } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8)) + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10)) if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "
Volume Servers
") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "
Volume Servers
") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - var templ_7745c5c3_Var9 string - templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(data.FilerNodes))) + var templ_7745c5c3_Var11 string + templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(data.FilerNodes))) if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 174, Col: 84} + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 221, Col: 84} } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9)) + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11)) if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "
Filers
") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "
Filers
") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - var templ_7745c5c3_Var10 string - templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(data.MessageBrokers))) + var templ_7745c5c3_Var12 string + templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(data.MessageBrokers))) if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 182, Col: 88} + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 229, Col: 88} } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10)) + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12)) if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "
Message Brokers
Volume Servers
") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "Message Brokers
Volume Servers
IDAddressData CenterRackVolumesCapacity
") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } for _, vs := range data.VolumeServers { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } } if len(data.VolumeServers) == 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, "") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, "
IDAddressData CenterRackVolumesEC ShardsCapacity
") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "
") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - var templ_7745c5c3_Var11 string - templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(vs.ID) + var templ_7745c5c3_Var13 string + templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(vs.ID) if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 228, Col: 54} + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 276, Col: 54} } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11)) + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13)) if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, "\" target=\"_blank\">") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - var templ_7745c5c3_Var13 string - templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(vs.Address) + var templ_7745c5c3_Var15 string + templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(vs.Address) if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 231, Col: 63} + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 279, Col: 63} } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13)) + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15)) if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, " ") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, " ") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - var templ_7745c5c3_Var14 string - templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(vs.DataCenter) + var templ_7745c5c3_Var16 string + templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(vs.DataCenter) if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 235, Col: 62} + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 283, Col: 62} } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14)) + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16)) if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, "") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, "") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - var templ_7745c5c3_Var15 string - templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(vs.Rack) + var templ_7745c5c3_Var17 string + templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(vs.Rack) if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 236, Col: 56} + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 284, Col: 56} } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15)) + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17)) if 
templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "
") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "\">") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - var templ_7745c5c3_Var17 string - templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d/%d", vs.Volumes, vs.MaxVolumes)) + var templ_7745c5c3_Var19 string + templ_7745c5c3_Var19, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d/%d", vs.Volumes, vs.MaxVolumes)) if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 241, Col: 104} + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 289, Col: 104} } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17)) + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var19)) if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "
") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - var templ_7745c5c3_Var18 string - templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(vs.DiskUsage)) + if vs.EcShards > 0 { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var20 string + templ_7745c5c3_Var20, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", vs.EcShards)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 295, Col: 127} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var20)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, " ") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + if vs.EcVolumes > 0 { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "(") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var21 string + templ_7745c5c3_Var21, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d vol", vs.EcVolumes)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 297, Col: 119} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var21)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 29, ")") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + } else { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, "-") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, "") if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 245, Col: 74} + return templ_7745c5c3_Err } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18)) + var templ_7745c5c3_Var22 string + templ_7745c5c3_Var22, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(vs.DiskUsage)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 303, Col: 74} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var22)) if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, " / ") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, " / ") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - var templ_7745c5c3_Var19 string - templ_7745c5c3_Var19, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(vs.DiskCapacity)) + var templ_7745c5c3_Var23 string + templ_7745c5c3_Var23, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(vs.DiskCapacity)) if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 245, Col: 107} + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 303, Col: 107} } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var19)) + _, templ_7745c5c3_Err = 
templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var23)) if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "
No volume servers found
No volume servers found
Filer Nodes
") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "
AddressData CenterRackLast Updated
Filer Nodes
") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } for _, filer := range data.FilerNodes { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 41, "") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } } if len(data.FilerNodes) == 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 42, "") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "
AddressData CenterRackLast Updated
") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 37, "\" target=\"_blank\">") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - var templ_7745c5c3_Var21 string - templ_7745c5c3_Var21, templ_7745c5c3_Err = templ.JoinStringErrs(filer.Address) + var templ_7745c5c3_Var25 string + templ_7745c5c3_Var25, templ_7745c5c3_Err = templ.JoinStringErrs(filer.Address) if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 299, Col: 66} + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 357, Col: 66} } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var21)) + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var25)) if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, " ") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 38, " ") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - var templ_7745c5c3_Var22 string - templ_7745c5c3_Var22, templ_7745c5c3_Err = templ.JoinStringErrs(filer.DataCenter) + var templ_7745c5c3_Var26 string + templ_7745c5c3_Var26, templ_7745c5c3_Err = templ.JoinStringErrs(filer.DataCenter) if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 303, Col: 65} + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 361, Col: 65} } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var22)) + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var26)) if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, "") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 39, "") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - var templ_7745c5c3_Var23 string - templ_7745c5c3_Var23, templ_7745c5c3_Err = templ.JoinStringErrs(filer.Rack) + var templ_7745c5c3_Var27 string + templ_7745c5c3_Var27, templ_7745c5c3_Err = templ.JoinStringErrs(filer.Rack) if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 304, Col: 59} + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 362, Col: 59} } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var23)) + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var27)) if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, "") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 40, "") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - var templ_7745c5c3_Var24 string - templ_7745c5c3_Var24, templ_7745c5c3_Err = templ.JoinStringErrs(filer.LastUpdated.Format("2006-01-02 15:04:05")) + var templ_7745c5c3_Var28 string + templ_7745c5c3_Var28, templ_7745c5c3_Err = templ.JoinStringErrs(filer.LastUpdated.Format("2006-01-02 15:04:05")) if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 305, Col: 96} + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 363, Col: 96} } - _, templ_7745c5c3_Err = 
templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var24)) + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var28)) if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "
No filer nodes found
No filer nodes found
Last updated: ") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 43, "
Last updated: ") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - var templ_7745c5c3_Var25 string - templ_7745c5c3_Var25, templ_7745c5c3_Err = templ.JoinStringErrs(data.LastUpdated.Format("2006-01-02 15:04:05")) + var templ_7745c5c3_Var29 string + templ_7745c5c3_Var29, templ_7745c5c3_Err = templ.JoinStringErrs(data.LastUpdated.Format("2006-01-02 15:04:05")) if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 329, Col: 81} + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 387, Col: 81} } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var25)) + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var29)) if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "
") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 44, "") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } diff --git a/weed/admin/view/app/cluster_volume_servers.templ b/weed/admin/view/app/cluster_volume_servers.templ index f6b737a57..26cb659c5 100644 --- a/weed/admin/view/app/cluster_volume_servers.templ +++ b/weed/admin/view/app/cluster_volume_servers.templ @@ -103,6 +103,8 @@ templ ClusterVolumeServers(data dash.ClusterVolumeServersData) { Data Center Rack Volumes + Max Volumes + EC Shards Capacity Usage Actions @@ -133,9 +135,28 @@ templ ClusterVolumeServers(data dash.ClusterVolumeServersData) { style={fmt.Sprintf("width: %d%%", calculatePercent(host.Volumes, host.MaxVolumes))}> - {fmt.Sprintf("%d/%d", host.Volumes, host.MaxVolumes)} + {fmt.Sprintf("%d", host.Volumes)} + + {fmt.Sprintf("%d", host.MaxVolumes)} + + + if host.EcShards > 0 { +
+ + {fmt.Sprintf("%d", host.EcShards)} + shards +
+ if host.EcVolumes > 0 { +
+ {fmt.Sprintf("%d EC volumes", host.EcVolumes)} +
+ } + } else { + - + } + {formatBytes(host.DiskCapacity)}
@@ -161,6 +182,8 @@ templ ClusterVolumeServers(data dash.ClusterVolumeServersData) { data-max-volumes={fmt.Sprintf("%d", host.MaxVolumes)} data-disk-usage={fmt.Sprintf("%d", host.DiskUsage)} data-disk-capacity={fmt.Sprintf("%d", host.DiskCapacity)} + data-ec-volumes={fmt.Sprintf("%d", host.EcVolumes)} + data-ec-shards={fmt.Sprintf("%d", host.EcShards)} data-last-heartbeat={host.LastHeartbeat.Format("2006-01-02 15:04:05")}> @@ -213,6 +236,8 @@ templ ClusterVolumeServers(data dash.ClusterVolumeServersData) { maxVolumes: parseInt(button.getAttribute('data-max-volumes')), diskUsage: parseInt(button.getAttribute('data-disk-usage')), diskCapacity: parseInt(button.getAttribute('data-disk-capacity')), + ecVolumes: parseInt(button.getAttribute('data-ec-volumes')), + ecShards: parseInt(button.getAttribute('data-ec-shards')), lastHeartbeat: button.getAttribute('data-last-heartbeat') }; showVolumeServerDetails(serverData); @@ -268,6 +293,19 @@ templ ClusterVolumeServers(data dash.ClusterVolumeServersData) { '' + '
' + '' + + + // Add EC Shard information if available + (server.ecShards > 0 ? + '
' + + '
' + + '
Erasure Coding Information
' + + '' + + '' + + '' + + '
EC Volumes:' + server.ecVolumes + '
EC Shards:' + server.ecShards + '
' + + '
' + + '
' : '') + + '
' + '
' + '
Quick Actions
' + diff --git a/weed/admin/view/app/cluster_volume_servers_templ.go b/weed/admin/view/app/cluster_volume_servers_templ.go index 094774c7a..b25f86880 100644 --- a/weed/admin/view/app/cluster_volume_servers_templ.go +++ b/weed/admin/view/app/cluster_volume_servers_templ.go @@ -78,7 +78,7 @@ func ClusterVolumeServers(data dash.ClusterVolumeServersData) templ.Component { return templ_7745c5c3_Err } if len(data.VolumeServers) > 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "
") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "
Server IDAddressData CenterRackVolumesCapacityUsageActions
") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } @@ -90,7 +90,7 @@ func ClusterVolumeServers(data dash.ClusterVolumeServersData) templ.Component { var templ_7745c5c3_Var5 string templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(host.ID) if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 115, Col: 58} + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 117, Col: 58} } _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5)) if templ_7745c5c3_Err != nil { @@ -103,7 +103,7 @@ func ClusterVolumeServers(data dash.ClusterVolumeServersData) templ.Component { var templ_7745c5c3_Var6 templ.SafeURL templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinURLErrs(templ.SafeURL(fmt.Sprintf("http://%s/ui/index.html", host.PublicURL))) if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 118, Col: 122} + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 120, Col: 122} } _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6)) if templ_7745c5c3_Err != nil { @@ -116,7 +116,7 @@ func ClusterVolumeServers(data dash.ClusterVolumeServersData) templ.Component { var templ_7745c5c3_Var7 string templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(host.Address) if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 119, Col: 61} + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 121, Col: 61} } _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7)) if templ_7745c5c3_Err != nil { @@ -129,7 +129,7 @@ func ClusterVolumeServers(data dash.ClusterVolumeServersData) templ.Component { var templ_7745c5c3_Var8 string templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(host.DataCenter) if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 124, Col: 99} + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 126, Col: 99} } _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8)) if templ_7745c5c3_Err != nil { @@ -142,7 +142,7 @@ func ClusterVolumeServers(data dash.ClusterVolumeServersData) templ.Component { var templ_7745c5c3_Var9 string templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(host.Rack) if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 127, Col: 93} + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 129, Col: 93} } _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9)) if templ_7745c5c3_Err != nil { @@ -155,223 +155,309 @@ func ClusterVolumeServers(data dash.ClusterVolumeServersData) templ.Component { var templ_7745c5c3_Var10 string templ_7745c5c3_Var10, templ_7745c5c3_Err = templruntime.SanitizeStyleAttributeValues(fmt.Sprintf("width: %d%%", calculatePercent(host.Volumes, host.MaxVolumes))) if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: 
`view/app/cluster_volume_servers.templ`, Line: 133, Col: 139} + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 135, Col: 139} } _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10)) if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "\">") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "\">") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } var templ_7745c5c3_Var11 string - templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d/%d", host.Volumes, host.MaxVolumes)) + templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", host.Volumes)) if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 136, Col: 107} + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 138, Col: 111} } _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11)) if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, "\" data-disk-usage=\"") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var25 string + templ_7745c5c3_Var25, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", host.DiskUsage)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 183, Col: 102} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var25)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, "\" data-disk-capacity=\"") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var26 string + templ_7745c5c3_Var26, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", host.DiskCapacity)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 184, Col: 108} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var26)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, "\" data-ec-volumes=\"") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var27 string + templ_7745c5c3_Var27, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", host.EcVolumes)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 185, Col: 102} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var27)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "\" data-ec-shards=\"") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var28 string + templ_7745c5c3_Var28, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", host.EcShards)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: 
templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 186, Col: 100} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var28)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "\" data-last-heartbeat=\"") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var29 string + templ_7745c5c3_Var29, templ_7745c5c3_Err = templ.JoinStringErrs(host.LastHeartbeat.Format("2006-01-02 15:04:05")) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 187, Col: 121} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var29)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "\">") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, "
Server IDAddressData CenterRackVolumesMax VolumesEC ShardsCapacityUsageActions
") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } var templ_7745c5c3_Var12 string - templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(host.DiskCapacity)) + templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", host.MaxVolumes)) if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 139, Col: 75} + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 142, Col: 112} } _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12)) if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var14 string - templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(host.DiskUsage)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 147, Col: 83} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + if host.EcShards > 0 { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var13 string + templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", host.EcShards)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 148, Col: 129} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, " shards
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + if host.EcVolumes > 0 { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var14 string + templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d EC volumes", host.EcVolumes)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 153, Col: 127} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + } else { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "-") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, "
") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } var templ_7745c5c3_Var15 string - templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(host.ID) + templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(host.DiskCapacity)) if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 155, Col: 68} + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 160, Col: 75} } _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15)) if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "\" data-address=\"") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "
") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } var templ_7745c5c3_Var17 string - templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(host.PublicURL) + templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(host.DiskUsage)) if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 157, Col: 83} + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 168, Col: 83} } _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17)) if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "\" data-datacenter=\"") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "
") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "
") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "
No Volume Servers Found

No volume servers are currently available in the cluster.

") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 37, "
No Volume Servers Found

No volume servers are currently available in the cluster.

") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 29, "
Last updated: ") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 38, "
Last updated: ") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - var templ_7745c5c3_Var25 string - templ_7745c5c3_Var25, templ_7745c5c3_Err = templ.JoinStringErrs(data.LastUpdated.Format("2006-01-02 15:04:05")) + var templ_7745c5c3_Var30 string + templ_7745c5c3_Var30, templ_7745c5c3_Err = templ.JoinStringErrs(data.LastUpdated.Format("2006-01-02 15:04:05")) if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 188, Col: 81} + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 211, Col: 81} } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var25)) + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var30)) if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, "
") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 39, "") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } diff --git a/weed/pb/master.proto b/weed/pb/master.proto index 403dbb278..f8049c466 100644 --- a/weed/pb/master.proto +++ b/weed/pb/master.proto @@ -129,6 +129,7 @@ message VolumeEcShardInformationMessage { string disk_type = 4; uint64 expire_at_sec = 5; // used to record the destruction time of ec volume uint32 disk_id = 6; + repeated int64 shard_sizes = 7; // optimized: sizes for shards in order of set bits in ec_index_bits } message StorageBackend { diff --git a/weed/pb/master_pb/master.pb.go b/weed/pb/master_pb/master.pb.go index 0f772f0dc..19df43d71 100644 --- a/weed/pb/master_pb/master.pb.go +++ b/weed/pb/master_pb/master.pb.go @@ -560,6 +560,7 @@ type VolumeEcShardInformationMessage struct { DiskType string `protobuf:"bytes,4,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` ExpireAtSec uint64 `protobuf:"varint,5,opt,name=expire_at_sec,json=expireAtSec,proto3" json:"expire_at_sec,omitempty"` // used to record the destruction time of ec volume DiskId uint32 `protobuf:"varint,6,opt,name=disk_id,json=diskId,proto3" json:"disk_id,omitempty"` + ShardSizes []int64 `protobuf:"varint,7,rep,packed,name=shard_sizes,json=shardSizes,proto3" json:"shard_sizes,omitempty"` // optimized: sizes for shards in order of set bits in ec_index_bits unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -636,6 +637,13 @@ func (x *VolumeEcShardInformationMessage) GetDiskId() uint32 { return 0 } +func (x *VolumeEcShardInformationMessage) GetShardSizes() []int64 { + if x != nil { + return x.ShardSizes + } + return nil +} + type StorageBackend struct { state protoimpl.MessageState `protogen:"open.v1"` Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` @@ -4098,7 +4106,7 @@ const file_master_proto_rawDesc = "" + "\x03ttl\x18\n" + " \x01(\rR\x03ttl\x12\x1b\n" + "\tdisk_type\x18\x0f \x01(\tR\bdiskType\x12\x17\n" + - "\adisk_id\x18\x10 \x01(\rR\x06diskId\"\xcf\x01\n" + + "\adisk_id\x18\x10 \x01(\rR\x06diskId\"\xf0\x01\n" + "\x1fVolumeEcShardInformationMessage\x12\x0e\n" + "\x02id\x18\x01 \x01(\rR\x02id\x12\x1e\n" + "\n" + @@ -4107,7 +4115,9 @@ const file_master_proto_rawDesc = "" + "\rec_index_bits\x18\x03 \x01(\rR\vecIndexBits\x12\x1b\n" + "\tdisk_type\x18\x04 \x01(\tR\bdiskType\x12\"\n" + "\rexpire_at_sec\x18\x05 \x01(\x04R\vexpireAtSec\x12\x17\n" + - "\adisk_id\x18\x06 \x01(\rR\x06diskId\"\xbe\x01\n" + + "\adisk_id\x18\x06 \x01(\rR\x06diskId\x12\x1f\n" + + "\vshard_sizes\x18\a \x03(\x03R\n" + + "shardSizes\"\xbe\x01\n" + "\x0eStorageBackend\x12\x12\n" + "\x04type\x18\x01 \x01(\tR\x04type\x12\x0e\n" + "\x02id\x18\x02 \x01(\tR\x02id\x12I\n" + diff --git a/weed/shell/command_volume_list.go b/weed/shell/command_volume_list.go index 9872736a4..f57c7f5be 100644 --- a/weed/shell/command_volume_list.go +++ b/weed/shell/command_volume_list.go @@ -13,6 +13,7 @@ import ( "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding" "github.com/seaweedfs/seaweedfs/weed/storage/types" + "github.com/seaweedfs/seaweedfs/weed/util" "io" ) @@ -248,8 +249,27 @@ func (c *commandVolumeList) writeDiskInfo(writer io.Writer, t *master_pb.DiskInf if destroyTime > 0 { expireAtString = fmt.Sprintf("expireAt:%s", time.Unix(int64(destroyTime), 0).Format("2006-01-02 15:04:05")) } - output(verbosityLevel >= 5, writer, " ec volume id:%v collection:%v shards:%v %s\n", - ecShardInfo.Id, 
ecShardInfo.Collection, erasure_coding.ShardBits(ecShardInfo.EcIndexBits).ShardIds(), expireAtString) + + // Build shard size information + shardIds := erasure_coding.ShardBits(ecShardInfo.EcIndexBits).ShardIds() + var totalSize int64 + var shardSizeInfo string + + if len(ecShardInfo.ShardSizes) > 0 { + var shardDetails []string + for _, shardId := range shardIds { + if size, found := erasure_coding.GetShardSize(ecShardInfo, erasure_coding.ShardId(shardId)); found { + shardDetails = append(shardDetails, fmt.Sprintf("%d:%s", shardId, util.BytesToHumanReadable(uint64(size)))) + totalSize += size + } else { + shardDetails = append(shardDetails, fmt.Sprintf("%d:?", shardId)) + } + } + shardSizeInfo = fmt.Sprintf(" sizes:[%s] total:%s", strings.Join(shardDetails, " "), util.BytesToHumanReadable(uint64(totalSize))) + } + + output(verbosityLevel >= 5, writer, " ec volume id:%v collection:%v shards:%v%s %s\n", + ecShardInfo.Id, ecShardInfo.Collection, shardIds, shardSizeInfo, expireAtString) } output((volumeInfosFound || ecShardInfoFound) && verbosityLevel >= 4, writer, " Disk %s %+v \n", diskType, s) return s diff --git a/weed/storage/erasure_coding/ec_encoder.go b/weed/storage/erasure_coding/ec_encoder.go index 5db65a2c8..eeeb156e6 100644 --- a/weed/storage/erasure_coding/ec_encoder.go +++ b/weed/storage/erasure_coding/ec_encoder.go @@ -18,6 +18,7 @@ const ( DataShardsCount = 10 ParityShardsCount = 4 TotalShardsCount = DataShardsCount + ParityShardsCount + MinTotalDisks = TotalShardsCount/ParityShardsCount + 1 ErasureCodingLargeBlockSize = 1024 * 1024 * 1024 // 1GB ErasureCodingSmallBlockSize = 1024 * 1024 // 1MB ) diff --git a/weed/storage/erasure_coding/ec_shard_size_helper.go b/weed/storage/erasure_coding/ec_shard_size_helper.go new file mode 100644 index 000000000..43d9a4f2f --- /dev/null +++ b/weed/storage/erasure_coding/ec_shard_size_helper.go @@ -0,0 +1,68 @@ +package erasure_coding + +import ( + "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" +) + +// GetShardSize returns the size of a specific shard from VolumeEcShardInformationMessage +// Returns the size and true if the shard exists, 0 and false if not present +func GetShardSize(msg *master_pb.VolumeEcShardInformationMessage, shardId ShardId) (size int64, found bool) { + if msg == nil || msg.ShardSizes == nil { + return 0, false + } + + shardBits := ShardBits(msg.EcIndexBits) + index, found := shardBits.ShardIdToIndex(shardId) + if !found || index >= len(msg.ShardSizes) { + return 0, false + } + + return msg.ShardSizes[index], true +} + +// SetShardSize sets the size of a specific shard in VolumeEcShardInformationMessage +// Returns true if successful, false if the shard is not present in EcIndexBits +func SetShardSize(msg *master_pb.VolumeEcShardInformationMessage, shardId ShardId, size int64) bool { + if msg == nil { + return false + } + + shardBits := ShardBits(msg.EcIndexBits) + index, found := shardBits.ShardIdToIndex(shardId) + if !found { + return false + } + + // Initialize ShardSizes slice if needed + expectedLength := shardBits.ShardIdCount() + if msg.ShardSizes == nil { + msg.ShardSizes = make([]int64, expectedLength) + } else if len(msg.ShardSizes) != expectedLength { + // Resize the slice to match the expected length + newSizes := make([]int64, expectedLength) + copy(newSizes, msg.ShardSizes) + msg.ShardSizes = newSizes + } + + if index >= len(msg.ShardSizes) { + return false + } + + msg.ShardSizes[index] = size + return true +} + +// InitializeShardSizes initializes the ShardSizes slice based on EcIndexBits +// This 
ensures the slice has the correct length for all present shards +func InitializeShardSizes(msg *master_pb.VolumeEcShardInformationMessage) { + if msg == nil { + return + } + + shardBits := ShardBits(msg.EcIndexBits) + expectedLength := shardBits.ShardIdCount() + + if msg.ShardSizes == nil || len(msg.ShardSizes) != expectedLength { + msg.ShardSizes = make([]int64, expectedLength) + } +} diff --git a/weed/storage/erasure_coding/ec_shard_size_helper_test.go b/weed/storage/erasure_coding/ec_shard_size_helper_test.go new file mode 100644 index 000000000..2ef54c949 --- /dev/null +++ b/weed/storage/erasure_coding/ec_shard_size_helper_test.go @@ -0,0 +1,117 @@ +package erasure_coding + +import ( + "testing" + + "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" +) + +func TestShardSizeHelpers(t *testing.T) { + // Create a message with shards 0, 2, and 5 present (EcIndexBits = 0b100101 = 37) + msg := &master_pb.VolumeEcShardInformationMessage{ + Id: 123, + EcIndexBits: 37, // Binary: 100101, shards 0, 2, 5 are present + } + + // Test SetShardSize + if !SetShardSize(msg, 0, 1000) { + t.Error("Failed to set size for shard 0") + } + if !SetShardSize(msg, 2, 2000) { + t.Error("Failed to set size for shard 2") + } + if !SetShardSize(msg, 5, 5000) { + t.Error("Failed to set size for shard 5") + } + + // Test setting size for non-present shard should fail + if SetShardSize(msg, 1, 1500) { + t.Error("Should not be able to set size for non-present shard 1") + } + + // Verify ShardSizes slice has correct length (3 shards) + if len(msg.ShardSizes) != 3 { + t.Errorf("Expected ShardSizes length 3, got %d", len(msg.ShardSizes)) + } + + // Test GetShardSize + if size, found := GetShardSize(msg, 0); !found || size != 1000 { + t.Errorf("Expected shard 0 size 1000, got %d (found: %v)", size, found) + } + if size, found := GetShardSize(msg, 2); !found || size != 2000 { + t.Errorf("Expected shard 2 size 2000, got %d (found: %v)", size, found) + } + if size, found := GetShardSize(msg, 5); !found || size != 5000 { + t.Errorf("Expected shard 5 size 5000, got %d (found: %v)", size, found) + } + + // Test getting size for non-present shard + if size, found := GetShardSize(msg, 1); found { + t.Errorf("Should not find shard 1, but got size %d", size) + } + + // Test direct slice access + if len(msg.ShardSizes) != 3 { + t.Errorf("Expected 3 shard sizes in slice, got %d", len(msg.ShardSizes)) + } + + expectedSizes := []int64{1000, 2000, 5000} // Ordered by shard ID: 0, 2, 5 + for i, expectedSize := range expectedSizes { + if i < len(msg.ShardSizes) && msg.ShardSizes[i] != expectedSize { + t.Errorf("Expected ShardSizes[%d] = %d, got %d", i, expectedSize, msg.ShardSizes[i]) + } + } +} + +func TestShardBitsHelpers(t *testing.T) { + // Test with EcIndexBits = 37 (binary: 100101, shards 0, 2, 5) + shardBits := ShardBits(37) + + // Test ShardIdToIndex + if index, found := shardBits.ShardIdToIndex(0); !found || index != 0 { + t.Errorf("Expected shard 0 at index 0, got %d (found: %v)", index, found) + } + if index, found := shardBits.ShardIdToIndex(2); !found || index != 1 { + t.Errorf("Expected shard 2 at index 1, got %d (found: %v)", index, found) + } + if index, found := shardBits.ShardIdToIndex(5); !found || index != 2 { + t.Errorf("Expected shard 5 at index 2, got %d (found: %v)", index, found) + } + + // Test for non-present shard + if index, found := shardBits.ShardIdToIndex(1); found { + t.Errorf("Should not find shard 1, but got index %d", index) + } + + // Test IndexToShardId + if shardId, found := 
shardBits.IndexToShardId(0); !found || shardId != 0 { + t.Errorf("Expected index 0 to be shard 0, got %d (found: %v)", shardId, found) + } + if shardId, found := shardBits.IndexToShardId(1); !found || shardId != 2 { + t.Errorf("Expected index 1 to be shard 2, got %d (found: %v)", shardId, found) + } + if shardId, found := shardBits.IndexToShardId(2); !found || shardId != 5 { + t.Errorf("Expected index 2 to be shard 5, got %d (found: %v)", shardId, found) + } + + // Test for invalid index + if shardId, found := shardBits.IndexToShardId(3); found { + t.Errorf("Should not find shard for index 3, but got shard %d", shardId) + } + + // Test EachSetIndex + var collectedShards []ShardId + shardBits.EachSetIndex(func(shardId ShardId) { + collectedShards = append(collectedShards, shardId) + }) + expectedShards := []ShardId{0, 2, 5} + if len(collectedShards) != len(expectedShards) { + t.Errorf("Expected EachSetIndex to collect %v, got %v", expectedShards, collectedShards) + } + for i, expected := range expectedShards { + if i >= len(collectedShards) || collectedShards[i] != expected { + t.Errorf("Expected EachSetIndex to collect %v, got %v", expectedShards, collectedShards) + break + } + } +} diff --git a/weed/storage/erasure_coding/ec_volume.go b/weed/storage/erasure_coding/ec_volume.go index 33bc4ac7d..61057674f 100644 --- a/weed/storage/erasure_coding/ec_volume.go +++ b/weed/storage/erasure_coding/ec_volume.go @@ -227,6 +227,9 @@ func (ev *EcVolume) ToVolumeEcShardInformationMessage(diskId uint32) (messages [ } prevVolumeId = s.VolumeId m.EcIndexBits = uint32(ShardBits(m.EcIndexBits).AddShardId(s.ShardId)) + + // Add shard size information using the optimized format + SetShardSize(m, s.ShardId, s.Size()) } return } diff --git a/weed/storage/erasure_coding/ec_volume_info.go b/weed/storage/erasure_coding/ec_volume_info.go index 787910b0c..53b352168 100644 --- a/weed/storage/erasure_coding/ec_volume_info.go +++ b/weed/storage/erasure_coding/ec_volume_info.go @@ -1,6 +1,8 @@ package erasure_coding import ( + "math/bits" + "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" "github.com/seaweedfs/seaweedfs/weed/storage/needle" ) @@ -11,27 +13,51 @@ type EcVolumeInfo struct { Collection string ShardBits ShardBits DiskType string - DiskId uint32 // ID of the disk this EC volume is on - ExpireAtSec uint64 // ec volume destroy time, calculated from the ec volume was created -} - -func NewEcVolumeInfo(diskType string, collection string, vid needle.VolumeId, shardBits ShardBits, expireAtSec uint64, diskId uint32) *EcVolumeInfo { - return &EcVolumeInfo{ - Collection: collection, - VolumeId: vid, - ShardBits: shardBits, - DiskType: diskType, - DiskId: diskId, - ExpireAtSec: expireAtSec, - } + DiskId uint32 // ID of the disk this EC volume is on + ExpireAtSec uint64 // ec volume destroy time, calculated from the ec volume was created + ShardSizes []int64 // optimized: sizes for shards in order of set bits in ShardBits } func (ecInfo *EcVolumeInfo) AddShardId(id ShardId) { + oldBits := ecInfo.ShardBits ecInfo.ShardBits = ecInfo.ShardBits.AddShardId(id) + + // If shard was actually added, resize ShardSizes array + if oldBits != ecInfo.ShardBits { + ecInfo.resizeShardSizes(oldBits) + } } func (ecInfo *EcVolumeInfo) RemoveShardId(id ShardId) { + oldBits := ecInfo.ShardBits ecInfo.ShardBits = ecInfo.ShardBits.RemoveShardId(id) + + // If shard was actually removed, resize ShardSizes array + if oldBits != ecInfo.ShardBits { + ecInfo.resizeShardSizes(oldBits) + } +} + +func (ecInfo *EcVolumeInfo) SetShardSize(id 
ShardId, size int64) { + ecInfo.ensureShardSizesInitialized() + if index, found := ecInfo.ShardBits.ShardIdToIndex(id); found && index < len(ecInfo.ShardSizes) { + ecInfo.ShardSizes[index] = size + } +} + +func (ecInfo *EcVolumeInfo) GetShardSize(id ShardId) (int64, bool) { + if index, found := ecInfo.ShardBits.ShardIdToIndex(id); found && index < len(ecInfo.ShardSizes) { + return ecInfo.ShardSizes[index], true + } + return 0, false +} + +func (ecInfo *EcVolumeInfo) GetTotalSize() int64 { + var total int64 + for _, size := range ecInfo.ShardSizes { + total += size + } + return total } func (ecInfo *EcVolumeInfo) HasShardId(id ShardId) bool { @@ -48,17 +74,33 @@ func (ecInfo *EcVolumeInfo) ShardIdCount() (count int) { func (ecInfo *EcVolumeInfo) Minus(other *EcVolumeInfo) *EcVolumeInfo { ret := &EcVolumeInfo{ - VolumeId: ecInfo.VolumeId, - Collection: ecInfo.Collection, - ShardBits: ecInfo.ShardBits.Minus(other.ShardBits), - DiskType: ecInfo.DiskType, + VolumeId: ecInfo.VolumeId, + Collection: ecInfo.Collection, + ShardBits: ecInfo.ShardBits.Minus(other.ShardBits), + DiskType: ecInfo.DiskType, + DiskId: ecInfo.DiskId, + ExpireAtSec: ecInfo.ExpireAtSec, + } + + // Initialize optimized ShardSizes for the result + ret.ensureShardSizesInitialized() + + // Copy shard sizes for remaining shards + retIndex := 0 + for shardId := ShardId(0); shardId < TotalShardsCount && retIndex < len(ret.ShardSizes); shardId++ { + if ret.ShardBits.HasShardId(shardId) { + if size, exists := ecInfo.GetShardSize(shardId); exists { + ret.ShardSizes[retIndex] = size + } + retIndex++ + } } return ret } func (ecInfo *EcVolumeInfo) ToVolumeEcShardInformationMessage() (ret *master_pb.VolumeEcShardInformationMessage) { - return &master_pb.VolumeEcShardInformationMessage{ + t := &master_pb.VolumeEcShardInformationMessage{ Id: uint32(ecInfo.VolumeId), EcIndexBits: uint32(ecInfo.ShardBits), Collection: ecInfo.Collection, @@ -66,6 +108,12 @@ func (ecInfo *EcVolumeInfo) ToVolumeEcShardInformationMessage() (ret *master_pb. 
ExpireAtSec: ecInfo.ExpireAtSec, DiskId: ecInfo.DiskId, } + + // Directly set the optimized ShardSizes + t.ShardSizes = make([]int64, len(ecInfo.ShardSizes)) + copy(t.ShardSizes, ecInfo.ShardSizes) + + return t } type ShardBits uint32 // use bits to indicate the shard id, use 32 bits just for possible future extension @@ -121,3 +169,81 @@ func (b ShardBits) MinusParityShards() ShardBits { } return b } + +// ShardIdToIndex converts a shard ID to its index position in the ShardSizes slice +// Returns the index and true if the shard is present, -1 and false if not present +func (b ShardBits) ShardIdToIndex(shardId ShardId) (index int, found bool) { + if !b.HasShardId(shardId) { + return -1, false + } + + // Create a mask for bits before the shardId + mask := uint32((1 << shardId) - 1) + // Count set bits before the shardId using efficient bit manipulation + index = bits.OnesCount32(uint32(b) & mask) + return index, true +} + +// EachSetIndex iterates over all set shard IDs and calls the provided function for each +// This is highly efficient using bit manipulation - only iterates over actual set bits +func (b ShardBits) EachSetIndex(fn func(shardId ShardId)) { + bitsValue := uint32(b) + for bitsValue != 0 { + // Find the position of the least significant set bit + shardId := ShardId(bits.TrailingZeros32(bitsValue)) + fn(shardId) + // Clear the least significant set bit + bitsValue &= bitsValue - 1 + } +} + +// IndexToShardId converts an index position in ShardSizes slice to the corresponding shard ID +// Returns the shard ID and true if valid index, -1 and false if invalid index +func (b ShardBits) IndexToShardId(index int) (shardId ShardId, found bool) { + if index < 0 { + return 0, false + } + + currentIndex := 0 + for i := ShardId(0); i < TotalShardsCount; i++ { + if b.HasShardId(i) { + if currentIndex == index { + return i, true + } + currentIndex++ + } + } + return 0, false // index out of range +} + +// Helper methods for EcVolumeInfo to manage the optimized ShardSizes slice +func (ecInfo *EcVolumeInfo) ensureShardSizesInitialized() { + expectedLength := ecInfo.ShardBits.ShardIdCount() + if ecInfo.ShardSizes == nil { + ecInfo.ShardSizes = make([]int64, expectedLength) + } else if len(ecInfo.ShardSizes) != expectedLength { + // Resize and preserve existing data + ecInfo.resizeShardSizes(ecInfo.ShardBits) + } +} + +func (ecInfo *EcVolumeInfo) resizeShardSizes(prevShardBits ShardBits) { + expectedLength := ecInfo.ShardBits.ShardIdCount() + newSizes := make([]int64, expectedLength) + + // Copy existing sizes to new positions based on current ShardBits + if len(ecInfo.ShardSizes) > 0 { + newIndex := 0 + for shardId := ShardId(0); shardId < TotalShardsCount && newIndex < expectedLength; shardId++ { + if ecInfo.ShardBits.HasShardId(shardId) { + // Try to find the size for this shard in the old array using previous ShardBits + if oldIndex, found := prevShardBits.ShardIdToIndex(shardId); found && oldIndex < len(ecInfo.ShardSizes) { + newSizes[newIndex] = ecInfo.ShardSizes[oldIndex] + } + newIndex++ + } + } + } + + ecInfo.ShardSizes = newSizes +} diff --git a/weed/topology/topology_ec.go b/weed/topology/topology_ec.go index 0ad028625..844e92f55 100644 --- a/weed/topology/topology_ec.go +++ b/weed/topology/topology_ec.go @@ -17,14 +17,18 @@ func (t *Topology) SyncDataNodeEcShards(shardInfos []*master_pb.VolumeEcShardInf // convert into in memory struct storage.VolumeInfo var shards []*erasure_coding.EcVolumeInfo for _, shardInfo := range shardInfos { - shards = append(shards, - 
erasure_coding.NewEcVolumeInfo( - shardInfo.DiskType, - shardInfo.Collection, - needle.VolumeId(shardInfo.Id), - erasure_coding.ShardBits(shardInfo.EcIndexBits), - shardInfo.ExpireAtSec, - shardInfo.DiskId)) + // Create EcVolumeInfo directly with optimized format + ecVolumeInfo := &erasure_coding.EcVolumeInfo{ + VolumeId: needle.VolumeId(shardInfo.Id), + Collection: shardInfo.Collection, + ShardBits: erasure_coding.ShardBits(shardInfo.EcIndexBits), + DiskType: shardInfo.DiskType, + DiskId: shardInfo.DiskId, + ExpireAtSec: shardInfo.ExpireAtSec, + ShardSizes: shardInfo.ShardSizes, + } + + shards = append(shards, ecVolumeInfo) } // find out the delta volumes newShards, deletedShards = dn.UpdateEcShards(shards) @@ -41,24 +45,32 @@ func (t *Topology) IncrementalSyncDataNodeEcShards(newEcShards, deletedEcShards // convert into in memory struct storage.VolumeInfo var newShards, deletedShards []*erasure_coding.EcVolumeInfo for _, shardInfo := range newEcShards { - newShards = append(newShards, - erasure_coding.NewEcVolumeInfo( - shardInfo.DiskType, - shardInfo.Collection, - needle.VolumeId(shardInfo.Id), - erasure_coding.ShardBits(shardInfo.EcIndexBits), - shardInfo.ExpireAtSec, - shardInfo.DiskId)) + // Create EcVolumeInfo directly with optimized format + ecVolumeInfo := &erasure_coding.EcVolumeInfo{ + VolumeId: needle.VolumeId(shardInfo.Id), + Collection: shardInfo.Collection, + ShardBits: erasure_coding.ShardBits(shardInfo.EcIndexBits), + DiskType: shardInfo.DiskType, + DiskId: shardInfo.DiskId, + ExpireAtSec: shardInfo.ExpireAtSec, + ShardSizes: shardInfo.ShardSizes, + } + + newShards = append(newShards, ecVolumeInfo) } for _, shardInfo := range deletedEcShards { - deletedShards = append(deletedShards, - erasure_coding.NewEcVolumeInfo( - shardInfo.DiskType, - shardInfo.Collection, - needle.VolumeId(shardInfo.Id), - erasure_coding.ShardBits(shardInfo.EcIndexBits), - shardInfo.ExpireAtSec, - shardInfo.DiskId)) + // Create EcVolumeInfo directly with optimized format + ecVolumeInfo := &erasure_coding.EcVolumeInfo{ + VolumeId: needle.VolumeId(shardInfo.Id), + Collection: shardInfo.Collection, + ShardBits: erasure_coding.ShardBits(shardInfo.EcIndexBits), + DiskType: shardInfo.DiskType, + DiskId: shardInfo.DiskId, + ExpireAtSec: shardInfo.ExpireAtSec, + ShardSizes: shardInfo.ShardSizes, + } + + deletedShards = append(deletedShards, ecVolumeInfo) } dn.DeltaUpdateEcShards(newShards, deletedShards) @@ -69,7 +81,6 @@ func (t *Topology) IncrementalSyncDataNodeEcShards(newEcShards, deletedEcShards for _, v := range deletedShards { t.UnRegisterEcShards(v, dn) } - return } func NewEcShardLocations(collection string) *EcShardLocations { @@ -178,6 +189,4 @@ func (t *Topology) DeleteEcCollection(collection string) { for _, vid := range vids { delete(t.ecShardMap, vid) } - - return } diff --git a/weed/worker/tasks/erasure_coding/detection.go b/weed/worker/tasks/erasure_coding/detection.go index 1122d2721..9cf87cdf6 100644 --- a/weed/worker/tasks/erasure_coding/detection.go +++ b/weed/worker/tasks/erasure_coding/detection.go @@ -168,19 +168,16 @@ func planECDestinations(activeTopology *topology.ActiveTopology, metric *types.V } } - // Determine minimum shard disk locations based on configuration - minTotalDisks := 4 - // Get available disks for EC placement (include source node for EC) availableDisks := activeTopology.GetAvailableDisks(topology.TaskTypeErasureCoding, "") - if len(availableDisks) < minTotalDisks { - return nil, fmt.Errorf("insufficient disks for EC placement: need %d, have %d", minTotalDisks, 
len(availableDisks)) + if len(availableDisks) < erasure_coding.MinTotalDisks { + return nil, fmt.Errorf("insufficient disks for EC placement: need %d, have %d", erasure_coding.MinTotalDisks, len(availableDisks)) } // Select best disks for EC placement with rack/DC diversity selectedDisks := selectBestECDestinations(availableDisks, sourceRack, sourceDC, erasure_coding.TotalShardsCount) - if len(selectedDisks) < minTotalDisks { - return nil, fmt.Errorf("found %d disks, but could not find %d suitable destinations for EC placement", len(selectedDisks), minTotalDisks) + if len(selectedDisks) < erasure_coding.MinTotalDisks { + return nil, fmt.Errorf("found %d disks, but could not find %d suitable destinations for EC placement", len(selectedDisks), erasure_coding.MinTotalDisks) } var plans []*topology.DestinationPlan diff --git a/weed/worker/tasks/erasure_coding/ec_task.go b/weed/worker/tasks/erasure_coding/ec_task.go index a6a3f749f..97332f63f 100644 --- a/weed/worker/tasks/erasure_coding/ec_task.go +++ b/weed/worker/tasks/erasure_coding/ec_task.go @@ -81,9 +81,6 @@ func (t *ErasureCodingTask) Execute(ctx context.Context, params *worker_pb.TaskP // Use the working directory from task parameters, or fall back to a default baseWorkDir := t.workDir - if baseWorkDir == "" { - baseWorkDir = "/tmp/seaweedfs_ec_work" - } // Create unique working directory for this task taskWorkDir := filepath.Join(baseWorkDir, fmt.Sprintf("vol_%d_%d", t.volumeID, time.Now().Unix()))
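
Illustrative usage sketch (not part of the patch): the new `shard_sizes` field stores one entry per set bit of `ec_index_bits`, ordered by ascending shard id, and the helpers added in `ec_shard_size_helper.go` map a shard id to its slot by counting the lower set bits. The snippet below assumes only the APIs introduced above (`SetShardSize`, `GetShardSize`, `ShardIdToIndex`) and is a sketch of intended use, not a definitive reference.

package main

import (
	"fmt"

	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
	"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
)

func main() {
	// EcIndexBits 0b100101 (= 37) marks shards 0, 2, and 5 as present.
	msg := &master_pb.VolumeEcShardInformationMessage{
		Id:          123,
		EcIndexBits: 0b100101,
	}

	// Sizes live in a packed slice ordered by ascending shard id, so only
	// three entries are needed here (for shards 0, 2, and 5).
	erasure_coding.SetShardSize(msg, 0, 1000)
	erasure_coding.SetShardSize(msg, 2, 2000)
	erasure_coding.SetShardSize(msg, 5, 5000)

	// Reading a size maps the shard id to its slice index by counting the
	// set bits below it (bits.OnesCount32 inside ShardBits.ShardIdToIndex),
	// so lookup stays O(1) without storing 14 entries per volume.
	if size, found := erasure_coding.GetShardSize(msg, 2); found {
		fmt.Println("shard 2 size:", size) // prints 2000
	}

	// A shard id that is not present in EcIndexBits reports found == false.
	if _, found := erasure_coding.GetShardSize(msg, 1); !found {
		fmt.Println("shard 1 has no size entry")
	}
}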