Rework parameter passing for functions within `ec.rebuild` (#7445)

* Rework parameter passing for functions within `ec.rebuild`

This simplifies the overall codebase and allows parallelization to be handled cleanly via wait groups.

* fix copy source

* add tests

* remove tests that aren't useful

* fmt

* nil check

---------

Co-authored-by: Chris Lu <chrislusf@users.noreply.github.com>
Co-authored-by: chrislu <chris.lu@gmail.com>
Lisandro Pin, committed by GitHub (commit 79fa87bad4)
Changed files:
  1. weed/shell/command_ec_rebuild.go (102)
  2. weed/shell/command_ec_rebuild_test.go (309)
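For orientation before the diff: the rework replaces the long parameter lists threaded through rebuildEcVolumes, rebuildOneEcVolume, generateMissingShards, and prepareDataToRecover (commandEnv, allEcNodes, writer, applyChanges, ...) with a single ecRebuilder receiver. Bundling the shared state this way is what makes the planned wait-group parallelization straightforward. The sketch below only illustrates that direction; it is not part of this commit, and it uses golang.org/x/sync/errgroup in place of the ErrorWaitGroup helper the in-code TODO mentions.

// Hypothetical sketch (not from this commit): fanning out per-collection
// rebuilds once shared state lives on a receiver struct.
package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

type rebuilder struct {
	collections []string // state formerly threaded through every function call
}

func (r *rebuilder) rebuildEcVolumes(collection string) error {
	fmt.Printf("rebuildEcVolumes collection %s\n", collection)
	return nil // real shard work elided
}

func (r *rebuilder) rebuildAll() error {
	var eg errgroup.Group
	for _, c := range r.collections {
		c := c // capture loop variable (needed before Go 1.22)
		eg.Go(func() error { return r.rebuildEcVolumes(c) })
	}
	return eg.Wait() // waits for all goroutines; returns the first non-nil error
}

func main() {
	r := &rebuilder{collections: []string{"c1", "c2"}}
	if err := r.rebuildAll(); err != nil {
		fmt.Println("rebuild failed:", err)
	}
}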

weed/shell/command_ec_rebuild.go
@@ -11,13 +11,21 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
 	"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
 	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
-	"google.golang.org/grpc"
 )

 func init() {
 	Commands = append(Commands, &commandEcRebuild{})
 }

+type ecRebuilder struct {
+	// TODO: add ErrorWaitGroup for parallelization
+	commandEnv   *CommandEnv
+	ecNodes      []*EcNode
+	writer       io.Writer
+	applyChanges bool
+	collections  []string
+}
+
 type commandEcRebuild struct {
 }
@@ -93,10 +101,18 @@ func (c *commandEcRebuild) Do(args []string, commandEnv *CommandEnv, writer io.W
 		collections = []string{*collection}
 	}

+	erb := &ecRebuilder{
+		commandEnv:   commandEnv,
+		ecNodes:      allEcNodes,
+		writer:       writer,
+		applyChanges: *applyChanges,
+		collections:  collections,
+	}
+
 	fmt.Printf("rebuildEcVolumes for %d collection(s)\n", len(collections))
 	for _, c := range collections {
 		fmt.Printf("rebuildEcVolumes collection %s\n", c)
-		if err = rebuildEcVolumes(commandEnv, allEcNodes, c, writer, *applyChanges); err != nil {
+		if err = erb.rebuildEcVolumes(c); err != nil {
 			return err
 		}
 	}
@@ -104,13 +120,36 @@ func (c *commandEcRebuild) Do(args []string, commandEnv *CommandEnv, writer io.W
 	return nil
 }

-func rebuildEcVolumes(commandEnv *CommandEnv, allEcNodes []*EcNode, collection string, writer io.Writer, applyChanges bool) error {
+func (erb *ecRebuilder) write(format string, a ...any) {
+	fmt.Fprintf(erb.writer, format, a...)
+}
+
+func (erb *ecRebuilder) isLocked() bool {
+	return erb.commandEnv.isLocked()
+}
+
+// ecNodeWithMoreFreeSlots returns the EC node with the highest free slot count, among all nodes visible to the rebuilder.
+func (erb *ecRebuilder) ecNodeWithMoreFreeSlots() *EcNode {
+	if len(erb.ecNodes) == 0 {
+		return nil
+	}
+
+	res := erb.ecNodes[0]
+	for i := 1; i < len(erb.ecNodes); i++ {
+		if erb.ecNodes[i].freeEcSlot > res.freeEcSlot {
+			res = erb.ecNodes[i]
+		}
+	}
+	return res
+}
+
+func (erb *ecRebuilder) rebuildEcVolumes(collection string) error {
 	fmt.Printf("rebuildEcVolumes %s\n", collection)

 	// collect vid => each shard locations, similar to ecShardMap in topology.go
 	ecShardMap := make(EcShardMap)
-	for _, ecNode := range allEcNodes {
+	for _, ecNode := range erb.ecNodes {
 		ecShardMap.registerEcNode(ecNode, collection)
 	}
@@ -120,16 +159,10 @@ func rebuildEcVolumes(commandEnv *CommandEnv, allEcNodes []*EcNode, collection s
 			continue
 		}

 		if shardCount < erasure_coding.DataShardsCount {
-			return fmt.Errorf("ec volume %d is unrepairable with %d shards\n", vid, shardCount)
-		}
-
-		sortEcNodesByFreeslotsDescending(allEcNodes)
-		if allEcNodes[0].freeEcSlot < erasure_coding.TotalShardsCount {
-			return fmt.Errorf("disk space is not enough")
+			return fmt.Errorf("ec volume %d is unrepairable with %d shards", vid, shardCount)
 		}

-		if err := rebuildOneEcVolume(commandEnv, allEcNodes[0], collection, vid, locations, writer, applyChanges); err != nil {
+		if err := erb.rebuildOneEcVolume(collection, vid, locations); err != nil {
 			return err
 		}
 	}
@@ -137,17 +170,25 @@ func rebuildEcVolumes(commandEnv *CommandEnv, allEcNodes []*EcNode, collection s
 	return nil
 }

-func rebuildOneEcVolume(commandEnv *CommandEnv, rebuilder *EcNode, collection string, volumeId needle.VolumeId, locations EcShardLocations, writer io.Writer, applyChanges bool) error {
-	if !commandEnv.isLocked() {
+func (erb *ecRebuilder) rebuildOneEcVolume(collection string, volumeId needle.VolumeId, locations EcShardLocations) error {
+	if !erb.isLocked() {
 		return fmt.Errorf("lock is lost")
 	}

+	// TODO: fix this logic so it supports concurrent executions
+	rebuilder := erb.ecNodeWithMoreFreeSlots()
+	if rebuilder == nil {
+		return fmt.Errorf("no EC nodes available for rebuild")
+	}
+	if rebuilder.freeEcSlot < erasure_coding.TotalShardsCount {
+		return fmt.Errorf("disk space is not enough")
+	}
+
 	fmt.Printf("rebuildOneEcVolume %s %d\n", collection, volumeId)

 	// collect shard files to rebuilder local disk
 	var generatedShardIds []uint32
-	copiedShardIds, _, err := prepareDataToRecover(commandEnv, rebuilder, collection, volumeId, locations, writer, applyChanges)
+	copiedShardIds, _, err := erb.prepareDataToRecover(rebuilder, collection, volumeId, locations)
 	if err != nil {
 		return err
 	}
@@ -155,25 +196,25 @@ func rebuildOneEcVolume(commandEnv *CommandEnv, rebuilder *EcNode, collection st
 		// clean up working files

 		// ask the rebuilder to delete the copied shards
-		err = sourceServerDeleteEcShards(commandEnv.option.GrpcDialOption, collection, volumeId, pb.NewServerAddressFromDataNode(rebuilder.info), copiedShardIds)
+		err = sourceServerDeleteEcShards(erb.commandEnv.option.GrpcDialOption, collection, volumeId, pb.NewServerAddressFromDataNode(rebuilder.info), copiedShardIds)
 		if err != nil {
-			fmt.Fprintf(writer, "%s delete copied ec shards %s %d.%v\n", rebuilder.info.Id, collection, volumeId, copiedShardIds)
+			erb.write("%s delete copied ec shards %s %d.%v\n", rebuilder.info.Id, collection, volumeId, copiedShardIds)
 		}
 	}()

-	if !applyChanges {
+	if !erb.applyChanges {
 		return nil
 	}

 	// generate ec shards, and maybe ecx file
-	generatedShardIds, err = generateMissingShards(commandEnv.option.GrpcDialOption, collection, volumeId, pb.NewServerAddressFromDataNode(rebuilder.info))
+	generatedShardIds, err = erb.generateMissingShards(collection, volumeId, pb.NewServerAddressFromDataNode(rebuilder.info))
 	if err != nil {
 		return err
 	}

 	// mount the generated shards
-	err = mountEcShards(commandEnv.option.GrpcDialOption, collection, volumeId, pb.NewServerAddressFromDataNode(rebuilder.info), generatedShardIds)
+	err = mountEcShards(erb.commandEnv.option.GrpcDialOption, collection, volumeId, pb.NewServerAddressFromDataNode(rebuilder.info), generatedShardIds)
 	if err != nil {
 		return err
 	}
@@ -183,9 +224,9 @@ func rebuildOneEcVolume(commandEnv *CommandEnv, rebuilder *EcNode, collection st
 	return nil
 }

-func generateMissingShards(grpcDialOption grpc.DialOption, collection string, volumeId needle.VolumeId, sourceLocation pb.ServerAddress) (rebuiltShardIds []uint32, err error) {
-	err = operation.WithVolumeServerClient(false, sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+func (erb *ecRebuilder) generateMissingShards(collection string, volumeId needle.VolumeId, sourceLocation pb.ServerAddress) (rebuiltShardIds []uint32, err error) {
+	err = operation.WithVolumeServerClient(false, sourceLocation, erb.commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
 		resp, rebuildErr := volumeServerClient.VolumeEcShardsRebuild(context.Background(), &volume_server_pb.VolumeEcShardsRebuildRequest{
 			VolumeId:   uint32(volumeId),
 			Collection: collection,
@@ -198,7 +239,7 @@ func generateMissingShards(grpcDialOption grpc.DialOption, collection string, vo
 	return
 }

-func prepareDataToRecover(commandEnv *CommandEnv, rebuilder *EcNode, collection string, volumeId needle.VolumeId, locations EcShardLocations, writer io.Writer, applyBalancing bool) (copiedShardIds []uint32, localShardIds []uint32, err error) {
+func (erb *ecRebuilder) prepareDataToRecover(rebuilder *EcNode, collection string, volumeId needle.VolumeId, locations EcShardLocations) (copiedShardIds []uint32, localShardIds []uint32, err error) {
 	needEcxFile := true
 	var localShardBits erasure_coding.ShardBits
@@ -212,21 +253,20 @@ func prepareDataToRecover(commandEnv *CommandEnv, rebuilder *EcNode, collection
 	}

 	for shardId, ecNodes := range locations {
 		if len(ecNodes) == 0 {
-			fmt.Fprintf(writer, "missing shard %d.%d\n", volumeId, shardId)
+			erb.write("missing shard %d.%d\n", volumeId, shardId)
 			continue
 		}

 		if localShardBits.HasShardId(erasure_coding.ShardId(shardId)) {
 			localShardIds = append(localShardIds, uint32(shardId))
-			fmt.Fprintf(writer, "use existing shard %d.%d\n", volumeId, shardId)
+			erb.write("use existing shard %d.%d\n", volumeId, shardId)
 			continue
 		}

 		var copyErr error
-		if applyBalancing {
-			copyErr = operation.WithVolumeServerClient(false, pb.NewServerAddressFromDataNode(rebuilder.info), commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+		if erb.applyChanges {
+			copyErr = operation.WithVolumeServerClient(false, pb.NewServerAddressFromDataNode(rebuilder.info), erb.commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
 				_, copyErr := volumeServerClient.VolumeEcShardsCopy(context.Background(), &volume_server_pb.VolumeEcShardsCopyRequest{
 					VolumeId:   uint32(volumeId),
 					Collection: collection,
@@ -243,9 +283,9 @@ func prepareDataToRecover(commandEnv *CommandEnv, rebuilder *EcNode, collection
 			}
 		}

 		if copyErr != nil {
-			fmt.Fprintf(writer, "%s failed to copy %d.%d from %s: %v\n", rebuilder.info.Id, volumeId, shardId, ecNodes[0].info.Id, copyErr)
+			erb.write("%s failed to copy %d.%d from %s: %v\n", rebuilder.info.Id, volumeId, shardId, ecNodes[0].info.Id, copyErr)
 		} else {
-			fmt.Fprintf(writer, "%s copied %d.%d from %s\n", rebuilder.info.Id, volumeId, shardId, ecNodes[0].info.Id)
+			erb.write("%s copied %d.%d from %s\n", rebuilder.info.Id, volumeId, shardId, ecNodes[0].info.Id)
 			copiedShardIds = append(copiedShardIds, uint32(shardId))
 		}

weed/shell/command_ec_rebuild_test.go (new file)
@@ -0,0 +1,309 @@
package shell
import (
"bytes"
"strings"
"testing"
"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
"github.com/seaweedfs/seaweedfs/weed/storage/needle"
)
// TestEcShardMapRegister tests that EC shards are properly registered
func TestEcShardMapRegister(t *testing.T) {
ecShardMap := make(EcShardMap)
// Create test nodes with EC shards
node1 := newEcNode("dc1", "rack1", "node1", 100).
addEcVolumeAndShardsForTest(1, "c1", []uint32{0, 1, 2, 3, 4, 5, 6})
node2 := newEcNode("dc1", "rack1", "node2", 100).
addEcVolumeAndShardsForTest(1, "c1", []uint32{7, 8, 9, 10, 11, 12, 13})
ecShardMap.registerEcNode(node1, "c1")
ecShardMap.registerEcNode(node2, "c1")
// Verify volume 1 is registered
locations, found := ecShardMap[needle.VolumeId(1)]
if !found {
t.Fatal("Expected volume 1 to be registered")
}
// Check shard count
count := locations.shardCount()
if count != erasure_coding.TotalShardsCount {
t.Errorf("Expected %d shards, got %d", erasure_coding.TotalShardsCount, count)
}
// Verify shard distribution
for i := 0; i < 7; i++ {
if len(locations[i]) != 1 || locations[i][0].info.Id != "node1" {
t.Errorf("Shard %d should be on node1", i)
}
}
for i := 7; i < erasure_coding.TotalShardsCount; i++ {
if len(locations[i]) != 1 || locations[i][0].info.Id != "node2" {
t.Errorf("Shard %d should be on node2", i)
}
}
}
// TestEcShardMapShardCount tests shard counting
func TestEcShardMapShardCount(t *testing.T) {
testCases := []struct {
name string
shardIds []uint32
expectedCount int
}{
{"all shards", []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 14},
{"data shards only", []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, 10},
{"parity shards only", []uint32{10, 11, 12, 13}, 4},
{"missing some shards", []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8}, 9},
{"single shard", []uint32{0}, 1},
{"no shards", []uint32{}, 0},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
locations := make(EcShardLocations, erasure_coding.MaxShardCount)
for _, shardId := range tc.shardIds {
locations[shardId] = []*EcNode{
newEcNode("dc1", "rack1", "node1", 100),
}
}
count := locations.shardCount()
if count != tc.expectedCount {
t.Errorf("Expected %d shards, got %d", tc.expectedCount, count)
}
})
}
}
// TestEcRebuilderEcNodeWithMoreFreeSlots tests the free slot selection
func TestEcRebuilderEcNodeWithMoreFreeSlots(t *testing.T) {
testCases := []struct {
name string
nodes []*EcNode
expectedNode string
}{
{
name: "single node",
nodes: []*EcNode{
newEcNode("dc1", "rack1", "node1", 100),
},
expectedNode: "node1",
},
{
name: "multiple nodes - select highest",
nodes: []*EcNode{
newEcNode("dc1", "rack1", "node1", 50),
newEcNode("dc1", "rack1", "node2", 150),
newEcNode("dc1", "rack1", "node3", 100),
},
expectedNode: "node2",
},
{
name: "multiple nodes - same slots",
nodes: []*EcNode{
newEcNode("dc1", "rack1", "node1", 100),
newEcNode("dc1", "rack1", "node2", 100),
},
expectedNode: "node1", // Should return first one
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
erb := &ecRebuilder{
ecNodes: tc.nodes,
}
node := erb.ecNodeWithMoreFreeSlots()
if node == nil {
t.Fatal("Expected a node, got nil")
}
if node.info.Id != tc.expectedNode {
t.Errorf("Expected node %s, got %s", tc.expectedNode, node.info.Id)
}
})
}
}
// TestEcRebuilderEcNodeWithMoreFreeSlotsEmpty tests empty node list
func TestEcRebuilderEcNodeWithMoreFreeSlotsEmpty(t *testing.T) {
erb := &ecRebuilder{
ecNodes: []*EcNode{},
}
node := erb.ecNodeWithMoreFreeSlots()
if node != nil {
t.Errorf("Expected nil for empty node list, got %v", node)
}
}
// TestRebuildEcVolumesInsufficientShards tests error handling for unrepairable volumes
func TestRebuildEcVolumesInsufficientShards(t *testing.T) {
var logBuffer bytes.Buffer
// Create a volume with insufficient shards (less than DataShardsCount)
node1 := newEcNode("dc1", "rack1", "node1", 100).
addEcVolumeAndShardsForTest(1, "c1", []uint32{0, 1, 2, 3, 4}) // Only 5 shards
erb := &ecRebuilder{
commandEnv: &CommandEnv{
env: make(map[string]string),
noLock: true, // Bypass lock check for unit test
},
ecNodes: []*EcNode{node1},
writer: &logBuffer,
}
err := erb.rebuildEcVolumes("c1")
if err == nil {
t.Fatal("Expected error for insufficient shards, got nil")
}
if !strings.Contains(err.Error(), "unrepairable") {
t.Errorf("Expected 'unrepairable' in error message, got: %s", err.Error())
}
}
// TestRebuildEcVolumesCompleteVolume tests that complete volumes are skipped
func TestRebuildEcVolumesCompleteVolume(t *testing.T) {
var logBuffer bytes.Buffer
// Create a volume with all shards
node1 := newEcNode("dc1", "rack1", "node1", 100).
addEcVolumeAndShardsForTest(1, "c1", []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13})
erb := &ecRebuilder{
commandEnv: &CommandEnv{
env: make(map[string]string),
noLock: true, // Bypass lock check for unit test
},
ecNodes: []*EcNode{node1},
writer: &logBuffer,
applyChanges: false,
}
err := erb.rebuildEcVolumes("c1")
if err != nil {
t.Fatalf("Expected no error for complete volume, got: %v", err)
}
// The function should return quickly without attempting rebuild
// since the volume is already complete
}
// TestRebuildEcVolumesInsufficientSpace tests error handling for insufficient disk space
func TestRebuildEcVolumesInsufficientSpace(t *testing.T) {
var logBuffer bytes.Buffer
// Create a volume with missing shards but insufficient free slots
node1 := newEcNode("dc1", "rack1", "node1", 5). // Only 5 free slots, need 14
addEcVolumeAndShardsForTest(1, "c1", []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
erb := &ecRebuilder{
commandEnv: &CommandEnv{
env: make(map[string]string),
noLock: true, // Bypass lock check for unit test
},
ecNodes: []*EcNode{node1},
writer: &logBuffer,
applyChanges: false,
}
err := erb.rebuildEcVolumes("c1")
if err == nil {
t.Fatal("Expected error for insufficient disk space, got nil")
}
if !strings.Contains(err.Error(), "disk space is not enough") {
t.Errorf("Expected 'disk space' in error message, got: %s", err.Error())
}
}
// TestMultipleNodesWithShards tests rebuild with shards distributed across multiple nodes
func TestMultipleNodesWithShards(t *testing.T) {
ecShardMap := make(EcShardMap)
// Create 3 nodes with different shards
node1 := newEcNode("dc1", "rack1", "node1", 100).
addEcVolumeAndShardsForTest(1, "c1", []uint32{0, 1, 2, 3})
node2 := newEcNode("dc1", "rack1", "node2", 100).
addEcVolumeAndShardsForTest(1, "c1", []uint32{4, 5, 6, 7})
node3 := newEcNode("dc1", "rack1", "node3", 100).
addEcVolumeAndShardsForTest(1, "c1", []uint32{8, 9})
ecShardMap.registerEcNode(node1, "c1")
ecShardMap.registerEcNode(node2, "c1")
ecShardMap.registerEcNode(node3, "c1")
locations := ecShardMap[needle.VolumeId(1)]
count := locations.shardCount()
// We have 10 shards total, which is enough for data shards
if count != 10 {
t.Errorf("Expected 10 shards, got %d", count)
}
// Verify each shard is on the correct node
for i := 0; i < 4; i++ {
if len(locations[i]) != 1 || locations[i][0].info.Id != "node1" {
t.Errorf("Shard %d should be on node1", i)
}
}
for i := 4; i < 8; i++ {
if len(locations[i]) != 1 || locations[i][0].info.Id != "node2" {
t.Errorf("Shard %d should be on node2", i)
}
}
for i := 8; i < 10; i++ {
if len(locations[i]) != 1 || locations[i][0].info.Id != "node3" {
t.Errorf("Shard %d should be on node3", i)
}
}
}
// TestDuplicateShards tests handling of duplicate shards on multiple nodes
func TestDuplicateShards(t *testing.T) {
ecShardMap := make(EcShardMap)
// Create 2 nodes with overlapping shards (both have shard 0)
node1 := newEcNode("dc1", "rack1", "node1", 100).
addEcVolumeAndShardsForTest(1, "c1", []uint32{0, 1, 2, 3})
node2 := newEcNode("dc1", "rack1", "node2", 100).
addEcVolumeAndShardsForTest(1, "c1", []uint32{0, 4, 5, 6}) // Duplicate shard 0
ecShardMap.registerEcNode(node1, "c1")
ecShardMap.registerEcNode(node2, "c1")
locations := ecShardMap[needle.VolumeId(1)]
// Shard 0 should be on both nodes
if len(locations[0]) != 2 {
t.Errorf("Expected shard 0 on 2 nodes, got %d", len(locations[0]))
}
// Verify both nodes are registered for shard 0
foundNode1 := false
foundNode2 := false
for _, node := range locations[0] {
if node.info.Id == "node1" {
foundNode1 = true
}
if node.info.Id == "node2" {
foundNode2 = true
}
}
if !foundNode1 || !foundNode2 {
t.Error("Both nodes should have shard 0")
}
// Shard count should be 7 (unique shards: 0, 1, 2, 3, 4, 5, 6)
count := locations.shardCount()
if count != 7 {
t.Errorf("Expected 7 unique shards, got %d", count)
}
}
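To exercise the new tests from the repository root (standard Go tooling; the -run pattern below is just one way to select them):

go test -v -run 'TestEcShardMap|TestEcRebuilder|TestRebuildEcVolumes|TestMultipleNodesWithShards|TestDuplicateShards' ./weed/shell/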