
Rework parameter passing for functions within `ec.rebuild`

This simplifies the overall codebase and allows parallelization to be handled cleanly via wait groups.
pull/7445/head
Lisandro Pin 2 months ago
commit 503fef7a9d
1 changed file: weed/shell/command_ec_rebuild.go (99 lines changed)

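Editor's note on the stated goal: the new ecRebuilder receiver introduced in the diff below carries a TODO for an "ErrorWaitGroup". As a rough, hypothetical sketch of that direction (not part of this commit), golang.org/x/sync/errgroup can stand in for an error-collecting wait group; ecRebuilder, rebuildEcVolumes, and collections mirror names from the diff, while rebuildAll and maxParallelization are illustrative inventions.

// Hypothetical follow-up sketch, not part of this commit: fan out
// per-collection rebuilds, using golang.org/x/sync/errgroup as a
// stand-in for the "ErrorWaitGroup" mentioned in the TODO below.
package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

type ecRebuilder struct {
	collections []string // mirrors the field introduced in this commit
}

// rebuildEcVolumes is a stub for the method this commit introduces.
func (erb *ecRebuilder) rebuildEcVolumes(collection string) error {
	fmt.Printf("rebuildEcVolumes %s\n", collection)
	return nil
}

// rebuildAll rebuilds all collections concurrently, bounded by
// maxParallelization, and returns the first error encountered once
// every goroutine has finished.
func (erb *ecRebuilder) rebuildAll(maxParallelization int) error {
	var eg errgroup.Group
	eg.SetLimit(maxParallelization) // bound concurrent rebuilds
	for _, c := range erb.collections {
		collection := c // capture the loop variable for the closure (pre-Go 1.22)
		eg.Go(func() error {
			return erb.rebuildEcVolumes(collection)
		})
	}
	return eg.Wait()
}

func main() {
	erb := &ecRebuilder{collections: []string{"pictures", "logs", "backups"}}
	if err := erb.rebuildAll(2); err != nil {
		fmt.Println("rebuild failed:", err)
	}
}

The TODO the commit leaves in rebuildOneEcVolume ("fix this logic so it supports concurrent executions") is why the commit itself stops short of enabling such a fan-out: node selection and free-slot accounting are not yet concurrency-safe.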
@@ -11,13 +11,21 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
 	"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
 	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
-	"google.golang.org/grpc"
 )
 
 func init() {
 	Commands = append(Commands, &commandEcRebuild{})
 }
 
+type ecRebuilder struct {
+	// TODO: add ErrorWaitGroup for parallelization
+	commandEnv   *CommandEnv
+	ecNodes      []*EcNode
+	writer       io.Writer
+	applyChanges bool
+	collections  []string
+}
+
 type commandEcRebuild struct {
 }
@@ -89,10 +97,18 @@ func (c *commandEcRebuild) Do(args []string, commandEnv *CommandEnv, writer io.W
 		collections = []string{*collection}
 	}
 
+	erb := &ecRebuilder{
+		commandEnv:   commandEnv,
+		ecNodes:      allEcNodes,
+		writer:       writer,
+		applyChanges: *applyChanges,
+		collections:  collections,
+	}
+
 	fmt.Printf("rebuildEcVolumes for %d collection(s)\n", len(collections))
 	for _, c := range collections {
 		fmt.Printf("rebuildEcVolumes collection %s\n", c)
-		if err = rebuildEcVolumes(commandEnv, allEcNodes, c, writer, *applyChanges); err != nil {
+		if err = erb.rebuildEcVolumes(c); err != nil {
 			return err
 		}
 	}
@@ -100,13 +116,36 @@ func (c *commandEcRebuild) Do(args []string, commandEnv *CommandEnv, writer io.W
 	return nil
 }
 
-func rebuildEcVolumes(commandEnv *CommandEnv, allEcNodes []*EcNode, collection string, writer io.Writer, applyChanges bool) error {
+func (erb *ecRebuilder) write(format string, a ...any) {
+	fmt.Fprintf(erb.writer, format, a...)
+}
+
+func (erb *ecRebuilder) isLocked() bool {
+	return erb.commandEnv.isLocked()
+}
+
+// ecNodeWithMoreFreeSlots returns the EC node with higher free slot count, from all nodes visible to the rebuilder.
+func (erb *ecRebuilder) ecNodeWithMoreFreeSlots() *EcNode {
+	if len(erb.ecNodes) == 0 {
+		return nil
+	}
+
+	res := erb.ecNodes[0]
+	for i := 1; i < len(erb.ecNodes); i++ {
+		if erb.ecNodes[i].freeEcSlot > res.freeEcSlot {
+			res = erb.ecNodes[i]
+		}
+	}
+	return res
+}
+
+func (erb *ecRebuilder) rebuildEcVolumes(collection string) error {
 	fmt.Printf("rebuildEcVolumes %s\n", collection)
 
 	// collect vid => each shard locations, similar to ecShardMap in topology.go
 	ecShardMap := make(EcShardMap)
-	for _, ecNode := range allEcNodes {
+	for _, ecNode := range erb.ecNodes {
 		ecShardMap.registerEcNode(ecNode, collection)
 	}
@@ -119,13 +158,7 @@ func rebuildEcVolumes(commandEnv *CommandEnv, allEcNodes []*EcNode, collection s
 			return fmt.Errorf("ec volume %d is unrepairable with %d shards\n", vid, shardCount)
 		}
 
-		sortEcNodesByFreeslotsDescending(allEcNodes)
-		if allEcNodes[0].freeEcSlot < erasure_coding.TotalShardsCount {
-			return fmt.Errorf("disk space is not enough")
-		}
-
-		if err := rebuildOneEcVolume(commandEnv, allEcNodes[0], collection, vid, locations, writer, applyChanges); err != nil {
+		if err := erb.rebuildOneEcVolume(collection, vid, locations); err != nil {
 			return err
 		}
 	}
@@ -133,17 +166,22 @@ func rebuildEcVolumes(commandEnv *CommandEnv, allEcNodes []*EcNode, collection s
 	return nil
 }
 
-func rebuildOneEcVolume(commandEnv *CommandEnv, rebuilder *EcNode, collection string, volumeId needle.VolumeId, locations EcShardLocations, writer io.Writer, applyChanges bool) error {
-	if !commandEnv.isLocked() {
+func (erb *ecRebuilder) rebuildOneEcVolume(collection string, volumeId needle.VolumeId, locations EcShardLocations) error {
+	if !erb.isLocked() {
 		return fmt.Errorf("lock is lost")
 	}
 
+	// TODO: fix this logic so it supports concurrent executions
+	rebuilder := erb.ecNodeWithMoreFreeSlots()
+	if rebuilder.freeEcSlot < erasure_coding.TotalShardsCount {
+		return fmt.Errorf("disk space is not enough")
+	}
+
 	fmt.Printf("rebuildOneEcVolume %s %d\n", collection, volumeId)
 
 	// collect shard files to rebuilder local disk
 	var generatedShardIds []uint32
-	copiedShardIds, _, err := prepareDataToRecover(commandEnv, rebuilder, collection, volumeId, locations, writer, applyChanges)
+	copiedShardIds, _, err := erb.prepareDataToRecover(rebuilder, collection, volumeId, locations)
 	if err != nil {
 		return err
 	}
@@ -151,25 +189,25 @@ func rebuildOneEcVolume(commandEnv *CommandEnv, rebuilder *EcNode, collection st
 		// clean up working files
 
 		// ask the rebuilder to delete the copied shards
-		err = sourceServerDeleteEcShards(commandEnv.option.GrpcDialOption, collection, volumeId, pb.NewServerAddressFromDataNode(rebuilder.info), copiedShardIds)
+		err = sourceServerDeleteEcShards(erb.commandEnv.option.GrpcDialOption, collection, volumeId, pb.NewServerAddressFromDataNode(rebuilder.info), copiedShardIds)
 		if err != nil {
-			fmt.Fprintf(writer, "%s delete copied ec shards %s %d.%v\n", rebuilder.info.Id, collection, volumeId, copiedShardIds)
+			erb.write("%s delete copied ec shards %s %d.%v\n", rebuilder.info.Id, collection, volumeId, copiedShardIds)
 		}
 	}()
 
-	if !applyChanges {
+	if !erb.applyChanges {
 		return nil
 	}
 
 	// generate ec shards, and maybe ecx file
-	generatedShardIds, err = generateMissingShards(commandEnv.option.GrpcDialOption, collection, volumeId, pb.NewServerAddressFromDataNode(rebuilder.info))
+	generatedShardIds, err = erb.generateMissingShards(collection, volumeId, pb.NewServerAddressFromDataNode(rebuilder.info))
 	if err != nil {
 		return err
 	}
 
 	// mount the generated shards
-	err = mountEcShards(commandEnv.option.GrpcDialOption, collection, volumeId, pb.NewServerAddressFromDataNode(rebuilder.info), generatedShardIds)
+	err = mountEcShards(erb.commandEnv.option.GrpcDialOption, collection, volumeId, pb.NewServerAddressFromDataNode(rebuilder.info), generatedShardIds)
 	if err != nil {
 		return err
 	}
@@ -179,9 +217,9 @@ func rebuildOneEcVolume(commandEnv *CommandEnv, rebuilder *EcNode, collection st
 	return nil
 }
 
-func generateMissingShards(grpcDialOption grpc.DialOption, collection string, volumeId needle.VolumeId, sourceLocation pb.ServerAddress) (rebuiltShardIds []uint32, err error) {
-	err = operation.WithVolumeServerClient(false, sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+func (erb *ecRebuilder) generateMissingShards(collection string, volumeId needle.VolumeId, sourceLocation pb.ServerAddress) (rebuiltShardIds []uint32, err error) {
+	err = operation.WithVolumeServerClient(false, sourceLocation, erb.commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
 		resp, rebuildErr := volumeServerClient.VolumeEcShardsRebuild(context.Background(), &volume_server_pb.VolumeEcShardsRebuildRequest{
 			VolumeId:   uint32(volumeId),
 			Collection: collection,
@@ -194,7 +232,7 @@ func generateMissingShards(grpcDialOption grpc.DialOption, collection string, vo
 	return
 }
 
-func prepareDataToRecover(commandEnv *CommandEnv, rebuilder *EcNode, collection string, volumeId needle.VolumeId, locations EcShardLocations, writer io.Writer, applyBalancing bool) (copiedShardIds []uint32, localShardIds []uint32, err error) {
+func (erb *ecRebuilder) prepareDataToRecover(rebuilder *EcNode, collection string, volumeId needle.VolumeId, locations EcShardLocations) (copiedShardIds []uint32, localShardIds []uint32, err error) {
 
 	needEcxFile := true
 	var localShardBits erasure_coding.ShardBits
@@ -208,21 +246,20 @@ func prepareDataToRecover(commandEnv *CommandEnv, rebuilder *EcNode, collection
 	}
 
 	for shardId, ecNodes := range locations {
-
 		if len(ecNodes) == 0 {
-			fmt.Fprintf(writer, "missing shard %d.%d\n", volumeId, shardId)
+			erb.write("missing shard %d.%d\n", volumeId, shardId)
 			continue
 		}
 
 		if localShardBits.HasShardId(erasure_coding.ShardId(shardId)) {
 			localShardIds = append(localShardIds, uint32(shardId))
-			fmt.Fprintf(writer, "use existing shard %d.%d\n", volumeId, shardId)
+			erb.write("use existing shard %d.%d\n", volumeId, shardId)
 			continue
 		}
 
 		var copyErr error
-		if applyBalancing {
-			copyErr = operation.WithVolumeServerClient(false, pb.NewServerAddressFromDataNode(rebuilder.info), commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+		if erb.applyChanges {
+			copyErr = operation.WithVolumeServerClient(false, pb.NewServerAddressFromDataNode(rebuilder.info), erb.commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
 				_, copyErr := volumeServerClient.VolumeEcShardsCopy(context.Background(), &volume_server_pb.VolumeEcShardsCopyRequest{
 					VolumeId:   uint32(volumeId),
 					Collection: collection,
@@ -230,7 +267,7 @@ func prepareDataToRecover(commandEnv *CommandEnv, rebuilder *EcNode, collection
 					CopyEcxFile:    needEcxFile,
 					CopyEcjFile:    true,
 					CopyVifFile:    needEcxFile,
-					SourceDataNode: ecNodes[0].info.Id,
+					SourceDataNode: rebuilder.info.Id,
 				})
 				return copyErr
 			})
@@ -239,9 +276,9 @@ func prepareDataToRecover(commandEnv *CommandEnv, rebuilder *EcNode, collection
 			}
 		}
 
 		if copyErr != nil {
-			fmt.Fprintf(writer, "%s failed to copy %d.%d from %s: %v\n", rebuilder.info.Id, volumeId, shardId, ecNodes[0].info.Id, copyErr)
+			erb.write("%s failed to copy %d.%d from %s: %v\n", rebuilder.info.Id, volumeId, shardId, rebuilder.info.Id, copyErr)
 		} else {
-			fmt.Fprintf(writer, "%s copied %d.%d from %s\n", rebuilder.info.Id, volumeId, shardId, ecNodes[0].info.Id)
+			erb.write("%s copied %d.%d from %s\n", rebuilder.info.Id, volumeId, shardId, rebuilder.info.Id)
 			copiedShardIds = append(copiedShardIds, uint32(shardId))
 		}

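One behavioral note on the diff above: the old code sorted allEcNodes in place (sortEcNodesByFreeslotsDescending) just to take the first element, while the new ecNodeWithMoreFreeSlots helper picks the node with the most free EC slots in a single O(n) scan and never reorders the shared slice, a property that matters once rebuilds run concurrently. Below is a self-contained sketch of the same scan; the node type is a simplified stand-in for the package's *EcNode, and withMostFreeSlots is an illustrative name.

// Self-contained sketch of the max-scan selection introduced above.
package main

import "fmt"

type node struct {
	id         string
	freeEcSlot int
}

// withMostFreeSlots mirrors ecNodeWithMoreFreeSlots: it returns the node
// with the highest free EC slot count, or nil for an empty slice, without
// reordering the input.
func withMostFreeSlots(nodes []*node) *node {
	if len(nodes) == 0 {
		return nil
	}
	res := nodes[0]
	for i := 1; i < len(nodes); i++ {
		if nodes[i].freeEcSlot > res.freeEcSlot {
			res = nodes[i]
		}
	}
	return res
}

func main() {
	nodes := []*node{{"dn-a", 3}, {"dn-b", 14}, {"dn-c", 9}}
	fmt.Println(withMostFreeSlots(nodes).id) // prints "dn-b"
}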