
Delete legacy balancing code for `ec.encode`. (#6344)
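For context: the removed helpers spread freshly generated EC shards from the source volume server across EC nodes, using a simple round-robin allocation (`balancedEcDistribution`) that started at a random server and skipped servers without free EC slots. Below is a minimal, self-contained sketch of that allocation logic, not code from this repository; the `totalShards` constant, the `freeEcSlots` slice, and the `roundRobinDistribution` name are stand-ins for `erasure_coding.TotalShardsCount`, the per-node `freeEcSlot` field, and the removed helper.

```go
// Sketch only: mirrors the round-robin shard allocation that the removed
// balancedEcDistribution helper performed.
package main

import (
	"fmt"
	"math/rand"
)

// totalShards stands in for erasure_coding.TotalShardsCount
// (10 data + 4 parity shards in the default SeaweedFS layout).
const totalShards = 14

// roundRobinDistribution returns, for each server, the shard IDs assigned to it.
// freeEcSlots[i] is the number of free EC shard slots reported by server i.
func roundRobinDistribution(freeEcSlots []int) [][]uint32 {
	allocated := make([][]uint32, len(freeEcSlots))
	shardId := uint32(0)
	server := rand.Intn(len(freeEcSlots)) // start at a random server, as the removed code did
	for shardId < totalShards {
		if freeEcSlots[server] > 0 {
			allocated[server] = append(allocated[server], shardId)
			shardId++
		}
		server = (server + 1) % len(freeEcSlots)
	}
	return allocated
}

func main() {
	// Three servers with ample free slots each receive roughly totalShards/3 shards.
	fmt.Println(roundRobinDistribution([]int{100, 100, 100}))
}
```

Note that, like the removed helper, the sketch never decrements the free-slot count inside the loop, so a server reporting a single free slot can still receive several shards. After this change, shard placement for `ec.encode` is presumably covered by the remaining `ecBalancer`-based logic exercised in the retained tests below.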

Branch: pull/6349/head
Lisandro Pin authored 1 week ago, committed by GitHub
parent commit 6320036c56
Changed files:
  1. weed/shell/command_ec_encode.go (133 changed lines)
  2. weed/shell/command_ec_test.go (13 changed lines)

weed/shell/command_ec_encode.go (133 changed lines)

@@ -5,8 +5,6 @@ import (
 	"flag"
 	"fmt"
 	"io"
-	"math/rand"
-	"sync"
 	"time"
 
 	"github.com/seaweedfs/seaweedfs/weed/glog"
@@ -19,7 +17,6 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
 	"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
 	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
-	"github.com/seaweedfs/seaweedfs/weed/wdclient"
 )
 
 func init() {
@@ -181,136 +178,6 @@ func generateEcShards(grpcDialOption grpc.DialOption, volumeId needle.VolumeId,
 }
 
-// TODO: delete this (now unused) shard spread logic.
-func spreadEcShards(commandEnv *CommandEnv, volumeId needle.VolumeId, collection string, existingLocations []wdclient.Location, parallelCopy bool) (err error) {
-	allEcNodes, totalFreeEcSlots, err := collectEcNodes(commandEnv)
-	if err != nil {
-		return err
-	}
-
-	if totalFreeEcSlots < erasure_coding.TotalShardsCount {
-		return fmt.Errorf("not enough free ec shard slots. only %d left", totalFreeEcSlots)
-	}
-	allocatedDataNodes := allEcNodes
-	if len(allocatedDataNodes) > erasure_coding.TotalShardsCount {
-		allocatedDataNodes = allocatedDataNodes[:erasure_coding.TotalShardsCount]
-	}
-
-	// calculate how many shards to allocate for these servers
-	allocatedEcIds := balancedEcDistribution(allocatedDataNodes)
-
-	// ask the data nodes to copy from the source volume server
-	copiedShardIds, err := parallelCopyEcShardsFromSource(commandEnv.option.GrpcDialOption, allocatedDataNodes, allocatedEcIds, volumeId, collection, existingLocations[0], parallelCopy)
-	if err != nil {
-		return err
-	}
-
-	// unmount the to be deleted shards
-	err = unmountEcShards(commandEnv.option.GrpcDialOption, volumeId, existingLocations[0].ServerAddress(), copiedShardIds)
-	if err != nil {
-		return err
-	}
-
-	// ask the source volume server to clean up copied ec shards
-	err = sourceServerDeleteEcShards(commandEnv.option.GrpcDialOption, collection, volumeId, existingLocations[0].ServerAddress(), copiedShardIds)
-	if err != nil {
-		return fmt.Errorf("source delete copied ecShards %s %d.%v: %v", existingLocations[0].Url, volumeId, copiedShardIds, err)
-	}
-
-	// ask the source volume server to delete the original volume
-	for _, location := range existingLocations {
-		fmt.Printf("delete volume %d from %s\n", volumeId, location.Url)
-		err = deleteVolume(commandEnv.option.GrpcDialOption, volumeId, location.ServerAddress(), false)
-		if err != nil {
-			return fmt.Errorf("deleteVolume %s volume %d: %v", location.Url, volumeId, err)
-		}
-	}
-
-	return err
-}
-
-func parallelCopyEcShardsFromSource(grpcDialOption grpc.DialOption, targetServers []*EcNode, allocatedEcIds [][]uint32, volumeId needle.VolumeId, collection string, existingLocation wdclient.Location, parallelCopy bool) (actuallyCopied []uint32, err error) {
-	fmt.Printf("parallelCopyEcShardsFromSource %d %s\n", volumeId, existingLocation.Url)
-
-	var wg sync.WaitGroup
-	shardIdChan := make(chan []uint32, len(targetServers))
-	copyFunc := func(server *EcNode, allocatedEcShardIds []uint32) {
-		defer wg.Done()
-		copiedShardIds, copyErr := oneServerCopyAndMountEcShardsFromSource(grpcDialOption, server,
-			allocatedEcShardIds, volumeId, collection, existingLocation.ServerAddress())
-		if copyErr != nil {
-			err = copyErr
-		} else {
-			shardIdChan <- copiedShardIds
-			server.addEcVolumeShards(volumeId, collection, copiedShardIds)
-		}
-	}
-	cleanupFunc := func(server *EcNode, allocatedEcShardIds []uint32) {
-		if err := unmountEcShards(grpcDialOption, volumeId, pb.NewServerAddressFromDataNode(server.info), allocatedEcShardIds); err != nil {
-			fmt.Printf("unmount aborted shards %d.%v on %s: %v\n", volumeId, allocatedEcShardIds, server.info.Id, err)
-		}
-		if err := sourceServerDeleteEcShards(grpcDialOption, collection, volumeId, pb.NewServerAddressFromDataNode(server.info), allocatedEcShardIds); err != nil {
-			fmt.Printf("remove aborted shards %d.%v on target server %s: %v\n", volumeId, allocatedEcShardIds, server.info.Id, err)
-		}
-		if err := sourceServerDeleteEcShards(grpcDialOption, collection, volumeId, existingLocation.ServerAddress(), allocatedEcShardIds); err != nil {
-			fmt.Printf("remove aborted shards %d.%v on existing server %s: %v\n", volumeId, allocatedEcShardIds, existingLocation.ServerAddress(), err)
-		}
-	}
-
-	// maybe parallelize
-	for i, server := range targetServers {
-		if len(allocatedEcIds[i]) <= 0 {
-			continue
-		}
-		wg.Add(1)
-		if parallelCopy {
-			go copyFunc(server, allocatedEcIds[i])
-		} else {
-			copyFunc(server, allocatedEcIds[i])
-		}
-	}
-	wg.Wait()
-	close(shardIdChan)
-
-	if err != nil {
-		for i, server := range targetServers {
-			if len(allocatedEcIds[i]) <= 0 {
-				continue
-			}
-			cleanupFunc(server, allocatedEcIds[i])
-		}
-		return nil, err
-	}
-
-	for shardIds := range shardIdChan {
-		actuallyCopied = append(actuallyCopied, shardIds...)
-	}
-
-	return
-}
-
-func balancedEcDistribution(servers []*EcNode) (allocated [][]uint32) {
-	allocated = make([][]uint32, len(servers))
-	allocatedShardIdIndex := uint32(0)
-	serverIndex := rand.Intn(len(servers))
-	for allocatedShardIdIndex < erasure_coding.TotalShardsCount {
-		if servers[serverIndex].freeEcSlot > 0 {
-			allocated[serverIndex] = append(allocated[serverIndex], allocatedShardIdIndex)
-			allocatedShardIdIndex++
-		}
-		serverIndex++
-		if serverIndex >= len(servers) {
-			serverIndex = 0
-		}
-	}
-
-	return allocated
-}
-
 func collectVolumeIdsForEcEncode(commandEnv *CommandEnv, selectedCollection string, fullPercentage float64, quietPeriod time.Duration) (vids []needle.VolumeId, err error) {
 	// collect topology information
 	topologyInfo, volumeSizeLimitMb, err := collectTopologyInfo(commandEnv, 0)

weed/shell/command_ec_test.go (13 changed lines)

@@ -1,25 +1,12 @@
 package shell
 
 import (
-	"fmt"
 	"testing"
 
 	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
 	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
 )
 
-func TestCommandEcDistribution(t *testing.T) {
-	allEcNodes := []*EcNode{
-		newEcNode("dc1", "rack1", "dn1", 100),
-		newEcNode("dc1", "rack2", "dn2", 100),
-	}
-
-	allocated := balancedEcDistribution(allEcNodes)
-	fmt.Printf("allocated: %+v", allocated)
-}
-
 func TestCommandEcBalanceSmall(t *testing.T) {
 	ecb := &ecBalancer{
 		ecNodes: []*EcNode{
