
Merge branch 'master' into mq

Branch: mq
chrislu · 2 days ago · commit 80b4a311fa
1. weed/shell/command_ec_common.go (12 changed lines)
2. weed/shell/command_ec_common_test.go (96 changed lines)
3. weed/shell/command_ec_encode.go (12 changed lines)

weed/shell/command_ec_common.go (12 changed lines)

@@ -248,14 +248,14 @@ func collectCollectionsForVolumeIds(t *master_pb.TopologyInfo, vids []needle.Vol
     for _, diskInfo := range dn.DiskInfos {
         for _, vi := range diskInfo.VolumeInfos {
             for _, vid := range vids {
-                if needle.VolumeId(vi.Id) == vid && vi.Collection != "" {
+                if needle.VolumeId(vi.Id) == vid {
                     found[vi.Collection] = true
                 }
             }
         }
         for _, ecs := range diskInfo.EcShardInfos {
             for _, vid := range vids {
-                if needle.VolumeId(ecs.Id) == vid && ecs.Collection != "" {
+                if needle.VolumeId(ecs.Id) == vid {
                     found[ecs.Collection] = true
                 }
             }
@@ -429,7 +429,13 @@ func countFreeShardSlots(dn *master_pb.DataNodeInfo, diskType types.DiskType) (c
     if diskInfo == nil {
         return 0
     }
-    return int(diskInfo.MaxVolumeCount-diskInfo.VolumeCount)*erasure_coding.DataShardsCount - countShards(diskInfo.EcShardInfos)
+    slots := int(diskInfo.MaxVolumeCount-diskInfo.VolumeCount)*erasure_coding.DataShardsCount - countShards(diskInfo.EcShardInfos)
+    if slots < 0 {
+        return 0
+    }
+    return slots
 }

 func (ecNode *EcNode) localShardIdCount(vid uint32) int {
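The clamp added above keeps countFreeShardSlots from reporting a negative slot count when a node already carries more EC shards than its remaining volume capacity covers. Below is a minimal standalone sketch of the same guard; the helper name and numbers are illustrative only, and dataShardsPerVolume stands in for erasure_coding.DataShardsCount (a volume is split into 10 data shards in SeaweedFS's default 10+4 scheme).

package main

import "fmt"

// freeShardSlots mirrors the clamped calculation: spare volume capacity is
// converted into EC shard slots, existing shards are subtracted, and the
// result is never reported as a negative number.
func freeShardSlots(maxVolumeCount, volumeCount, existingEcShards, dataShardsPerVolume int) int {
    slots := (maxVolumeCount-volumeCount)*dataShardsPerVolume - existingEcShards
    if slots < 0 {
        return 0
    }
    return slots
}

func main() {
    // A full node that already holds 25 EC shards would otherwise report -25.
    fmt.Println(freeShardSlots(10, 10, 25, 10)) // 0
    // A node with 2 spare volume slots and 3 EC shards placed reports 17.
    fmt.Println(freeShardSlots(10, 8, 3, 10)) // 17
}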

weed/shell/command_ec_common_test.go (96 changed lines)

@@ -10,6 +10,7 @@ import (
     "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
     "github.com/seaweedfs/seaweedfs/weed/storage/needle"
     "github.com/seaweedfs/seaweedfs/weed/storage/super_block"
+    "github.com/seaweedfs/seaweedfs/weed/storage/types"
 )

 var (
@@ -44,9 +45,9 @@ func TestCollectCollectionsForVolumeIds(t *testing.T) {
     {topology1, nil, nil},
     {topology1, []needle.VolumeId{}, nil},
     {topology1, []needle.VolumeId{needle.VolumeId(9999)}, nil},
-    {topology1, []needle.VolumeId{needle.VolumeId(2)}, nil},
-    {topology1, []needle.VolumeId{needle.VolumeId(2), needle.VolumeId(272)}, []string{"collection2"}},
-    {topology1, []needle.VolumeId{needle.VolumeId(2), needle.VolumeId(272), needle.VolumeId(299)}, []string{"collection2"}},
+    {topology1, []needle.VolumeId{needle.VolumeId(2)}, []string{""}},
+    {topology1, []needle.VolumeId{needle.VolumeId(2), needle.VolumeId(272)}, []string{"", "collection2"}},
+    {topology1, []needle.VolumeId{needle.VolumeId(2), needle.VolumeId(272), needle.VolumeId(299)}, []string{"", "collection2"}},
     {topology1, []needle.VolumeId{needle.VolumeId(272), needle.VolumeId(299), needle.VolumeId(95)}, []string{"collection1", "collection2"}},
     {topology1, []needle.VolumeId{needle.VolumeId(272), needle.VolumeId(299), needle.VolumeId(95), needle.VolumeId(51)}, []string{"collection1", "collection2"}},
     {topology1, []needle.VolumeId{needle.VolumeId(272), needle.VolumeId(299), needle.VolumeId(95), needle.VolumeId(51), needle.VolumeId(15)}, []string{"collection0", "collection1", "collection2"}},
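The revised expectations in this hunk ([]string{""}, []string{"", "collection2"}, and so on) reflect the change to collectCollectionsForVolumeIds above: volumes living in the default, unnamed collection are no longer filtered out, so the empty string now appears as a collection name. A self-contained sketch of that pattern follows, using stand-in types rather than the shell package's own structs.

package main

import (
    "fmt"
    "sort"
)

// volumeInfo is a stand-in for the volume and EC shard records walked by
// collectCollectionsForVolumeIds; only the fields needed here are modeled.
type volumeInfo struct {
    id         uint32
    collection string // "" is the default collection and is now kept
}

// collectionsForVolumeIds returns the sorted, de-duplicated collection names
// of the matching volumes, including the empty default collection.
func collectionsForVolumeIds(volumes []volumeInfo, vids []uint32) []string {
    found := map[string]bool{}
    for _, v := range volumes {
        for _, vid := range vids {
            if v.id == vid {
                found[v.collection] = true
            }
        }
    }
    var result []string
    for c := range found {
        result = append(result, c)
    }
    sort.Strings(result)
    return result
}

func main() {
    volumes := []volumeInfo{{2, ""}, {272, "collection2"}, {299, "collection2"}}
    // Prints "[ collection2]": the empty default collection, then "collection2".
    fmt.Println(collectionsForVolumeIds(volumes, []uint32{2, 272, 299}))
}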
@@ -264,3 +265,92 @@ func TestPickEcNodeToBalanceShardsInto(t *testing.T) {
         }
     }
 }
+
+func TestCountFreeShardSlots(t *testing.T) {
+    testCases := []struct {
+        name     string
+        topology *master_pb.TopologyInfo
+        diskType types.DiskType
+        want     map[string]int
+    }{
+        {
+            name:     "topology #1, free HDD shards",
+            topology: topology1,
+            diskType: types.HardDriveType,
+            want: map[string]int{
+                "192.168.1.1:8080": 17330,
+                "192.168.1.2:8080": 1540,
+                "192.168.1.4:8080": 1900,
+                "192.168.1.5:8080": 27010,
+                "192.168.1.6:8080": 17420,
+            },
+        },
+        {
+            name:     "topology #1, no free SSD shards available",
+            topology: topology1,
+            diskType: types.SsdType,
+            want: map[string]int{
+                "192.168.1.1:8080": 0,
+                "192.168.1.2:8080": 0,
+                "192.168.1.4:8080": 0,
+                "192.168.1.5:8080": 0,
+                "192.168.1.6:8080": 0,
+            },
+        },
+        {
+            name:     "topology #2, no negative free HDD shards",
+            topology: topology2,
+            diskType: types.HardDriveType,
+            want: map[string]int{
+                "172.19.0.3:8708":  0,
+                "172.19.0.4:8707":  8,
+                "172.19.0.5:8705":  58,
+                "172.19.0.6:8713":  39,
+                "172.19.0.8:8709":  8,
+                "172.19.0.9:8712":  0,
+                "172.19.0.10:8702": 0,
+                "172.19.0.13:8701": 0,
+                "172.19.0.14:8711": 0,
+                "172.19.0.16:8704": 89,
+                "172.19.0.17:8703": 0,
+                "172.19.0.19:8700": 9,
+                "172.19.0.20:8706": 0,
+                "172.19.0.21:8710": 9,
+            },
+        },
+        {
+            name:     "topology #2, no free SSD shards available",
+            topology: topology2,
+            diskType: types.SsdType,
+            want: map[string]int{
+                "172.19.0.10:8702": 0,
+                "172.19.0.13:8701": 0,
+                "172.19.0.14:8711": 0,
+                "172.19.0.16:8704": 0,
+                "172.19.0.17:8703": 0,
+                "172.19.0.19:8700": 0,
+                "172.19.0.20:8706": 0,
+                "172.19.0.21:8710": 0,
+                "172.19.0.3:8708":  0,
+                "172.19.0.4:8707":  0,
+                "172.19.0.5:8705":  0,
+                "172.19.0.6:8713":  0,
+                "172.19.0.8:8709":  0,
+                "172.19.0.9:8712":  0,
+            },
+        },
+    }
+
+    for _, tc := range testCases {
+        t.Run(tc.name, func(t *testing.T) {
+            got := map[string]int{}
+            eachDataNode(tc.topology, func(dc DataCenterId, rack RackId, dn *master_pb.DataNodeInfo) {
+                got[dn.Id] = countFreeShardSlots(dn, tc.diskType)
+            })
+
+            if !reflect.DeepEqual(got, tc.want) {
+                t.Errorf("got %v, want %v", got, tc.want)
+            }
+        })
+    }
+}

weed/shell/command_ec_encode.go (12 changed lines)

@@ -98,23 +98,19 @@ func (c *commandEcEncode) Do(args []string, commandEnv *CommandEnv, writer io.Wr
        }
    }
    var collections []string
    var volumeIds []needle.VolumeId
    if vid := needle.VolumeId(*volumeId); vid != 0 {
        // volumeId is provided
        volumeIds = append(volumeIds, vid)
        collections = collectCollectionsForVolumeIds(topologyInfo, volumeIds)
    } else {
        // apply to all volumes in the collection
        // apply to all volumes for the given collection
        volumeIds, err = collectVolumeIdsForEcEncode(commandEnv, *collection, *fullPercentage, *quietPeriod)
        if err != nil {
            return err
        }
    }
    var collections []string
    if *collection != "" {
        collections = []string{*collection}
    } else {
        collections = collectCollectionsForVolumeIds(topologyInfo, volumeIds)
        collections = append(collections, *collection)
    }

    // encode all requested volumes...
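The if *collection != "" branch shown in this hunk picks the collection list from the -collection flag when one is given, and otherwise derives it from the topology of the volumes selected for encoding. A condensed sketch of that decision follows; the helper names and the fake lookup are hypothetical stand-ins, not the shell package's API.

package main

import "fmt"

// collectionsFor mirrors the flag-versus-topology decision: an explicit
// -collection value is used as-is, otherwise the collection names are derived
// from the volumes that were actually selected. lookupFromTopology is a
// hypothetical stand-in for collectCollectionsForVolumeIds.
func collectionsFor(collectionFlag string, volumeIds []uint32, lookupFromTopology func([]uint32) []string) []string {
    if collectionFlag != "" {
        return []string{collectionFlag}
    }
    return lookupFromTopology(volumeIds)
}

func main() {
    // Fake lookup: pretend the selected volumes span the default collection and "collection2".
    lookup := func(vids []uint32) []string { return []string{"", "collection2"} }

    fmt.Println(collectionsFor("pictures", []uint32{42}, lookup)) // [pictures]
    fmt.Println(collectionsFor("", []uint32{2, 272}, lookup))     // [ collection2]
}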
