Chris Lu
4 years ago
50 changed files with 1644 additions and 1321 deletions
4     weed/command/mount_std.go
5     weed/command/volume.go
4     weed/filesys/wfs.go
44    weed/pb/master.proto
1430  weed/pb/master_pb/master.pb.go
11    weed/server/master_grpc_server.go
8     weed/server/master_grpc_server_volume.go
4     weed/server/master_server_handlers_admin.go
3     weed/server/volume_grpc_admin.go
1     weed/server/volume_grpc_client_to_master.go
3     weed/server/volume_grpc_copy.go
2     weed/server/volume_grpc_erasure_coding.go
3     weed/server/volume_server.go
17    weed/shell/command_ec_balance.go
30    weed/shell/command_ec_common.go
10    weed/shell/command_ec_decode.go
4     weed/shell/command_ec_encode.go
8     weed/shell/command_ec_rebuild.go
36    weed/shell/command_volume_balance.go
4     weed/shell/command_volume_configure_replication.go
18    weed/shell/command_volume_fix_replication.go
6     weed/shell/command_volume_fsck.go
36    weed/shell/command_volume_list.go
11    weed/shell/command_volume_server_evacuate.go
4     weed/shell/command_volume_tier_download.go
108   weed/shell/command_volume_tier_move.go
5     weed/storage/disk_location.go
4     weed/storage/disk_location_ec.go
6     weed/storage/erasure_coding/ec_shard.go
6     weed/storage/erasure_coding/ec_volume.go
6     weed/storage/erasure_coding/ec_volume_info.go
13    weed/storage/store.go
2     weed/storage/store_ec.go
2     weed/storage/types/volume_disk_type.go
2     weed/storage/volume.go
10    weed/topology/collection.go
15    weed/topology/data_center.go
161   weed/topology/data_node.go
122   weed/topology/data_node_ec.go
275   weed/topology/disk.go
84    weed/topology/disk_ec.go
170   weed/topology/node.go
21    weed/topology/rack.go
12    weed/topology/topology.go
3     weed/topology/topology_ec.go
23    weed/topology/topology_event_handling.go
15    weed/topology/topology_map.go
3     weed/topology/topology_test.go
4     weed/topology/volume_growth.go
5     weed/topology/volume_layout.go
1430  weed/pb/master_pb/master.pb.go: file diff suppressed because it is too large
108  weed/shell/command_volume_tier_move.go
@@ -0,0 +1,108 @@
package shell

import (
    "context"
    "flag"
    "fmt"
    "io"
    "time"

    "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
    "github.com/chrislusf/seaweedfs/weed/storage/needle"
    "github.com/chrislusf/seaweedfs/weed/storage/types"
)

func init() {
    Commands = append(Commands, &commandVolumeTierMove{})
}

type commandVolumeTierMove struct {
}

func (c *commandVolumeTierMove) Name() string {
    return "volume.tier.move"
}

func (c *commandVolumeTierMove) Help() string {
    return `change a volume from one disk type to another

    volume.tier.move -fromDiskType=hdd -toDiskType=ssd [-collection=""] [-fullPercent=95] [-quietFor=1h]
    volume.tier.move -toDiskType=hdd [-collection=""] -volumeId=<volume_id>

`
}

func (c *commandVolumeTierMove) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {

    if err = commandEnv.confirmIsLocked(); err != nil {
        return
    }

    tierCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
    volumeId := tierCommand.Int("volumeId", 0, "the volume id")
    collection := tierCommand.String("collection", "", "the collection name")
    fullPercentage := tierCommand.Float64("fullPercent", 95, "the volume reaches the percentage of max volume size")
    quietPeriod := tierCommand.Duration("quietFor", 24*time.Hour, "select volumes without writes for this period")
    source := tierCommand.String("fromDiskType", "", "the source disk type")
    target := tierCommand.String("toDiskType", "", "the target disk type")
    if err = tierCommand.Parse(args); err != nil {
        return nil
    }

    if *source == *target {
        return fmt.Errorf("source tier %s is the same as target tier %s", *source, *target)
    }

    vid := needle.VolumeId(*volumeId)

    // volumeId is provided
    if vid != 0 {
        // return doVolumeTierMove(commandEnv, writer, *collection, vid, *dest, *keepLocalDatFile)
    }

    // apply to all volumes in the collection
    // reusing collectVolumeIdsForEcEncode for now
    volumeIds, err := collectVolumeIdsForTierChange(commandEnv, *source, *collection, *fullPercentage, *quietPeriod)
    if err != nil {
        return err
    }
    fmt.Printf("tier move volumes: %v\n", volumeIds)

    return nil
}

func collectVolumeIdsForTierChange(commandEnv *CommandEnv, sourceTier string, selectedCollection string, fullPercentage float64, quietPeriod time.Duration) (vids []needle.VolumeId, err error) {

    var resp *master_pb.VolumeListResponse
    err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
        resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
        return err
    })
    if err != nil {
        return
    }

    quietSeconds := int64(quietPeriod / time.Second)
    nowUnixSeconds := time.Now().Unix()

    fmt.Printf("collect %s volumes quiet for: %d seconds\n", sourceTier, quietSeconds)

    vidMap := make(map[uint32]bool)
    eachDataNode(resp.TopologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
        for _, diskInfo := range dn.DiskInfos {
            for _, v := range diskInfo.VolumeInfos {
                if v.Collection == selectedCollection && v.ModifiedAtSecond+quietSeconds < nowUnixSeconds && types.DiskType(v.DiskType) == types.ToDiskType(sourceTier) {
                    if float64(v.Size) > fullPercentage/100*float64(resp.VolumeSizeLimitMb)*1024*1024 {
                        vidMap[v.Id] = true
                    }
                }
            }
        }
    })

    for vid := range vidMap {
        vids = append(vids, needle.VolumeId(vid))
    }

    return
}
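The selection in collectVolumeIdsForTierChange reduces to one predicate per volume: matching collection, source disk type, no writes for the -quietFor window, and size above -fullPercent of the volume size limit. The following standalone sketch of that predicate is not part of this change; plain types stand in for the master_pb messages, and isTierMoveCandidate is a made-up name used only for illustration.

package main

import (
    "fmt"
    "time"
)

// volumeStat is a plain stand-in for the handful of fields read from
// master_pb.VolumeInformationMessage in collectVolumeIdsForTierChange.
type volumeStat struct {
    collection       string
    diskType         string
    sizeBytes        uint64
    modifiedAtSecond int64
}

// isTierMoveCandidate mirrors the filter above: same collection, on the source
// disk type, quiet for at least quietPeriod, and fuller than fullPercent of the
// volume size limit.
func isTierMoveCandidate(v volumeStat, collection, sourceTier string, fullPercent float64, quietPeriod time.Duration, volumeSizeLimitMb uint64, now time.Time) bool {
    quiet := v.modifiedAtSecond+int64(quietPeriod/time.Second) < now.Unix()
    full := float64(v.sizeBytes) > fullPercent/100*float64(volumeSizeLimitMb)*1024*1024
    return v.collection == collection && v.diskType == sourceTier && quiet && full
}

func main() {
    // With a 1024 MB volume size limit and -fullPercent=95, anything above
    // roughly 972.8 MB counts as "full".
    v := volumeStat{
        collection:       "",
        diskType:         "hdd",
        sizeBytes:        1000 * 1024 * 1024,
        modifiedAtSecond: time.Now().Add(-2 * time.Hour).Unix(),
    }
    fmt.Println(isTierMoveCandidate(v, "", "hdd", 95, time.Hour, 1024, time.Now()))
}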
2  weed/storage/types/volume_disk_type.go
@@ -1,4 +1,4 @@
-package storage
+package types
 
 import (
     "strings"
275  weed/topology/disk.go
@@ -0,0 +1,275 @@
package topology

import (
    "fmt"
    "sync"

    "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
    "github.com/chrislusf/seaweedfs/weed/storage"
    "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
    "github.com/chrislusf/seaweedfs/weed/storage/needle"
    "github.com/chrislusf/seaweedfs/weed/storage/types"
    "github.com/chrislusf/seaweedfs/weed/util"
)

type Disk struct {
    NodeImpl
    volumes      map[needle.VolumeId]storage.VolumeInfo
    ecShards     map[needle.VolumeId]*erasure_coding.EcVolumeInfo
    ecShardsLock sync.RWMutex
}

func NewDisk(diskType string) *Disk {
    s := &Disk{}
    s.id = NodeId(diskType)
    s.nodeType = "Disk"
    s.diskUsages = newDiskUsages()
    s.volumes = make(map[needle.VolumeId]storage.VolumeInfo, 2)
    s.ecShards = make(map[needle.VolumeId]*erasure_coding.EcVolumeInfo, 2)
    s.NodeImpl.value = s
    return s
}

type DiskUsages struct {
    sync.RWMutex
    usages map[types.DiskType]*DiskUsageCounts
}

func newDiskUsages() *DiskUsages {
    return &DiskUsages{
        usages: make(map[types.DiskType]*DiskUsageCounts),
    }
}

func (d *DiskUsages) negative() *DiskUsages {
    d.RLock()
    defer d.RUnlock()
    t := newDiskUsages()
    for diskType, b := range d.usages {
        a := t.getOrCreateDisk(diskType)
        a.volumeCount = -b.volumeCount
        a.remoteVolumeCount = -b.remoteVolumeCount
        a.activeVolumeCount = -b.activeVolumeCount
        a.ecShardCount = -b.ecShardCount
        a.maxVolumeCount = -b.maxVolumeCount
    }
    return t
}

func (d *DiskUsages) ToMap() interface{} {
    d.RLock()
    defer d.RUnlock()
    ret := make(map[string]interface{})
    for diskType, diskUsage := range d.usages {
        ret[types.DiskType(diskType).String()] = diskUsage.ToMap()
    }
    return ret
}

func (d *DiskUsages) FreeSpace() (freeSpace int64) {
    d.RLock()
    defer d.RUnlock()
    for _, diskUsage := range d.usages {
        freeSpace += diskUsage.FreeSpace()
    }
    return
}

func (d *DiskUsages) GetMaxVolumeCount() (maxVolumeCount int64) {
    d.RLock()
    defer d.RUnlock()
    for _, diskUsage := range d.usages {
        maxVolumeCount += diskUsage.maxVolumeCount
    }
    return
}

type DiskUsageCounts struct {
    volumeCount       int64
    remoteVolumeCount int64
    activeVolumeCount int64
    ecShardCount      int64
    maxVolumeCount    int64
}

func (a *DiskUsageCounts) addDiskUsageCounts(b *DiskUsageCounts) {
    a.volumeCount += b.volumeCount
    a.remoteVolumeCount += b.remoteVolumeCount
    a.activeVolumeCount += b.activeVolumeCount
    a.ecShardCount += b.ecShardCount
    a.maxVolumeCount += b.maxVolumeCount
}

func (a *DiskUsageCounts) FreeSpace() int64 {
    freeVolumeSlotCount := a.maxVolumeCount + a.remoteVolumeCount - a.volumeCount
    if a.ecShardCount > 0 {
        freeVolumeSlotCount = freeVolumeSlotCount - a.ecShardCount/erasure_coding.DataShardsCount - 1
    }
    return freeVolumeSlotCount
}
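A hedged reading of the free-slot arithmetic above: remote volumes hand their local slot back, and EC shards consume roughly one slot per erasure_coding.DataShardsCount shards plus one reserved slot. Below is a tiny illustration with plain ints; it assumes DataShardsCount is 10, and freeSlots is a made-up helper that is not part of this change.

package main

import "fmt"

// freeSlots mirrors DiskUsageCounts.FreeSpace with plain ints, for illustration only.
func freeSlots(maxVolumes, remoteVolumes, volumes, ecShards, dataShards int64) int64 {
    free := maxVolumes + remoteVolumes - volumes
    if ecShards > 0 {
        free = free - ecShards/dataShards - 1
    }
    return free
}

func main() {
    // 100 slots, 80 volumes of which 5 are remote, 25 EC shards:
    // 100 + 5 - 80 - 25/10 - 1 = 22 free slots.
    fmt.Println(freeSlots(100, 5, 80, 25, 10))
}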
func (a *DiskUsageCounts) minus(b *DiskUsageCounts) *DiskUsageCounts {
    return &DiskUsageCounts{
        volumeCount:       a.volumeCount - b.volumeCount,
        remoteVolumeCount: a.remoteVolumeCount - b.remoteVolumeCount,
        activeVolumeCount: a.activeVolumeCount - b.activeVolumeCount,
        ecShardCount:      a.ecShardCount - b.ecShardCount,
        maxVolumeCount:    a.maxVolumeCount - b.maxVolumeCount,
    }
}

func (diskUsage *DiskUsageCounts) ToMap() interface{} {
    ret := make(map[string]interface{})
    ret["Volumes"] = diskUsage.volumeCount
    ret["EcShards"] = diskUsage.ecShardCount
    ret["Max"] = diskUsage.maxVolumeCount
    ret["Free"] = diskUsage.FreeSpace()
    return ret
}

func (du *DiskUsages) getOrCreateDisk(diskType types.DiskType) *DiskUsageCounts {
    du.Lock()
    defer du.Unlock()
    t, found := du.usages[diskType]
    if found {
        return t
    }
    t = &DiskUsageCounts{}
    du.usages[diskType] = t
    return t
}

func (d *Disk) String() string {
    d.RLock()
    defer d.RUnlock()
    return fmt.Sprintf("Disk:%s, volumes:%v, ecShards:%v", d.NodeImpl.String(), d.volumes, d.ecShards)
}

func (d *Disk) AddOrUpdateVolume(v storage.VolumeInfo) (isNew, isChangedRO bool) {
    d.Lock()
    defer d.Unlock()
    return d.doAddOrUpdateVolume(v)
}

func (d *Disk) doAddOrUpdateVolume(v storage.VolumeInfo) (isNew, isChangedRO bool) {
    deltaDiskUsages := newDiskUsages()
    deltaDiskUsage := deltaDiskUsages.getOrCreateDisk(types.DiskType(v.DiskType))
    if oldV, ok := d.volumes[v.Id]; !ok {
        d.volumes[v.Id] = v
        deltaDiskUsage.volumeCount = 1
        if v.IsRemote() {
            deltaDiskUsage.remoteVolumeCount = 1
        }
        if !v.ReadOnly {
            deltaDiskUsage.activeVolumeCount = 1
        }
        d.UpAdjustMaxVolumeId(v.Id)
        d.UpAdjustDiskUsageDelta(deltaDiskUsages)
        isNew = true
    } else {
        if oldV.IsRemote() != v.IsRemote() {
            if v.IsRemote() {
                deltaDiskUsage.remoteVolumeCount = 1
            }
            if oldV.IsRemote() {
                deltaDiskUsage.remoteVolumeCount = -1
            }
            d.UpAdjustDiskUsageDelta(deltaDiskUsages)
        }
        isChangedRO = d.volumes[v.Id].ReadOnly != v.ReadOnly
        d.volumes[v.Id] = v
    }
    return
}
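doAddOrUpdateVolume reports every change as a delta: a throwaway DiskUsages carrying only the difference, handed to UpAdjustDiskUsageDelta (defined in weed/topology/node.go, whose diff is not shown in this view) so that ancestors can fold it into their own counters. A minimal sketch of that folding step, written against the types above; the helper name and the exact up-propagation are assumptions, not code from this change.

// applyUsageDelta is a hypothetical illustration of what each ancestor would do
// with a delta produced by doAddOrUpdateVolume: add every per-disk-type counter
// into its own running totals.
func applyUsageDelta(own, delta *DiskUsages) {
    for diskType, counts := range delta.usages {
        own.getOrCreateDisk(diskType).addDiskUsageCounts(counts)
    }
}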
func (d *Disk) GetVolumes() (ret []storage.VolumeInfo) {
    d.RLock()
    for _, v := range d.volumes {
        ret = append(ret, v)
    }
    d.RUnlock()
    return ret
}

func (d *Disk) GetVolumesById(id needle.VolumeId) (storage.VolumeInfo, error) {
    d.RLock()
    defer d.RUnlock()
    vInfo, ok := d.volumes[id]
    if ok {
        return vInfo, nil
    } else {
        return storage.VolumeInfo{}, fmt.Errorf("volumeInfo not found")
    }
}

func (d *Disk) GetDataCenter() *DataCenter {
    dn := d.Parent()
    rack := dn.Parent()
    dcNode := rack.Parent()
    dcValue := dcNode.GetValue()
    return dcValue.(*DataCenter)
}

func (d *Disk) GetRack() *Rack {
    return d.Parent().Parent().(*NodeImpl).value.(*Rack)
}

func (d *Disk) GetTopology() *Topology {
    p := d.Parent()
    for p.Parent() != nil {
        p = p.Parent()
    }
    t := p.(*Topology)
    return t
}

func (d *Disk) ToMap() interface{} {
    ret := make(map[string]interface{})
    diskUsage := d.diskUsages.getOrCreateDisk(types.DiskType(d.Id()))
    ret["Volumes"] = diskUsage.volumeCount
    ret["VolumeIds"] = d.GetVolumeIds()
    ret["EcShards"] = diskUsage.ecShardCount
    ret["Max"] = diskUsage.maxVolumeCount
    ret["Free"] = d.FreeSpace()
    return ret
}

func (d *Disk) FreeSpace() int64 {
    t := d.diskUsages.getOrCreateDisk(types.DiskType(d.Id()))
    return t.FreeSpace()
}

func (d *Disk) ToDiskInfo() *master_pb.DiskInfo {
    diskUsage := d.diskUsages.getOrCreateDisk(types.DiskType(d.Id()))
    m := &master_pb.DiskInfo{
        Type:              string(d.Id()),
        VolumeCount:       uint64(diskUsage.volumeCount),
        MaxVolumeCount:    uint64(diskUsage.maxVolumeCount),
        FreeVolumeCount:   uint64(d.FreeSpace()),
        ActiveVolumeCount: uint64(diskUsage.activeVolumeCount),
        RemoteVolumeCount: uint64(diskUsage.remoteVolumeCount),
    }
    for _, v := range d.GetVolumes() {
        m.VolumeInfos = append(m.VolumeInfos, v.ToVolumeInformationMessage())
    }
    for _, ecv := range d.GetEcShards() {
        m.EcShardInfos = append(m.EcShardInfos, ecv.ToVolumeEcShardInformationMessage())
    }
    return m
}

// GetVolumeIds returns the human readable volume ids limited to count of max 100.
func (d *Disk) GetVolumeIds() string {
    d.RLock()
    defer d.RUnlock()
    ids := make([]int, 0, len(d.volumes))

    for k := range d.volumes {
        ids = append(ids, int(k))
    }

    return util.HumanReadableIntsMax(100, ids...)
}
84  weed/topology/disk_ec.go
@@ -0,0 +1,84 @@
package topology

import (
    "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
    "github.com/chrislusf/seaweedfs/weed/storage/needle"
    "github.com/chrislusf/seaweedfs/weed/storage/types"
)

func (d *Disk) GetEcShards() (ret []*erasure_coding.EcVolumeInfo) {
    d.RLock()
    for _, ecVolumeInfo := range d.ecShards {
        ret = append(ret, ecVolumeInfo)
    }
    d.RUnlock()
    return ret
}

func (d *Disk) AddOrUpdateEcShard(s *erasure_coding.EcVolumeInfo) {
    d.ecShardsLock.Lock()
    defer d.ecShardsLock.Unlock()

    delta := 0
    if existing, ok := d.ecShards[s.VolumeId]; !ok {
        d.ecShards[s.VolumeId] = s
        delta = s.ShardBits.ShardIdCount()
    } else {
        oldCount := existing.ShardBits.ShardIdCount()
        existing.ShardBits = existing.ShardBits.Plus(s.ShardBits)
        delta = existing.ShardBits.ShardIdCount() - oldCount
    }

    deltaDiskUsages := newDiskUsages()
    deltaDiskUsage := deltaDiskUsages.getOrCreateDisk(types.DiskType(d.Id()))
    deltaDiskUsage.ecShardCount = int64(delta)
    d.UpAdjustDiskUsageDelta(deltaDiskUsages)
}
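The delta above is just the number of shard-id bits that actually changed: ShardBits acts as a bitmask of shard ids, Plus and Minus as set union and difference, and ShardIdCount as a popcount. A standalone illustration with a plain uint32 standing in for erasure_coding.ShardBits (not part of this change):

package main

import (
    "fmt"
    "math/bits"
)

func main() {
    existing := uint32(0b0111) // shards 0, 1, 2 already tracked on this disk
    incoming := uint32(0b1100) // a heartbeat reports shards 2 and 3
    merged := existing | incoming
    delta := bits.OnesCount32(merged) - bits.OnesCount32(existing)
    fmt.Println(delta) // 1: only shard 3 is actually new
}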
func (d *Disk) DeleteEcShard(s *erasure_coding.EcVolumeInfo) {
    d.ecShardsLock.Lock()
    defer d.ecShardsLock.Unlock()

    if existing, ok := d.ecShards[s.VolumeId]; ok {
        oldCount := existing.ShardBits.ShardIdCount()
        existing.ShardBits = existing.ShardBits.Minus(s.ShardBits)
        delta := existing.ShardBits.ShardIdCount() - oldCount

        deltaDiskUsages := newDiskUsages()
        deltaDiskUsage := deltaDiskUsages.getOrCreateDisk(types.DiskType(d.Id()))
        deltaDiskUsage.ecShardCount = int64(delta)
        d.UpAdjustDiskUsageDelta(deltaDiskUsages)

        if existing.ShardBits.ShardIdCount() == 0 {
            delete(d.ecShards, s.VolumeId)
        }
    }
}

func (d *Disk) HasVolumesById(id needle.VolumeId) (hasVolumeId bool) {

    // check whether normal volumes has this volume id
    d.RLock()
    _, ok := d.volumes[id]
    if ok {
        hasVolumeId = true
    }
    d.RUnlock()

    if hasVolumeId {
        return
    }

    // check whether ec shards has this volume id
    d.ecShardsLock.RLock()
    _, ok = d.ecShards[id]
    if ok {
        hasVolumeId = true
    }
    d.ecShardsLock.RUnlock()

    return
}