
[master] avoid timeout when assigning for main request with filter by DC or rack (#6291)

* avoid timeout when assigning for main request with filter by DC or rack

https://github.com/seaweedfs/seaweedfs/issues/6290

* use constant NoWritableVolumes
Konstantin Lebedev, 4 weeks ago (committed by GitHub)
commit e2e97db917
Changed files:
  1. weed/server/master_grpc_server_assign.go (4 lines changed)
  2. weed/topology/topology.go (2 lines changed)
  3. weed/topology/volume_layout.go (6 lines changed)
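The hunks below boil down to one behavior change: when an Assign request is pinned to a DataCenter or Rack and the topology reports no writable volumes under that filter, the master's retry loop now gives up immediately instead of sleeping 200 ms per attempt until the caller's deadline expires. A minimal, self-contained sketch of that pattern follows; assignOnce, assignWithRetry, and maxRetries are illustrative stand-ins rather than SeaweedFS APIs, and only the sentinel text mirrors the constant exported in this commit.

    package main

    import (
        "fmt"
        "strings"
        "time"
    )

    // Sentinel text mirroring the constant exported from weed/topology in this commit.
    const NoWritableVolumes = "No writable volumes"

    // assignOnce is an illustrative stand-in for a single PickForWrite attempt
    // that fails because the filtered topology has nothing writable.
    func assignOnce(dataCenter, rack string) error {
        return fmt.Errorf("%s in DataCenter:%v Rack:%v DataNode:%v", NoWritableVolumes, dataCenter, rack, "")
    }

    // assignWithRetry shows the retry pattern after the fix: when the request is
    // pinned to a data center or rack and the error carries the sentinel text,
    // break out instead of sleeping 200 ms per attempt until the caller times out.
    func assignWithRetry(dataCenter, rack string, maxRetries int) error {
        var lastErr error
        for i := 0; i < maxRetries; i++ {
            err := assignOnce(dataCenter, rack)
            if err == nil {
                return nil
            }
            lastErr = err
            if (dataCenter != "" || rack != "") && strings.Contains(err.Error(), NoWritableVolumes) {
                break // retrying cannot help while the filtered topology stays unwritable
            }
            time.Sleep(200 * time.Millisecond)
        }
        return lastErr
    }

    func main() {
        start := time.Now()
        err := assignWithRetry("dc1", "rack1", 10)
        fmt.Println(err, "after", time.Since(start)) // fails fast instead of waiting out every retry
    }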

weed/server/master_grpc_server_assign.go (4 lines changed)

@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"github.com/seaweedfs/seaweedfs/weed/glog"
 	"github.com/seaweedfs/seaweedfs/weed/stats"
+	"strings"
 	"time"

 	"github.com/seaweedfs/raft"
@@ -100,6 +101,9 @@ func (ms *MasterServer) Assign(ctx context.Context, req *master_pb.AssignRequest
 			glog.V(1).Infof("assign %v %v: %v", req, option.String(), err)
 			stats.MasterPickForWriteErrorCounter.Inc()
 			lastErr = err
+			if (req.DataCenter != "" || req.Rack != "") && strings.Contains(err.Error(), topology.NoWritableVolumes) {
+				break
+			}
 			time.Sleep(200 * time.Millisecond)
 			continue
 		}

weed/topology/topology.go (2 lines changed)

@@ -254,7 +254,7 @@ func (t *Topology) PickForWrite(requestedCount uint64, option *VolumeGrowOption,
 		return "", 0, nil, shouldGrow, fmt.Errorf("failed to find writable volumes for collection:%s replication:%s ttl:%s error: %v", option.Collection, option.ReplicaPlacement.String(), option.Ttl.String(), err)
 	}
 	if volumeLocationList == nil || volumeLocationList.Length() == 0 {
-		return "", 0, nil, shouldGrow, fmt.Errorf("%s available for collection:%s replication:%s ttl:%s", noWritableVolumes, option.Collection, option.ReplicaPlacement.String(), option.Ttl.String())
+		return "", 0, nil, shouldGrow, fmt.Errorf("%s available for collection:%s replication:%s ttl:%s", NoWritableVolumes, option.Collection, option.ReplicaPlacement.String(), option.Ttl.String())
 	}
 	nextFileId := t.Sequence.NextFileId(requestedCount)
 	fileId = needle.NewFileId(vid, nextFileId, rand.Uint32()).String()

weed/topology/volume_layout.go (6 lines changed)

@@ -31,7 +31,7 @@ const (
 	readOnlyState volumeState = "ReadOnly"
 	oversizedState            = "Oversized"
 	crowdedState              = "Crowded"
-	noWritableVolumes         = "No writable volumes"
+	NoWritableVolumes         = "No writable volumes"
 )

 type stateIndicator func(copyState) bool
@@ -302,7 +302,7 @@ func (vl *VolumeLayout) PickForWrite(count uint64, option *VolumeGrowOption) (vi
 	lenWriters := len(vl.writables)
 	if lenWriters <= 0 {
-		return 0, 0, nil, true, fmt.Errorf("%s", noWritableVolumes)
+		return 0, 0, nil, true, fmt.Errorf("%s", NoWritableVolumes)
 	}
 	if option.DataCenter == "" && option.Rack == "" && option.DataNode == "" {
 		vid := vl.writables[rand.IntN(lenWriters)]
@@ -337,7 +337,7 @@ func (vl *VolumeLayout) PickForWrite(count uint64, option *VolumeGrowOption) (vi
 			return
 		}
 	}
-	return vid, count, locationList, true, fmt.Errorf("%s in DataCenter:%v Rack:%v DataNode:%v", noWritableVolumes, option.DataCenter, option.Rack, option.DataNode)
+	return vid, count, locationList, true, fmt.Errorf("%s in DataCenter:%v Rack:%v DataNode:%v", NoWritableVolumes, option.DataCenter, option.Rack, option.DataNode)
 }

 func (vl *VolumeLayout) HasGrowRequest() bool {
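Renaming noWritableVolumes to NoWritableVolumes exports the sentinel from weed/topology, which is what lets weed/server (a separate package) recognize the condition by matching the error text, as the first hunk does with strings.Contains. Below is a small sketch of that cross-package check under the same assumption; isNoWritableVolumes is an illustrative helper, not part of the SeaweedFS codebase.

    package main

    import (
        "fmt"
        "strings"

        "github.com/seaweedfs/seaweedfs/weed/topology"
    )

    // isNoWritableVolumes reports whether an assign error originates from an
    // empty writable set, by matching the exported sentinel text
    // (illustrative helper only).
    func isNoWritableVolumes(err error) bool {
        return err != nil && strings.Contains(err.Error(), topology.NoWritableVolumes)
    }

    func main() {
        err := fmt.Errorf("%s in DataCenter:%v Rack:%v DataNode:%v", topology.NoWritableVolumes, "dc1", "rack1", "")
        fmt.Println(isNoWritableVolumes(err)) // true
    }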
