Implement RPC skeleton for regular/EC volumes scrubbing. (#8187)
* Implement RPC skeleton for regular/EC volumes scrubbing. See https://github.com/seaweedfs/seaweedfs/issues/8018 for details.
* Minor proto improvements for `ScrubVolume()`, `ScrubEcVolume()`:
  - Add fields for scrubbing details in `ScrubVolumeResponse` and `ScrubEcVolumeResponse`, instead of reporting these through RPC errors.
  - Return a list of broken shards when scrubbing EC volumes, via `EcShardInfo`.
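Because the scrub findings travel in the response rather than in RPC errors, one call can cover many volumes and still report every failure. Below is a minimal client sketch, assuming the generated `volume_server_pb.NewVolumeServerClient` stub and request field names inferred from the handler's getters; the address and dialing options are placeholders, not part of this change:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Placeholder address; point this at a real volume server's gRPC port.
	conn, err := grpc.NewClient("localhost:18080",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	client := volume_server_pb.NewVolumeServerClient(conn)
	res, err := client.ScrubVolume(context.Background(), &volume_server_pb.ScrubVolumeRequest{
		VolumeIds: nil, // empty list: scrub every volume on the server
		Mode:      volume_server_pb.VolumeScrubMode_FULL,
	})
	if err != nil {
		// Only lookup and transport failures surface here; per-volume
		// scrub findings arrive in the response fields below.
		log.Fatalf("ScrubVolume: %v", err)
	}

	fmt.Printf("scrubbed %d volumes, %d files\n", res.TotalVolumes, res.TotalFiles)
	for _, vid := range res.BrokenVolumeIds {
		fmt.Printf("broken volume: %d\n", vid)
	}
	for _, detail := range res.Details {
		fmt.Println(detail)
	}
}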
7 changed files with 949 additions and 295 deletions
- weed/pb/volume_server.proto (38 lines changed)
- weed/pb/volume_server_pb/volume_server.pb.go (938 lines changed)
- weed/pb/volume_server_pb/volume_server_grpc.pb.go (78 lines changed)
- weed/server/volume_grpc_erasure_coding.go (1 line changed)
- weed/server/volume_grpc_scrub.go (133 lines changed)
- weed/storage/disk_location.go (33 lines changed)
- weed/storage/disk_location_test.go (23 lines changed)

(Diff for weed/pb/volume_server_pb/volume_server.pb.go suppressed because it is too large.)
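The `weed/storage/disk_location.go` change presumably introduces the `VolumeIds()` and `EcVolumeIds()` accessors that the new handler below iterates over. A plausible sketch, assuming `DiskLocation` keeps its volume maps behind read-write mutexes; the field names here are assumptions, not taken from the diff:

// Hypothetical accessors matching the calls in volume_grpc_scrub.go.
// Field names (volumes, volumesLock, ecVolumes, ecVolumesLock) are assumed.
func (l *DiskLocation) VolumeIds() []needle.VolumeId {
	l.volumesLock.RLock()
	defer l.volumesLock.RUnlock()

	ids := make([]needle.VolumeId, 0, len(l.volumes))
	for vid := range l.volumes {
		ids = append(ids, vid)
	}
	return ids
}

func (l *DiskLocation) EcVolumeIds() []needle.VolumeId {
	l.ecVolumesLock.RLock()
	defer l.ecVolumesLock.RUnlock()

	ids := make([]needle.VolumeId, 0, len(l.ecVolumes))
	for vid := range l.ecVolumes {
		ids = append(ids, vid)
	}
	return ids
}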
weed/server/volume_grpc_scrub.go (new file):

package weed_server

import (
	"context"
	"fmt"

	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
	"github.com/seaweedfs/seaweedfs/weed/storage"
	"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
)

// ScrubVolume checks the integrity of regular volumes. An empty VolumeIds
// list in the request means every volume on this server is scrubbed.
func (vs *VolumeServer) ScrubVolume(ctx context.Context, req *volume_server_pb.ScrubVolumeRequest) (*volume_server_pb.ScrubVolumeResponse, error) {
	// Resolve the set of volumes to scrub.
	vids := []needle.VolumeId{}
	if len(req.GetVolumeIds()) == 0 {
		for _, l := range vs.store.Locations {
			vids = append(vids, l.VolumeIds()...)
		}
	} else {
		for _, vid := range req.GetVolumeIds() {
			vids = append(vids, needle.VolumeId(vid))
		}
	}

	var details []string
	var totalVolumes, totalFiles uint64
	var brokenVolumeIds []uint32
	for _, vid := range vids {
		v := vs.store.GetVolume(vid)
		if v == nil {
			return nil, fmt.Errorf("volume id %d not found", vid)
		}

		var files uint64
		var serrs []error
		switch m := req.GetMode(); m {
		case volume_server_pb.VolumeScrubMode_INDEX:
			files, serrs = scrubVolumeIndex(ctx, v)
		case volume_server_pb.VolumeScrubMode_FULL:
			files, serrs = scrubVolumeFull(ctx, v)
		default:
			return nil, fmt.Errorf("unsupported volume scrub mode %d", m)
		}

		totalVolumes += 1
		totalFiles += files
		// Scrub failures are collected into the response instead of
		// aborting the RPC.
		if len(serrs) != 0 {
			brokenVolumeIds = append(brokenVolumeIds, uint32(vid))
			for _, err := range serrs {
				details = append(details, err.Error())
			}
		}
	}

	res := &volume_server_pb.ScrubVolumeResponse{
		TotalVolumes:    totalVolumes,
		TotalFiles:      totalFiles,
		BrokenVolumeIds: brokenVolumeIds,
		Details:         details,
	}
	return res, nil
}

// Placeholder: index-only scrubbing is not implemented yet.
func scrubVolumeIndex(ctx context.Context, v *storage.Volume) (uint64, []error) {
	return 0, []error{fmt.Errorf("scrubVolumeIndex(): not implemented")}
}

// Placeholder: full scrubbing is not implemented yet.
func scrubVolumeFull(ctx context.Context, v *storage.Volume) (uint64, []error) {
	return 0, []error{fmt.Errorf("scrubVolumeFull(): not implemented")}
}

// ScrubEcVolume checks the integrity of erasure-coded volumes. An empty
// VolumeIds list in the request means every EC volume on this server is
// scrubbed.
func (vs *VolumeServer) ScrubEcVolume(ctx context.Context, req *volume_server_pb.ScrubEcVolumeRequest) (*volume_server_pb.ScrubEcVolumeResponse, error) {
	vids := []needle.VolumeId{}
	if len(req.GetVolumeIds()) == 0 {
		for _, l := range vs.store.Locations {
			vids = append(vids, l.EcVolumeIds()...)
		}
	} else {
		for _, vid := range req.GetVolumeIds() {
			vids = append(vids, needle.VolumeId(vid))
		}
	}

	var details []string
	var totalVolumes, totalFiles uint64
	var brokenVolumeIds []uint32
	var brokenShardInfos []*volume_server_pb.EcShardInfo
	for _, vid := range vids {
		v, found := vs.store.FindEcVolume(vid)
		if !found {
			return nil, fmt.Errorf("EC volume id %d not found", vid)
		}

		var files uint64
		var shardInfos []*volume_server_pb.EcShardInfo
		var serrs []error
		switch m := req.GetMode(); m {
		case volume_server_pb.VolumeScrubMode_INDEX:
			files, shardInfos, serrs = scrubEcVolumeIndex(v)
		case volume_server_pb.VolumeScrubMode_FULL:
			files, shardInfos, serrs = scrubEcVolumeFull(ctx, v)
		default:
			return nil, fmt.Errorf("unsupported EC volume scrub mode %d", m)
		}

		totalVolumes += 1
		totalFiles += files
		// Broken shards are reported per volume via EcShardInfo entries.
		if len(serrs) != 0 || len(shardInfos) != 0 {
			brokenVolumeIds = append(brokenVolumeIds, uint32(vid))
			brokenShardInfos = append(brokenShardInfos, shardInfos...)
			for _, err := range serrs {
				details = append(details, err.Error())
			}
		}
	}

	res := &volume_server_pb.ScrubEcVolumeResponse{
		TotalVolumes:     totalVolumes,
		TotalFiles:       totalFiles,
		BrokenVolumeIds:  brokenVolumeIds,
		BrokenShardInfos: brokenShardInfos,
		Details:          details,
	}
	return res, nil
}

// Placeholder: index-only EC scrubbing is not implemented yet.
func scrubEcVolumeIndex(ecv *erasure_coding.EcVolume) (uint64, []*volume_server_pb.EcShardInfo, []error) {
	return 0, nil, []error{fmt.Errorf("scrubEcVolumeIndex(): not implemented")}
}

// Placeholder: full EC scrubbing is not implemented yet.
func scrubEcVolumeFull(ctx context.Context, v *erasure_coding.EcVolume) (uint64, []*volume_server_pb.EcShardInfo, []error) {
	return 0, nil, []error{fmt.Errorf("scrubEcVolumeFull(): not implemented")}
}
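The EC variant follows the same pattern, adding per-shard reporting through `BrokenShardInfos`. A companion client sketch under the same assumptions as the earlier example (`EcShardInfo`'s own fields are defined in the proto change and not expanded here):

package main

import (
	"context"
	"fmt"

	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
	"google.golang.org/grpc"
)

// scrubEcVolumes reuses a connection dialed as in the earlier sketch.
func scrubEcVolumes(ctx context.Context, conn *grpc.ClientConn) error {
	client := volume_server_pb.NewVolumeServerClient(conn)
	res, err := client.ScrubEcVolume(ctx, &volume_server_pb.ScrubEcVolumeRequest{
		VolumeIds: nil, // empty list: scrub every EC volume on the server
		Mode:      volume_server_pb.VolumeScrubMode_INDEX,
	})
	if err != nil {
		return fmt.Errorf("ScrubEcVolume: %w", err)
	}

	fmt.Printf("scrubbed %d EC volumes, %d files\n", res.TotalVolumes, res.TotalFiles)
	// A volume is flagged when scrub errors or broken shards were found.
	for _, vid := range res.BrokenVolumeIds {
		fmt.Printf("broken EC volume: %d\n", vid)
	}
	fmt.Printf("%d broken shard(s) reported via EcShardInfo\n", len(res.BrokenShardInfos))
	for _, detail := range res.Details {
		fmt.Println(detail)
	}
	return nil
}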