Browse Source
fix(worker): pass compaction revision and file sizes in EC volume copy (#8835)
fix(worker): pass compaction revision and file sizes in EC volume copy (#8835)
* fix(worker): pass compaction revision and file sizes in EC volume copy. The worker EC task was sending CopyFile requests without the current compaction revision (defaulting to 0) and with StopOffset set to math.MaxInt64. After a vacuum compaction this caused the volume server to reject the copy or return stale data. Read the volume file status first and forward the compaction revision and actual file sizes so the copy is consistent with the compacted volume.
* propagate erasure coding task context.
* fix(worker): validate volume file status and detect short copies. Reject a zero dat file size from ReadVolumeFileStatus — a zero-sized snapshot would produce 0-byte copies and broken EC shards. After streaming, verify totalBytes matches the expected stopOffset and return an error on short copies instead of logging success.
* fix(worker): reject a zero idx file size in volume status validation. A non-empty dat with a zero idx indicates an empty or corrupt volume. Without this guard, copyFileFromSource gets stopOffset=0, produces a 0-byte .idx, passes the short-copy check, and generateEcShardsLocally runs against a volume with no index.
* fix fake plugin volume file status.
* fix plugin volume balance test fixtures.
committed by
GitHub
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
4 changed files with 264 additions and 29 deletions
-
97test/plugin_workers/fake_volume_server.go
-
15test/plugin_workers/volume_balance/execution_test.go
-
73weed/worker/tasks/erasure_coding/ec_task.go
-
108weed/worker/tasks/erasure_coding/ec_task_test.go
@ -0,0 +1,108 @@ |
|||
package erasure_coding |
|||
|
|||
import ( |
|||
"context" |
|||
"io" |
|||
"net/http" |
|||
"os" |
|||
"testing" |
|||
"time" |
|||
|
|||
"github.com/seaweedfs/seaweedfs/test/volume_server/framework" |
|||
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix" |
|||
"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" |
|||
"github.com/stretchr/testify/require" |
|||
"google.golang.org/grpc" |
|||
"google.golang.org/grpc/credentials/insecure" |
|||
) |
|||
|
|||
func TestCopyVolumeFilesToWorkerUsesCurrentCompactionRevision(t *testing.T) { |
|||
if testing.Short() { |
|||
t.Skip("skipping integration test in short mode") |
|||
} |
|||
|
|||
clusterHarness := framework.StartVolumeCluster(t, matrix.P1()) |
|||
conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress()) |
|||
defer conn.Close() |
|||
|
|||
const volumeID = uint32(951) |
|||
framework.AllocateVolume(t, grpcClient, volumeID, "") |
|||
|
|||
httpClient := framework.NewHTTPClient() |
|||
|
|||
liveFID := framework.NewFileID(volumeID, 1001, 0x1111AAAA) |
|||
liveUploadResp := framework.UploadBytes(t, httpClient, clusterHarness.VolumeAdminURL(), liveFID, []byte("live-payload-for-ec-copy")) |
|||
_ = framework.ReadAllAndClose(t, liveUploadResp) |
|||
require.Equal(t, http.StatusCreated, liveUploadResp.StatusCode) |
|||
|
|||
deletedFID := framework.NewFileID(volumeID, 1002, 0x2222BBBB) |
|||
deletedUploadResp := framework.UploadBytes(t, httpClient, clusterHarness.VolumeAdminURL(), deletedFID, []byte("deleted-payload-for-vacuum")) |
|||
_ = framework.ReadAllAndClose(t, deletedUploadResp) |
|||
require.Equal(t, http.StatusCreated, deletedUploadResp.StatusCode) |
|||
|
|||
deleteReq, err := http.NewRequest(http.MethodDelete, clusterHarness.VolumeAdminURL()+"/"+deletedFID, nil) |
|||
require.NoError(t, err) |
|||
deleteResp := framework.DoRequest(t, httpClient, deleteReq) |
|||
_ = framework.ReadAllAndClose(t, deleteResp) |
|||
require.Equal(t, http.StatusAccepted, deleteResp.StatusCode) |
|||
|
|||
compactVolumeOnce(t, grpcClient, volumeID) |
|||
|
|||
task := NewErasureCodingTask( |
|||
"copy-after-compaction", |
|||
clusterHarness.VolumeServerAddress(), |
|||
volumeID, |
|||
"", |
|||
grpc.WithTransportCredentials(insecure.NewCredentials()), |
|||
) |
|||
|
|||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) |
|||
defer cancel() |
|||
|
|||
require.NoError(t, task.markVolumeReadonly(ctx)) |
|||
|
|||
fileStatus, err := task.readSourceVolumeFileStatus(ctx) |
|||
require.NoError(t, err) |
|||
require.Greater(t, fileStatus.GetCompactionRevision(), uint32(0)) |
|||
|
|||
localFiles, err := task.copyVolumeFilesToWorker(ctx, t.TempDir()) |
|||
require.NoError(t, err) |
|||
|
|||
datInfo, err := os.Stat(localFiles["dat"]) |
|||
require.NoError(t, err) |
|||
require.Equal(t, int64(fileStatus.GetDatFileSize()), datInfo.Size()) |
|||
|
|||
idxInfo, err := os.Stat(localFiles["idx"]) |
|||
require.NoError(t, err) |
|||
require.Equal(t, int64(fileStatus.GetIdxFileSize()), idxInfo.Size()) |
|||
} |
|||
|
|||
func compactVolumeOnce(t *testing.T, grpcClient volume_server_pb.VolumeServerClient, volumeID uint32) { |
|||
t.Helper() |
|||
|
|||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) |
|||
defer cancel() |
|||
|
|||
compactStream, err := grpcClient.VacuumVolumeCompact(ctx, &volume_server_pb.VacuumVolumeCompactRequest{ |
|||
VolumeId: volumeID, |
|||
}) |
|||
require.NoError(t, err) |
|||
|
|||
for { |
|||
_, err = compactStream.Recv() |
|||
if err == io.EOF { |
|||
break |
|||
} |
|||
require.NoError(t, err) |
|||
} |
|||
|
|||
_, err = grpcClient.VacuumVolumeCommit(ctx, &volume_server_pb.VacuumVolumeCommitRequest{ |
|||
VolumeId: volumeID, |
|||
}) |
|||
require.NoError(t, err) |
|||
|
|||
_, err = grpcClient.VacuumVolumeCleanup(ctx, &volume_server_pb.VacuumVolumeCleanupRequest{ |
|||
VolumeId: volumeID, |
|||
}) |
|||
require.NoError(t, err) |
|||
} |
|||
Write
Preview
Loading…
Cancel
Save
Reference in new issue