Browse Source
feat: CP11A-2 coordinated expand protocol for replicated block volumes
Two-phase prepare/commit/cancel protocol ensures all replicas expand atomically. Standalone volumes use direct-commit (unchanged behavior). Engine: PrepareExpand/CommitExpand/CancelExpand with on-disk PreparedSize+ExpandEpoch in superblock, crash recovery clears stale prepare state on open, v.mu serializes concurrent expand operations. Proto: 3 new RPCs (PrepareExpand/CommitExpand/CancelExpandBlockVolume). Coordinator: expandClean flag pattern — ReleaseExpandInflight only on clean success or full cancel. Partial replica commit failure calls MarkExpandFailed (keeps ExpandInProgress=true, suppresses heartbeat size updates). ClearExpandFailed for manual reconciliation. Registry: AcquireExpandInflight records PendingExpandSize+ExpandEpoch. ExpandFailed state blocks new expands until cleared. Tests: 15 engine + 4 VS + 10 coordinator + heartbeat suppression regression + updated QA CP82/durability tests with prepare/commit mocks. Also includes CP11A-1 remaining: QA storage profile tests, QA io_backend config tests, testrunner perf-baseline scenarios and coordinated-expand actions. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>feature/sw-block
36 changed files with 4044 additions and 200 deletions
-
29weed/pb/volume_server.proto
-
511weed/pb/volume_server_pb/volume_server.pb.go
-
114weed/pb/volume_server_pb/volume_server_grpc.pb.go
-
75weed/server/master_block_registry.go
-
95weed/server/master_grpc_server_block.go
-
492weed/server/master_grpc_server_block_test.go
-
46weed/server/master_server.go
-
29weed/server/master_server_handlers_block.go
-
58weed/server/qa_block_cp82_adversarial_test.go
-
9weed/server/qa_block_durability_test.go
-
54weed/server/volume_grpc_block.go
-
50weed/server/volume_grpc_block_test.go
-
30weed/server/volume_server_block.go
-
21weed/storage/blockvol/blockapi/client.go
-
10weed/storage/blockvol/blockapi/types.go
-
151weed/storage/blockvol/blockvol.go
-
303weed/storage/blockvol/expand_test.go
-
588weed/storage/blockvol/qa_expand_test.go
-
228weed/storage/blockvol/qa_iobackend_config_test.go
-
567weed/storage/blockvol/qa_storage_profile_test.go
-
20weed/storage/blockvol/superblock.go
-
16weed/storage/blockvol/testrunner/actions/bench.go
-
15weed/storage/blockvol/testrunner/actions/block.go
-
6weed/storage/blockvol/testrunner/actions/database.go
-
234weed/storage/blockvol/testrunner/actions/devops.go
-
22weed/storage/blockvol/testrunner/actions/devops_test.go
-
28weed/storage/blockvol/testrunner/actions/helpers.go
-
42weed/storage/blockvol/testrunner/actions/io.go
-
59weed/storage/blockvol/testrunner/actions/iscsi.go
-
2weed/storage/blockvol/testrunner/actions/metrics.go
-
69weed/storage/blockvol/testrunner/engine.go
-
144weed/storage/blockvol/testrunner/engine_test.go
-
18weed/storage/blockvol/testrunner/infra/target.go
-
1weed/storage/blockvol/testrunner/registry.go
-
78weed/storage/blockvol/testrunner/scenarios/cp103-perf-baseline.yaml
-
30weed/storage/blockvol/testrunner/scenarios/cp85-perf-baseline.yaml
@ -0,0 +1,303 @@ |
|||
package blockvol |
|||
|
|||
import ( |
|||
"bytes" |
|||
"path/filepath" |
|||
"testing" |
|||
) |
|||
|
|||
// Sizing constants shared by the expand tests: a 1 MiB volume with 4 KiB
// blocks and a 64 KiB WAL, expanded to a 2 MiB target.
const (
	expandVolSize = 1024 * 1024     // 1MB
	expandBlkSize = 4096            // logical block size in bytes
	expandWALSize = 64 * 1024       // 64KB
	expandNewSize = 2 * 1024 * 1024 // 2MB
)
|||
|
|||
func createExpandTestVol(t *testing.T) (*BlockVol, string) { |
|||
t.Helper() |
|||
dir := t.TempDir() |
|||
path := filepath.Join(dir, "test.blk") |
|||
vol, err := CreateBlockVol(path, CreateOptions{ |
|||
VolumeSize: expandVolSize, |
|||
BlockSize: expandBlkSize, |
|||
WALSize: expandWALSize, |
|||
}) |
|||
if err != nil { |
|||
t.Fatalf("create: %v", err) |
|||
} |
|||
return vol, path |
|||
} |
|||
|
|||
func TestExpand_Standalone_DirectCommit(t *testing.T) { |
|||
vol, _ := createExpandTestVol(t) |
|||
defer vol.Close() |
|||
|
|||
if err := vol.Expand(expandNewSize); err != nil { |
|||
t.Fatalf("expand: %v", err) |
|||
} |
|||
if vol.Info().VolumeSize != expandNewSize { |
|||
t.Fatalf("VolumeSize: got %d, want %d", vol.Info().VolumeSize, expandNewSize) |
|||
} |
|||
ps, ee := vol.ExpandState() |
|||
if ps != 0 || ee != 0 { |
|||
t.Fatalf("ExpandState: got (%d,%d), want (0,0)", ps, ee) |
|||
} |
|||
} |
|||
|
|||
func TestExpand_Standalone_Idempotent(t *testing.T) { |
|||
vol, _ := createExpandTestVol(t) |
|||
defer vol.Close() |
|||
|
|||
if err := vol.Expand(expandVolSize); err != nil { |
|||
t.Fatalf("same-size expand should be no-op: %v", err) |
|||
} |
|||
if vol.Info().VolumeSize != expandVolSize { |
|||
t.Fatalf("VolumeSize changed: %d", vol.Info().VolumeSize) |
|||
} |
|||
} |
|||
|
|||
func TestExpand_Standalone_ShrinkRejected(t *testing.T) { |
|||
vol, _ := createExpandTestVol(t) |
|||
defer vol.Close() |
|||
|
|||
err := vol.Expand(expandVolSize / 2) |
|||
if err != ErrShrinkNotSupported { |
|||
t.Fatalf("expected ErrShrinkNotSupported, got %v", err) |
|||
} |
|||
} |
|||
|
|||
func TestExpand_Standalone_SurvivesReopen(t *testing.T) { |
|||
vol, path := createExpandTestVol(t) |
|||
|
|||
if err := vol.Expand(expandNewSize); err != nil { |
|||
t.Fatalf("expand: %v", err) |
|||
} |
|||
vol.Close() |
|||
|
|||
vol2, err := OpenBlockVol(path) |
|||
if err != nil { |
|||
t.Fatalf("reopen: %v", err) |
|||
} |
|||
defer vol2.Close() |
|||
|
|||
if vol2.Info().VolumeSize != expandNewSize { |
|||
t.Fatalf("VolumeSize after reopen: got %d, want %d", vol2.Info().VolumeSize, expandNewSize) |
|||
} |
|||
} |
|||
|
|||
func TestPrepareExpand_Success(t *testing.T) { |
|||
vol, _ := createExpandTestVol(t) |
|||
defer vol.Close() |
|||
|
|||
if err := vol.PrepareExpand(expandNewSize, 42); err != nil { |
|||
t.Fatalf("prepare: %v", err) |
|||
} |
|||
|
|||
if vol.Info().VolumeSize != expandVolSize { |
|||
t.Fatalf("VolumeSize should be unchanged: %d", vol.Info().VolumeSize) |
|||
} |
|||
ps, ee := vol.ExpandState() |
|||
if ps != expandNewSize || ee != 42 { |
|||
t.Fatalf("ExpandState: got (%d,%d), want (%d,42)", ps, ee, expandNewSize) |
|||
} |
|||
} |
|||
|
|||
func TestPrepareExpand_WriteBeyondOldSize_Rejected(t *testing.T) { |
|||
vol, _ := createExpandTestVol(t) |
|||
defer vol.Close() |
|||
|
|||
if err := vol.PrepareExpand(expandNewSize, 1); err != nil { |
|||
t.Fatalf("prepare: %v", err) |
|||
} |
|||
|
|||
newLBA := uint64(expandVolSize / expandBlkSize) |
|||
data := make([]byte, expandBlkSize) |
|||
err := vol.WriteLBA(newLBA, data) |
|||
if err == nil { |
|||
t.Fatal("write beyond old size should be rejected while in prepared state") |
|||
} |
|||
} |
|||
|
|||
func TestPrepareExpand_WriteWithinOldSize_OK(t *testing.T) { |
|||
vol, _ := createExpandTestVol(t) |
|||
defer vol.Close() |
|||
|
|||
if err := vol.PrepareExpand(expandNewSize, 1); err != nil { |
|||
t.Fatalf("prepare: %v", err) |
|||
} |
|||
|
|||
data := make([]byte, expandBlkSize) |
|||
data[0] = 0xCC |
|||
if err := vol.WriteLBA(0, data); err != nil { |
|||
t.Fatalf("write within old size: %v", err) |
|||
} |
|||
got, err := vol.ReadLBA(0, expandBlkSize) |
|||
if err != nil { |
|||
t.Fatalf("read: %v", err) |
|||
} |
|||
if !bytes.Equal(got, data) { |
|||
t.Fatal("data mismatch") |
|||
} |
|||
} |
|||
|
|||
func TestCommitExpand_Success(t *testing.T) { |
|||
vol, _ := createExpandTestVol(t) |
|||
defer vol.Close() |
|||
|
|||
if err := vol.PrepareExpand(expandNewSize, 7); err != nil { |
|||
t.Fatalf("prepare: %v", err) |
|||
} |
|||
if err := vol.CommitExpand(7); err != nil { |
|||
t.Fatalf("commit: %v", err) |
|||
} |
|||
|
|||
if vol.Info().VolumeSize != expandNewSize { |
|||
t.Fatalf("VolumeSize: got %d, want %d", vol.Info().VolumeSize, expandNewSize) |
|||
} |
|||
ps, ee := vol.ExpandState() |
|||
if ps != 0 || ee != 0 { |
|||
t.Fatalf("ExpandState: got (%d,%d), want (0,0)", ps, ee) |
|||
} |
|||
} |
|||
|
|||
func TestCommitExpand_WriteBeyondNewSize_OK(t *testing.T) { |
|||
vol, _ := createExpandTestVol(t) |
|||
defer vol.Close() |
|||
|
|||
if err := vol.PrepareExpand(expandNewSize, 1); err != nil { |
|||
t.Fatalf("prepare: %v", err) |
|||
} |
|||
if err := vol.CommitExpand(1); err != nil { |
|||
t.Fatalf("commit: %v", err) |
|||
} |
|||
|
|||
newLBA := uint64(expandVolSize / expandBlkSize) |
|||
data := make([]byte, expandBlkSize) |
|||
data[0] = 0xDD |
|||
if err := vol.WriteLBA(newLBA, data); err != nil { |
|||
t.Fatalf("write in expanded region: %v", err) |
|||
} |
|||
got, err := vol.ReadLBA(newLBA, expandBlkSize) |
|||
if err != nil { |
|||
t.Fatalf("read: %v", err) |
|||
} |
|||
if !bytes.Equal(got, data) { |
|||
t.Fatal("data mismatch in expanded region") |
|||
} |
|||
} |
|||
|
|||
func TestCommitExpand_EpochMismatch_Rejected(t *testing.T) { |
|||
vol, _ := createExpandTestVol(t) |
|||
defer vol.Close() |
|||
|
|||
if err := vol.PrepareExpand(expandNewSize, 5); err != nil { |
|||
t.Fatalf("prepare: %v", err) |
|||
} |
|||
err := vol.CommitExpand(99) |
|||
if err != ErrExpandEpochMismatch { |
|||
t.Fatalf("expected ErrExpandEpochMismatch, got %v", err) |
|||
} |
|||
if vol.Info().VolumeSize != expandVolSize { |
|||
t.Fatalf("VolumeSize should be unchanged: %d", vol.Info().VolumeSize) |
|||
} |
|||
} |
|||
|
|||
func TestCancelExpand_ClearsPreparedState(t *testing.T) { |
|||
vol, _ := createExpandTestVol(t) |
|||
defer vol.Close() |
|||
|
|||
if err := vol.PrepareExpand(expandNewSize, 3); err != nil { |
|||
t.Fatalf("prepare: %v", err) |
|||
} |
|||
if err := vol.CancelExpand(3); err != nil { |
|||
t.Fatalf("cancel: %v", err) |
|||
} |
|||
|
|||
ps, ee := vol.ExpandState() |
|||
if ps != 0 || ee != 0 { |
|||
t.Fatalf("ExpandState: got (%d,%d), want (0,0)", ps, ee) |
|||
} |
|||
if vol.Info().VolumeSize != expandVolSize { |
|||
t.Fatalf("VolumeSize should be unchanged: %d", vol.Info().VolumeSize) |
|||
} |
|||
} |
|||
|
|||
func TestCancelExpand_WriteStillRejectedInNewRange(t *testing.T) { |
|||
vol, _ := createExpandTestVol(t) |
|||
defer vol.Close() |
|||
|
|||
if err := vol.PrepareExpand(expandNewSize, 1); err != nil { |
|||
t.Fatalf("prepare: %v", err) |
|||
} |
|||
if err := vol.CancelExpand(1); err != nil { |
|||
t.Fatalf("cancel: %v", err) |
|||
} |
|||
|
|||
newLBA := uint64(expandVolSize / expandBlkSize) |
|||
data := make([]byte, expandBlkSize) |
|||
err := vol.WriteLBA(newLBA, data) |
|||
if err == nil { |
|||
t.Fatal("write in expanded region should still be rejected after cancel") |
|||
} |
|||
} |
|||
|
|||
func TestPrepareExpand_AlreadyInFlight_Rejected(t *testing.T) { |
|||
vol, _ := createExpandTestVol(t) |
|||
defer vol.Close() |
|||
|
|||
if err := vol.PrepareExpand(expandNewSize, 1); err != nil { |
|||
t.Fatalf("first prepare: %v", err) |
|||
} |
|||
err := vol.PrepareExpand(expandNewSize*2, 2) |
|||
if err != ErrExpandAlreadyInFlight { |
|||
t.Fatalf("expected ErrExpandAlreadyInFlight, got %v", err) |
|||
} |
|||
} |
|||
|
|||
func TestRecovery_PreparedState_Cleared(t *testing.T) { |
|||
vol, path := createExpandTestVol(t) |
|||
|
|||
if err := vol.PrepareExpand(expandNewSize, 10); err != nil { |
|||
t.Fatalf("prepare: %v", err) |
|||
} |
|||
vol.Close() |
|||
|
|||
vol2, err := OpenBlockVol(path) |
|||
if err != nil { |
|||
t.Fatalf("reopen: %v", err) |
|||
} |
|||
defer vol2.Close() |
|||
|
|||
ps, ee := vol2.ExpandState() |
|||
if ps != 0 || ee != 0 { |
|||
t.Fatalf("ExpandState after reopen: got (%d,%d), want (0,0)", ps, ee) |
|||
} |
|||
if vol2.Info().VolumeSize != expandVolSize { |
|||
t.Fatalf("VolumeSize should be original after recovery: %d", vol2.Info().VolumeSize) |
|||
} |
|||
} |
|||
|
|||
func TestExpand_WithProfile_Single(t *testing.T) { |
|||
dir := t.TempDir() |
|||
path := filepath.Join(dir, "profile.blk") |
|||
vol, err := CreateBlockVol(path, CreateOptions{ |
|||
VolumeSize: expandVolSize, |
|||
BlockSize: expandBlkSize, |
|||
WALSize: expandWALSize, |
|||
StorageProfile: ProfileSingle, |
|||
}) |
|||
if err != nil { |
|||
t.Fatalf("create: %v", err) |
|||
} |
|||
defer vol.Close() |
|||
|
|||
if vol.Profile() != ProfileSingle { |
|||
t.Fatalf("profile: got %d, want %d", vol.Profile(), ProfileSingle) |
|||
} |
|||
if err := vol.Expand(expandNewSize); err != nil { |
|||
t.Fatalf("expand with single profile: %v", err) |
|||
} |
|||
if vol.Info().VolumeSize != expandNewSize { |
|||
t.Fatalf("VolumeSize: got %d, want %d", vol.Info().VolumeSize, expandNewSize) |
|||
} |
|||
} |
|||
@ -0,0 +1,588 @@ |
|||
package blockvol |
|||
|
|||
import ( |
|||
"bytes" |
|||
"errors" |
|||
"path/filepath" |
|||
"sync" |
|||
"sync/atomic" |
|||
"testing" |
|||
"time" |
|||
) |
|||
|
|||
// =============================================================================
|
|||
// CP11A-2 QA Adversarial Tests — Coordinated Expand
|
|||
// =============================================================================
|
|||
|
|||
// --- Engine-level adversarial tests ---
|
|||
|
|||
func createQAExpandVol(t *testing.T) (*BlockVol, string) { |
|||
t.Helper() |
|||
dir := t.TempDir() |
|||
path := filepath.Join(dir, "qa-expand.blk") |
|||
vol, err := CreateBlockVol(path, CreateOptions{ |
|||
VolumeSize: expandVolSize, |
|||
BlockSize: expandBlkSize, |
|||
WALSize: expandWALSize, |
|||
}) |
|||
if err != nil { |
|||
t.Fatalf("create: %v", err) |
|||
} |
|||
return vol, path |
|||
} |
|||
|
|||
// T1: ConcurrentPrepare — two goroutines race to PrepareExpand;
|
|||
// exactly one must win, the other gets ErrExpandAlreadyInFlight.
|
|||
func TestQA_Expand_ConcurrentPrepare(t *testing.T) { |
|||
vol, _ := createQAExpandVol(t) |
|||
defer vol.Close() |
|||
|
|||
const goroutines = 10 |
|||
var wins, rejects atomic.Int32 |
|||
var wg sync.WaitGroup |
|||
start := make(chan struct{}) |
|||
|
|||
for i := 0; i < goroutines; i++ { |
|||
wg.Add(1) |
|||
epoch := uint64(i + 1) |
|||
go func() { |
|||
defer wg.Done() |
|||
<-start |
|||
err := vol.PrepareExpand(expandNewSize, epoch) |
|||
if err == nil { |
|||
wins.Add(1) |
|||
} else if errors.Is(err, ErrExpandAlreadyInFlight) { |
|||
rejects.Add(1) |
|||
} else { |
|||
t.Errorf("unexpected error: %v", err) |
|||
} |
|||
}() |
|||
} |
|||
|
|||
close(start) |
|||
wg.Wait() |
|||
|
|||
if wins.Load() != 1 { |
|||
t.Fatalf("expected exactly 1 winner, got %d", wins.Load()) |
|||
} |
|||
if rejects.Load() != int32(goroutines-1) { |
|||
t.Fatalf("expected %d rejects, got %d", goroutines-1, rejects.Load()) |
|||
} |
|||
} |
|||
|
|||
// T2: CommitWithoutPrepare — CommitExpand with no prior PrepareExpand.
|
|||
func TestQA_Expand_CommitWithoutPrepare(t *testing.T) { |
|||
vol, _ := createQAExpandVol(t) |
|||
defer vol.Close() |
|||
|
|||
err := vol.CommitExpand(42) |
|||
if !errors.Is(err, ErrNoExpandInFlight) { |
|||
t.Fatalf("expected ErrNoExpandInFlight, got %v", err) |
|||
} |
|||
// VolumeSize must not change.
|
|||
if vol.Info().VolumeSize != expandVolSize { |
|||
t.Fatalf("VolumeSize corrupted: %d", vol.Info().VolumeSize) |
|||
} |
|||
} |
|||
|
|||
// T3: CancelWithoutPrepare — CancelExpand when nothing is in flight.
|
|||
// With epoch=0 (force-cancel), should be a harmless no-op.
|
|||
func TestQA_Expand_CancelWithoutPrepare_ForceEpoch(t *testing.T) { |
|||
vol, _ := createQAExpandVol(t) |
|||
defer vol.Close() |
|||
|
|||
// Force-cancel (epoch=0) when nothing is in flight — should succeed.
|
|||
if err := vol.CancelExpand(0); err != nil { |
|||
t.Fatalf("force-cancel with no inflight should succeed: %v", err) |
|||
} |
|||
ps, ee := vol.ExpandState() |
|||
if ps != 0 || ee != 0 { |
|||
t.Fatalf("ExpandState should be clean: (%d, %d)", ps, ee) |
|||
} |
|||
} |
|||
|
|||
// T4: CancelWithWrongEpoch — CancelExpand with non-zero wrong epoch.
|
|||
func TestQA_Expand_CancelWithWrongEpoch(t *testing.T) { |
|||
vol, _ := createQAExpandVol(t) |
|||
defer vol.Close() |
|||
|
|||
if err := vol.PrepareExpand(expandNewSize, 5); err != nil { |
|||
t.Fatalf("prepare: %v", err) |
|||
} |
|||
|
|||
err := vol.CancelExpand(99) |
|||
if !errors.Is(err, ErrExpandEpochMismatch) { |
|||
t.Fatalf("expected ErrExpandEpochMismatch, got %v", err) |
|||
} |
|||
|
|||
// PreparedSize must still be set (cancel failed).
|
|||
ps, ee := vol.ExpandState() |
|||
if ps != expandNewSize || ee != 5 { |
|||
t.Fatalf("ExpandState should be unchanged: (%d, %d)", ps, ee) |
|||
} |
|||
} |
|||
|
|||
// T5: ForceCancel — epoch=0 cancels regardless of actual epoch.
|
|||
func TestQA_Expand_ForceCancel_IgnoresEpoch(t *testing.T) { |
|||
vol, _ := createQAExpandVol(t) |
|||
defer vol.Close() |
|||
|
|||
if err := vol.PrepareExpand(expandNewSize, 777); err != nil { |
|||
t.Fatalf("prepare: %v", err) |
|||
} |
|||
|
|||
// Force-cancel with epoch=0 should clear regardless.
|
|||
if err := vol.CancelExpand(0); err != nil { |
|||
t.Fatalf("force-cancel: %v", err) |
|||
} |
|||
ps, ee := vol.ExpandState() |
|||
if ps != 0 || ee != 0 { |
|||
t.Fatalf("ExpandState should be cleared: (%d, %d)", ps, ee) |
|||
} |
|||
} |
|||
|
|||
// T6: DoubleCommit — commit, then commit again. Second must fail.
|
|||
func TestQA_Expand_DoubleCommit(t *testing.T) { |
|||
vol, _ := createQAExpandVol(t) |
|||
defer vol.Close() |
|||
|
|||
if err := vol.PrepareExpand(expandNewSize, 1); err != nil { |
|||
t.Fatalf("prepare: %v", err) |
|||
} |
|||
if err := vol.CommitExpand(1); err != nil { |
|||
t.Fatalf("first commit: %v", err) |
|||
} |
|||
|
|||
// Second commit: PreparedSize is now 0, so ErrNoExpandInFlight.
|
|||
err := vol.CommitExpand(1) |
|||
if !errors.Is(err, ErrNoExpandInFlight) { |
|||
t.Fatalf("expected ErrNoExpandInFlight on double commit, got %v", err) |
|||
} |
|||
} |
|||
|
|||
// T7: PrepareAfterCommit — after a successful prepare+commit cycle,
|
|||
// a new prepare should work (the state machine resets).
|
|||
func TestQA_Expand_PrepareAfterCommit(t *testing.T) { |
|||
vol, _ := createQAExpandVol(t) |
|||
defer vol.Close() |
|||
|
|||
// First cycle: 1MB -> 2MB.
|
|||
if err := vol.PrepareExpand(expandNewSize, 1); err != nil { |
|||
t.Fatalf("prepare1: %v", err) |
|||
} |
|||
if err := vol.CommitExpand(1); err != nil { |
|||
t.Fatalf("commit1: %v", err) |
|||
} |
|||
if vol.Info().VolumeSize != expandNewSize { |
|||
t.Fatalf("size after first commit: %d", vol.Info().VolumeSize) |
|||
} |
|||
|
|||
// Second cycle: 2MB -> 4MB.
|
|||
newSize2 := uint64(4 * 1024 * 1024) |
|||
if err := vol.PrepareExpand(newSize2, 2); err != nil { |
|||
t.Fatalf("prepare2: %v", err) |
|||
} |
|||
if err := vol.CommitExpand(2); err != nil { |
|||
t.Fatalf("commit2: %v", err) |
|||
} |
|||
if vol.Info().VolumeSize != newSize2 { |
|||
t.Fatalf("size after second commit: %d", vol.Info().VolumeSize) |
|||
} |
|||
} |
|||
|
|||
// T8: PrepareAfterCancel — after cancel, a new prepare should succeed.
|
|||
func TestQA_Expand_PrepareAfterCancel(t *testing.T) { |
|||
vol, _ := createQAExpandVol(t) |
|||
defer vol.Close() |
|||
|
|||
if err := vol.PrepareExpand(expandNewSize, 1); err != nil { |
|||
t.Fatalf("prepare1: %v", err) |
|||
} |
|||
if err := vol.CancelExpand(1); err != nil { |
|||
t.Fatalf("cancel: %v", err) |
|||
} |
|||
|
|||
// Second prepare with different epoch should work.
|
|||
if err := vol.PrepareExpand(expandNewSize, 2); err != nil { |
|||
t.Fatalf("prepare2 after cancel: %v", err) |
|||
} |
|||
ps, ee := vol.ExpandState() |
|||
if ps != expandNewSize || ee != 2 { |
|||
t.Fatalf("ExpandState: (%d, %d), want (%d, 2)", ps, ee, expandNewSize) |
|||
} |
|||
} |
|||
|
|||
// T9: PrepareShrink — PrepareExpand with size < current must be rejected.
|
|||
func TestQA_Expand_PrepareShrink(t *testing.T) { |
|||
vol, _ := createQAExpandVol(t) |
|||
defer vol.Close() |
|||
|
|||
err := vol.PrepareExpand(expandVolSize/2, 1) |
|||
if !errors.Is(err, ErrShrinkNotSupported) { |
|||
t.Fatalf("expected ErrShrinkNotSupported, got %v", err) |
|||
} |
|||
} |
|||
|
|||
// T10: PrepareUnaligned — unaligned size rejected.
|
|||
func TestQA_Expand_PrepareUnaligned(t *testing.T) { |
|||
vol, _ := createQAExpandVol(t) |
|||
defer vol.Close() |
|||
|
|||
err := vol.PrepareExpand(expandNewSize+1, 1) |
|||
if !errors.Is(err, ErrAlignment) { |
|||
t.Fatalf("expected ErrAlignment, got %v", err) |
|||
} |
|||
// Must not leave state dirty.
|
|||
ps, ee := vol.ExpandState() |
|||
if ps != 0 || ee != 0 { |
|||
t.Fatalf("ExpandState should be clean after alignment reject: (%d, %d)", ps, ee) |
|||
} |
|||
} |
|||
|
|||
// T11: DataIntegrity — write data before prepare, commit, then verify
|
|||
// data in both old and new regions.
|
|||
func TestQA_Expand_DataIntegrityAcrossCommit(t *testing.T) { |
|||
vol, _ := createQAExpandVol(t) |
|||
defer vol.Close() |
|||
|
|||
// Write to LBA 0 before expand.
|
|||
data := make([]byte, expandBlkSize) |
|||
for i := range data { |
|||
data[i] = 0xAB |
|||
} |
|||
if err := vol.WriteLBA(0, data); err != nil { |
|||
t.Fatalf("write pre-expand: %v", err) |
|||
} |
|||
|
|||
// Prepare + commit.
|
|||
if err := vol.PrepareExpand(expandNewSize, 1); err != nil { |
|||
t.Fatalf("prepare: %v", err) |
|||
} |
|||
|
|||
// Write to LBA 0 during prepared state (within old range — allowed).
|
|||
data2 := make([]byte, expandBlkSize) |
|||
for i := range data2 { |
|||
data2[i] = 0xCD |
|||
} |
|||
if err := vol.WriteLBA(0, data2); err != nil { |
|||
t.Fatalf("write during prepared: %v", err) |
|||
} |
|||
|
|||
if err := vol.CommitExpand(1); err != nil { |
|||
t.Fatalf("commit: %v", err) |
|||
} |
|||
|
|||
// Read LBA 0 — should have data2 (0xCD).
|
|||
got, err := vol.ReadLBA(0, expandBlkSize) |
|||
if err != nil { |
|||
t.Fatalf("read LBA 0: %v", err) |
|||
} |
|||
if !bytes.Equal(got, data2) { |
|||
t.Fatalf("data mismatch at LBA 0: got %x, want %x", got[0], data2[0]) |
|||
} |
|||
|
|||
// Write to new region (LBA beyond old size).
|
|||
newLBA := uint64(expandVolSize / expandBlkSize) |
|||
data3 := make([]byte, expandBlkSize) |
|||
for i := range data3 { |
|||
data3[i] = 0xEF |
|||
} |
|||
if err := vol.WriteLBA(newLBA, data3); err != nil { |
|||
t.Fatalf("write new region: %v", err) |
|||
} |
|||
got3, err := vol.ReadLBA(newLBA, expandBlkSize) |
|||
if err != nil { |
|||
t.Fatalf("read new region: %v", err) |
|||
} |
|||
if !bytes.Equal(got3, data3) { |
|||
t.Fatalf("data mismatch in new region") |
|||
} |
|||
} |
|||
|
|||
// T12: RecoveryClearsAndDataSurvives — crash with PreparedSize set,
|
|||
// reopen clears it, old data is intact.
|
|||
func TestQA_Expand_RecoveryClearsAndDataSurvives(t *testing.T) { |
|||
vol, path := createQAExpandVol(t) |
|||
|
|||
// Write data.
|
|||
data := make([]byte, expandBlkSize) |
|||
data[0] = 0x77 |
|||
if err := vol.WriteLBA(0, data); err != nil { |
|||
t.Fatalf("write: %v", err) |
|||
} |
|||
// Flush so data reaches extent.
|
|||
if err := vol.SyncCache(); err != nil { |
|||
t.Fatalf("sync: %v", err) |
|||
} |
|||
time.Sleep(200 * time.Millisecond) // let flusher flush
|
|||
|
|||
// Prepare expand (not committed).
|
|||
if err := vol.PrepareExpand(expandNewSize, 99); err != nil { |
|||
t.Fatalf("prepare: %v", err) |
|||
} |
|||
vol.Close() |
|||
|
|||
// Reopen — recovery should clear PreparedSize.
|
|||
vol2, err := OpenBlockVol(path) |
|||
if err != nil { |
|||
t.Fatalf("reopen: %v", err) |
|||
} |
|||
defer vol2.Close() |
|||
|
|||
ps, ee := vol2.ExpandState() |
|||
if ps != 0 || ee != 0 { |
|||
t.Fatalf("ExpandState after recovery: (%d, %d)", ps, ee) |
|||
} |
|||
if vol2.Info().VolumeSize != expandVolSize { |
|||
t.Fatalf("VolumeSize should be original: %d", vol2.Info().VolumeSize) |
|||
} |
|||
|
|||
// Data written before prepare should survive.
|
|||
got, err := vol2.ReadLBA(0, expandBlkSize) |
|||
if err != nil { |
|||
t.Fatalf("read after recovery: %v", err) |
|||
} |
|||
if got[0] != 0x77 { |
|||
t.Fatalf("data[0]: got %x, want 0x77", got[0]) |
|||
} |
|||
} |
|||
|
|||
// T13: CommittedExpandSurvivesReopen — committed expand persists.
|
|||
func TestQA_Expand_CommittedSurvivesReopen(t *testing.T) { |
|||
vol, path := createQAExpandVol(t) |
|||
|
|||
if err := vol.PrepareExpand(expandNewSize, 1); err != nil { |
|||
t.Fatalf("prepare: %v", err) |
|||
} |
|||
if err := vol.CommitExpand(1); err != nil { |
|||
t.Fatalf("commit: %v", err) |
|||
} |
|||
|
|||
// Write in new region.
|
|||
newLBA := uint64(expandVolSize / expandBlkSize) |
|||
data := make([]byte, expandBlkSize) |
|||
data[0] = 0xAA |
|||
if err := vol.WriteLBA(newLBA, data); err != nil { |
|||
t.Fatalf("write new region: %v", err) |
|||
} |
|||
if err := vol.SyncCache(); err != nil { |
|||
t.Fatalf("sync: %v", err) |
|||
} |
|||
time.Sleep(200 * time.Millisecond) |
|||
vol.Close() |
|||
|
|||
// Reopen.
|
|||
vol2, err := OpenBlockVol(path) |
|||
if err != nil { |
|||
t.Fatalf("reopen: %v", err) |
|||
} |
|||
defer vol2.Close() |
|||
|
|||
if vol2.Info().VolumeSize != expandNewSize { |
|||
t.Fatalf("VolumeSize: got %d, want %d", vol2.Info().VolumeSize, expandNewSize) |
|||
} |
|||
got, err := vol2.ReadLBA(newLBA, expandBlkSize) |
|||
if err != nil { |
|||
t.Fatalf("read new region: %v", err) |
|||
} |
|||
if got[0] != 0xAA { |
|||
t.Fatalf("data[0]: got %x, want 0xAA", got[0]) |
|||
} |
|||
} |
|||
|
|||
// T14: ExpandOnClosedVolume — all expand ops must return ErrVolumeClosed.
|
|||
func TestQA_Expand_ClosedVolume(t *testing.T) { |
|||
vol, _ := createQAExpandVol(t) |
|||
vol.Close() |
|||
|
|||
if err := vol.Expand(expandNewSize); !errors.Is(err, ErrVolumeClosed) { |
|||
t.Fatalf("Expand on closed: expected ErrVolumeClosed, got %v", err) |
|||
} |
|||
if err := vol.PrepareExpand(expandNewSize, 1); !errors.Is(err, ErrVolumeClosed) { |
|||
t.Fatalf("PrepareExpand on closed: expected ErrVolumeClosed, got %v", err) |
|||
} |
|||
if err := vol.CommitExpand(1); !errors.Is(err, ErrVolumeClosed) { |
|||
t.Fatalf("CommitExpand on closed: expected ErrVolumeClosed, got %v", err) |
|||
} |
|||
if err := vol.CancelExpand(1); !errors.Is(err, ErrVolumeClosed) { |
|||
t.Fatalf("CancelExpand on closed: expected ErrVolumeClosed, got %v", err) |
|||
} |
|||
} |
|||
|
|||
// T15: PrepareExpandSameSize — PrepareExpand with newSize == VolumeSize must fail.
|
|||
// BUG-CP11A2-1 fix: PrepareExpand rejects same-size with ErrSameSize.
|
|||
func TestQA_Expand_PrepareSameSize(t *testing.T) { |
|||
vol, _ := createQAExpandVol(t) |
|||
defer vol.Close() |
|||
|
|||
err := vol.PrepareExpand(expandVolSize, 1) |
|||
if !errors.Is(err, ErrSameSize) { |
|||
t.Fatalf("PrepareExpand(sameSize): expected ErrSameSize, got %v", err) |
|||
} |
|||
// Verify no state was left behind.
|
|||
ps, ee := vol.ExpandState() |
|||
if ps != 0 || ee != 0 { |
|||
t.Fatalf("state leaked: PreparedSize=%d ExpandEpoch=%d", ps, ee) |
|||
} |
|||
} |
|||
|
|||
// T16: ConcurrentPrepareAndWrite — write I/O during PrepareExpand.
|
|||
// Writes within old range must succeed, writes beyond must fail.
|
|||
func TestQA_Expand_ConcurrentWriteDuringPrepare(t *testing.T) { |
|||
vol, _ := createQAExpandVol(t) |
|||
defer vol.Close() |
|||
|
|||
// Start background writes to LBA 0 (within old range).
|
|||
var writeCount atomic.Int32 |
|||
var writeErr atomic.Value |
|||
stopCh := make(chan struct{}) |
|||
go func() { |
|||
data := make([]byte, expandBlkSize) |
|||
for { |
|||
select { |
|||
case <-stopCh: |
|||
return |
|||
default: |
|||
} |
|||
err := vol.WriteLBA(0, data) |
|||
if err != nil { |
|||
writeErr.Store(err) |
|||
return |
|||
} |
|||
writeCount.Add(1) |
|||
} |
|||
}() |
|||
|
|||
// Let writes run briefly.
|
|||
time.Sleep(10 * time.Millisecond) |
|||
|
|||
// PrepareExpand while writes are happening.
|
|||
if err := vol.PrepareExpand(expandNewSize, 1); err != nil { |
|||
close(stopCh) |
|||
t.Fatalf("prepare: %v", err) |
|||
} |
|||
|
|||
// Let a few more writes happen.
|
|||
time.Sleep(10 * time.Millisecond) |
|||
close(stopCh) |
|||
|
|||
if e := writeErr.Load(); e != nil { |
|||
t.Fatalf("write error during prepare: %v", e) |
|||
} |
|||
if writeCount.Load() == 0 { |
|||
t.Fatal("no writes completed during test") |
|||
} |
|||
} |
|||
|
|||
// T17: ExpandStateRaceWithCommit — concurrent ExpandState reads during commit.
|
|||
func TestQA_Expand_ExpandStateRaceWithCommit(t *testing.T) { |
|||
vol, _ := createQAExpandVol(t) |
|||
defer vol.Close() |
|||
|
|||
if err := vol.PrepareExpand(expandNewSize, 1); err != nil { |
|||
t.Fatalf("prepare: %v", err) |
|||
} |
|||
|
|||
var wg sync.WaitGroup |
|||
// Concurrent ExpandState readers.
|
|||
for i := 0; i < 5; i++ { |
|||
wg.Add(1) |
|||
go func() { |
|||
defer wg.Done() |
|||
for j := 0; j < 100; j++ { |
|||
ps, ee := vol.ExpandState() |
|||
// Valid states: (expandNewSize, 1) before commit, or (0, 0) after.
|
|||
if ps != 0 && ps != expandNewSize { |
|||
t.Errorf("invalid PreparedSize: %d", ps) |
|||
} |
|||
if ee != 0 && ee != 1 { |
|||
t.Errorf("invalid ExpandEpoch: %d", ee) |
|||
} |
|||
// PreparedSize and ExpandEpoch must be consistent (both set or both cleared).
|
|||
if (ps == 0) != (ee == 0) { |
|||
t.Errorf("inconsistent ExpandState: (%d, %d)", ps, ee) |
|||
} |
|||
} |
|||
}() |
|||
} |
|||
|
|||
// Commit while readers are running.
|
|||
time.Sleep(1 * time.Millisecond) |
|||
if err := vol.CommitExpand(1); err != nil { |
|||
t.Fatalf("commit: %v", err) |
|||
} |
|||
|
|||
wg.Wait() |
|||
} |
|||
|
|||
// T18: TrimDuringPreparedExpand — trim within old range must work.
|
|||
func TestQA_Expand_TrimDuringPrepared(t *testing.T) { |
|||
vol, _ := createQAExpandVol(t) |
|||
defer vol.Close() |
|||
|
|||
// Write data.
|
|||
data := make([]byte, expandBlkSize) |
|||
data[0] = 0xFF |
|||
if err := vol.WriteLBA(0, data); err != nil { |
|||
t.Fatalf("write: %v", err) |
|||
} |
|||
|
|||
// Prepare expand.
|
|||
if err := vol.PrepareExpand(expandNewSize, 1); err != nil { |
|||
t.Fatalf("prepare: %v", err) |
|||
} |
|||
|
|||
// Trim LBA 0 (within old range).
|
|||
if err := vol.Trim(0, expandBlkSize); err != nil { |
|||
t.Fatalf("trim during prepared: %v", err) |
|||
} |
|||
|
|||
// Read should return zeros.
|
|||
got, err := vol.ReadLBA(0, expandBlkSize) |
|||
if err != nil { |
|||
t.Fatalf("read after trim: %v", err) |
|||
} |
|||
zeros := make([]byte, expandBlkSize) |
|||
if !bytes.Equal(got, zeros) { |
|||
t.Fatalf("expected zeros after trim, got %x at [0]", got[0]) |
|||
} |
|||
} |
|||
|
|||
// T19: SuperblockValidate — manually construct superblock with
|
|||
// PreparedSize == VolumeSize and verify Validate() rejects it.
|
|||
func TestQA_Expand_SuperblockValidatePreparedSize(t *testing.T) { |
|||
sb := Superblock{ |
|||
Version: CurrentVersion, |
|||
VolumeSize: 1024 * 1024, |
|||
BlockSize: 4096, |
|||
ExtentSize: 65536, |
|||
WALSize: 65536, |
|||
WALOffset: SuperblockSize, |
|||
PreparedSize: 1024 * 1024, // == VolumeSize, should fail
|
|||
ExpandEpoch: 1, |
|||
} |
|||
copy(sb.Magic[:], MagicSWBK) |
|||
|
|||
if err := sb.Validate(); err == nil { |
|||
t.Fatal("Validate should reject PreparedSize == VolumeSize") |
|||
} |
|||
} |
|||
|
|||
// T20: SuperblockValidate — ExpandEpoch != 0 with PreparedSize == 0.
|
|||
func TestQA_Expand_SuperblockValidateOrphanEpoch(t *testing.T) { |
|||
sb := Superblock{ |
|||
Version: CurrentVersion, |
|||
VolumeSize: 1024 * 1024, |
|||
BlockSize: 4096, |
|||
ExtentSize: 65536, |
|||
WALSize: 65536, |
|||
WALOffset: SuperblockSize, |
|||
PreparedSize: 0, |
|||
ExpandEpoch: 5, // orphan epoch
|
|||
} |
|||
copy(sb.Magic[:], MagicSWBK) |
|||
|
|||
if err := sb.Validate(); err == nil { |
|||
t.Fatal("Validate should reject ExpandEpoch!=0 when PreparedSize==0") |
|||
} |
|||
} |
|||
@ -0,0 +1,228 @@ |
|||
//go:build ignore
|
|||
|
|||
package blockvol |
|||
|
|||
import ( |
|||
"strings" |
|||
"testing" |
|||
) |
|||
|
|||
// =============================================================================
|
|||
// QA Adversarial Tests for IOBackend Config (Item 3)
|
|||
//
|
|||
// Covers: ParseIOBackend, ResolveIOBackend, Validate for IOBackend field,
|
|||
// edge cases, unknown values, io_uring rejection, case insensitivity.
|
|||
// =============================================================================
|
|||
|
|||
// --- ParseIOBackend ---
|
|||
|
|||
func TestQA_ParseIOBackend_ValidInputs(t *testing.T) { |
|||
cases := []struct { |
|||
input string |
|||
want IOBackend |
|||
}{ |
|||
{"auto", IOBackendAuto}, |
|||
{"AUTO", IOBackendAuto}, |
|||
{"Auto", IOBackendAuto}, |
|||
{"", IOBackendAuto}, |
|||
{" auto ", IOBackendAuto}, |
|||
{"standard", IOBackendStandard}, |
|||
{"STANDARD", IOBackendStandard}, |
|||
{"Standard", IOBackendStandard}, |
|||
{" standard ", IOBackendStandard}, |
|||
{"io_uring", IOBackendIOURing}, |
|||
{"IO_URING", IOBackendIOURing}, |
|||
{"Io_Uring", IOBackendIOURing}, |
|||
{"iouring", IOBackendIOURing}, |
|||
{"IOURING", IOBackendIOURing}, |
|||
} |
|||
for _, tc := range cases { |
|||
t.Run(tc.input, func(t *testing.T) { |
|||
got, err := ParseIOBackend(tc.input) |
|||
if err != nil { |
|||
t.Fatalf("ParseIOBackend(%q): unexpected error: %v", tc.input, err) |
|||
} |
|||
if got != tc.want { |
|||
t.Fatalf("ParseIOBackend(%q) = %v, want %v", tc.input, got, tc.want) |
|||
} |
|||
}) |
|||
} |
|||
} |
|||
|
|||
func TestQA_ParseIOBackend_InvalidInputs(t *testing.T) { |
|||
invalids := []string{ |
|||
"spdk", |
|||
"uring", |
|||
"io-uring", |
|||
"io_uring_sqpoll", |
|||
"direct", |
|||
"aio", |
|||
"posix", |
|||
"libaio", |
|||
"123", |
|||
"null", |
|||
"none", |
|||
} |
|||
for _, s := range invalids { |
|||
t.Run(s, func(t *testing.T) { |
|||
got, err := ParseIOBackend(s) |
|||
if err == nil { |
|||
t.Fatalf("ParseIOBackend(%q) = %v, want error", s, got) |
|||
} |
|||
if got != IOBackendAuto { |
|||
t.Fatalf("ParseIOBackend(%q) error case should return Auto, got %v", s, got) |
|||
} |
|||
if !strings.Contains(err.Error(), "unknown IOBackend") { |
|||
t.Fatalf("error should mention 'unknown IOBackend', got: %v", err) |
|||
} |
|||
}) |
|||
} |
|||
} |
|||
|
|||
// --- IOBackend.String ---
|
|||
|
|||
func TestQA_IOBackend_String(t *testing.T) { |
|||
cases := []struct { |
|||
b IOBackend |
|||
want string |
|||
}{ |
|||
{IOBackendAuto, "auto"}, |
|||
{IOBackendStandard, "standard"}, |
|||
{IOBackendIOURing, "io_uring"}, |
|||
{IOBackend(99), "unknown(99)"}, |
|||
{IOBackend(-1), "unknown(-1)"}, |
|||
} |
|||
for _, tc := range cases { |
|||
got := tc.b.String() |
|||
if got != tc.want { |
|||
t.Errorf("IOBackend(%d).String() = %q, want %q", int(tc.b), got, tc.want) |
|||
} |
|||
} |
|||
} |
|||
|
|||
// --- ResolveIOBackend ---
|
|||
|
|||
func TestQA_ResolveIOBackend(t *testing.T) { |
|||
// Auto resolves to standard.
|
|||
if got := ResolveIOBackend(IOBackendAuto); got != IOBackendStandard { |
|||
t.Fatalf("ResolveIOBackend(Auto) = %v, want Standard", got) |
|||
} |
|||
// Standard stays standard.
|
|||
if got := ResolveIOBackend(IOBackendStandard); got != IOBackendStandard { |
|||
t.Fatalf("ResolveIOBackend(Standard) = %v, want Standard", got) |
|||
} |
|||
// IOURing stays io_uring (resolve doesn't validate, just maps auto).
|
|||
if got := ResolveIOBackend(IOBackendIOURing); got != IOBackendIOURing { |
|||
t.Fatalf("ResolveIOBackend(IOURing) = %v, want IOURing", got) |
|||
} |
|||
} |
|||
|
|||
// --- Validate IOBackend field ---
|
|||
|
|||
func TestQA_Config_Validate_IOBackend_AutoOK(t *testing.T) { |
|||
cfg := DefaultConfig() |
|||
cfg.IOBackend = IOBackendAuto |
|||
if err := cfg.Validate(); err != nil { |
|||
t.Fatalf("Validate with IOBackendAuto: %v", err) |
|||
} |
|||
} |
|||
|
|||
func TestQA_Config_Validate_IOBackend_StandardOK(t *testing.T) { |
|||
cfg := DefaultConfig() |
|||
cfg.IOBackend = IOBackendStandard |
|||
if err := cfg.Validate(); err != nil { |
|||
t.Fatalf("Validate with IOBackendStandard: %v", err) |
|||
} |
|||
} |
|||
|
|||
func TestQA_Config_Validate_IOBackend_IOURingRejected(t *testing.T) { |
|||
cfg := DefaultConfig() |
|||
cfg.IOBackend = IOBackendIOURing |
|||
err := cfg.Validate() |
|||
if err == nil { |
|||
t.Fatal("Validate should reject IOBackendIOURing (not yet implemented)") |
|||
} |
|||
if !strings.Contains(err.Error(), "not yet implemented") { |
|||
t.Fatalf("error should mention 'not yet implemented', got: %v", err) |
|||
} |
|||
} |
|||
|
|||
func TestQA_Config_Validate_IOBackend_OutOfRange(t *testing.T) { |
|||
cfg := DefaultConfig() |
|||
cfg.IOBackend = IOBackend(99) |
|||
err := cfg.Validate() |
|||
if err == nil { |
|||
t.Fatal("Validate should reject out-of-range IOBackend") |
|||
} |
|||
if !strings.Contains(err.Error(), "unknown IOBackend") { |
|||
t.Fatalf("error should mention 'unknown IOBackend', got: %v", err) |
|||
} |
|||
} |
|||
|
|||
func TestQA_Config_Validate_IOBackend_NegativeValue(t *testing.T) { |
|||
cfg := DefaultConfig() |
|||
cfg.IOBackend = IOBackend(-1) |
|||
err := cfg.Validate() |
|||
if err == nil { |
|||
t.Fatal("Validate should reject negative IOBackend") |
|||
} |
|||
} |
|||
|
|||
// --- DefaultConfig IOBackend ---
|
|||
|
|||
func TestQA_DefaultConfig_IOBackend_IsAuto(t *testing.T) { |
|||
cfg := DefaultConfig() |
|||
if cfg.IOBackend != IOBackendAuto { |
|||
t.Fatalf("DefaultConfig().IOBackend = %v, want Auto (zero value)", cfg.IOBackend) |
|||
} |
|||
} |
|||
|
|||
// --- applyDefaults does NOT override IOBackend ---
|
|||
|
|||
func TestQA_ApplyDefaults_IOBackend_ZeroStaysAuto(t *testing.T) { |
|||
cfg := BlockVolConfig{} |
|||
cfg.applyDefaults() |
|||
// IOBackend is not in applyDefaults — zero value (Auto) should remain.
|
|||
if cfg.IOBackend != IOBackendAuto { |
|||
t.Fatalf("applyDefaults left IOBackend = %v, want Auto", cfg.IOBackend) |
|||
} |
|||
} |
|||
|
|||
func TestQA_ApplyDefaults_IOBackend_ExplicitPreserved(t *testing.T) { |
|||
cfg := BlockVolConfig{IOBackend: IOBackendStandard} |
|||
cfg.applyDefaults() |
|||
if cfg.IOBackend != IOBackendStandard { |
|||
t.Fatalf("applyDefaults changed IOBackend from Standard to %v", cfg.IOBackend) |
|||
} |
|||
} |
|||
|
|||
// --- Round-trip: parse → resolve → string ---
|
|||
|
|||
func TestQA_IOBackend_RoundTrip(t *testing.T) { |
|||
for _, input := range []string{"auto", "standard"} { |
|||
b, err := ParseIOBackend(input) |
|||
if err != nil { |
|||
t.Fatalf("ParseIOBackend(%q): %v", input, err) |
|||
} |
|||
resolved := ResolveIOBackend(b) |
|||
s := resolved.String() |
|||
if s != "standard" { |
|||
t.Fatalf("round-trip %q → resolve → string = %q, want standard", input, s) |
|||
} |
|||
} |
|||
} |
|||
|
|||
// --- Iota ordering stability ---
|
|||
|
|||
func TestQA_IOBackend_IotaValues(t *testing.T) { |
|||
// These values are persisted/transmitted — they must never change.
|
|||
if IOBackendAuto != 0 { |
|||
t.Fatalf("IOBackendAuto = %d, want 0", IOBackendAuto) |
|||
} |
|||
if IOBackendStandard != 1 { |
|||
t.Fatalf("IOBackendStandard = %d, want 1", IOBackendStandard) |
|||
} |
|||
if IOBackendIOURing != 2 { |
|||
t.Fatalf("IOBackendIOURing = %d, want 2", IOBackendIOURing) |
|||
} |
|||
} |
|||
@ -0,0 +1,567 @@ |
|||
package blockvol |
|||
|
|||
import ( |
|||
"bytes" |
|||
"crypto/rand" |
|||
"errors" |
|||
"fmt" |
|||
"os" |
|||
"path/filepath" |
|||
"sync" |
|||
"sync/atomic" |
|||
"testing" |
|||
) |
|||
|
|||
// =============================================================================
|
|||
// QA Adversarial Tests for StorageProfile (CP11A-1)
|
|||
//
|
|||
// These tests go beyond the dev-test coverage in storage_profile_test.go:
|
|||
// - SP-A1: write/read data integrity on single profile
|
|||
// - SP-A2: concurrent writes with no corruption
|
|||
// - additional: crash recovery, superblock byte corruption, boundary cases
|
|||
// =============================================================================
|
|||
|
|||
// TestQA_Profile_WritePath_SingleCorrect writes multiple blocks at different
|
|||
// LBAs on a single-profile volume, reads them back, and verifies byte-for-byte
|
|||
// correctness. This is SP-A1 from the test spec.
|
|||
func TestQA_Profile_WritePath_SingleCorrect(t *testing.T) { |
|||
dir := t.TempDir() |
|||
path := filepath.Join(dir, "sp-a1.blk") |
|||
|
|||
vol, err := CreateBlockVol(path, CreateOptions{ |
|||
VolumeSize: 256 * 1024, // 256KB = 64 blocks
|
|||
BlockSize: 4096, |
|||
WALSize: 128 * 1024, |
|||
StorageProfile: ProfileSingle, |
|||
}) |
|||
if err != nil { |
|||
t.Fatalf("Create: %v", err) |
|||
} |
|||
defer vol.Close() |
|||
|
|||
if vol.Profile() != ProfileSingle { |
|||
t.Fatalf("Profile() = %v, want single", vol.Profile()) |
|||
} |
|||
|
|||
// Write unique patterns to blocks 0, 10, 30, 63 (last block).
|
|||
type testBlock struct { |
|||
lba uint64 |
|||
fill byte |
|||
} |
|||
blocks := []testBlock{ |
|||
{0, 0xAA}, |
|||
{10, 0xBB}, |
|||
{30, 0xCC}, |
|||
{63, 0xDD}, // last block in 256KB volume
|
|||
} |
|||
|
|||
for _, b := range blocks { |
|||
data := make([]byte, 4096) |
|||
for i := range data { |
|||
data[i] = b.fill |
|||
} |
|||
if err := vol.WriteLBA(b.lba, data); err != nil { |
|||
t.Fatalf("WriteLBA(%d): %v", b.lba, err) |
|||
} |
|||
} |
|||
|
|||
// SyncCache to ensure WAL is durable.
|
|||
if err := vol.SyncCache(); err != nil { |
|||
t.Fatalf("SyncCache: %v", err) |
|||
} |
|||
|
|||
// Read back and verify.
|
|||
for _, b := range blocks { |
|||
got, err := vol.ReadLBA(b.lba, 4096) |
|||
if err != nil { |
|||
t.Fatalf("ReadLBA(%d): %v", b.lba, err) |
|||
} |
|||
expected := make([]byte, 4096) |
|||
for i := range expected { |
|||
expected[i] = b.fill |
|||
} |
|||
if !bytes.Equal(got, expected) { |
|||
t.Errorf("LBA %d: data mismatch (first byte: got 0x%02X, want 0x%02X)", |
|||
b.lba, got[0], b.fill) |
|||
} |
|||
} |
|||
|
|||
// Unwritten blocks should read as zeros.
|
|||
zeros, err := vol.ReadLBA(5, 4096) |
|||
if err != nil { |
|||
t.Fatalf("ReadLBA(5): %v", err) |
|||
} |
|||
for i, b := range zeros { |
|||
if b != 0 { |
|||
t.Fatalf("LBA 5 byte[%d] = 0x%02X, want 0x00 (unwritten)", i, b) |
|||
} |
|||
} |
|||
} |
|||
|
|||
// TestQA_Profile_ConcurrentWrites_Single runs 16 goroutines writing to
// non-overlapping LBAs on a single-profile volume. No data corruption
// or panics should occur. This is SP-A2 from the test spec.
func TestQA_Profile_ConcurrentWrites_Single(t *testing.T) {
	dir := t.TempDir()
	path := filepath.Join(dir, "sp-a2.blk")

	// 1MB volume = 256 blocks. Each of 16 goroutines gets 16 blocks.
	vol, err := CreateBlockVol(path, CreateOptions{
		VolumeSize:     1024 * 1024,
		BlockSize:      4096,
		WALSize:        512 * 1024,
		StorageProfile: ProfileSingle,
	})
	if err != nil {
		t.Fatalf("Create: %v", err)
	}
	defer vol.Close()

	const goroutines = 16
	const blocksPerGoroutine = 16
	var wg sync.WaitGroup
	// Each goroutine writes only errs[gid]; disjoint indices plus the
	// wg.Wait() barrier below make this race-free without extra locking.
	errs := make([]error, goroutines)

	for g := 0; g < goroutines; g++ {
		wg.Add(1)
		go func(gid int) {
			defer wg.Done()
			// Disjoint LBA range per goroutine: [gid*16, gid*16+15].
			baseLBA := uint64(gid * blocksPerGoroutine)
			fill := byte(gid + 1) // unique fill per goroutine

			for i := 0; i < blocksPerGoroutine; i++ {
				data := make([]byte, 4096)
				for j := range data {
					data[j] = fill
				}
				if err := vol.WriteLBA(baseLBA+uint64(i), data); err != nil {
					// Record only the first failure for this goroutine, then stop.
					errs[gid] = fmt.Errorf("goroutine %d LBA %d: %v", gid, baseLBA+uint64(i), err)
					return
				}
			}
		}(g)
	}
	wg.Wait()

	for i, err := range errs {
		if err != nil {
			t.Fatalf("goroutine %d: %v", i, err)
		}
	}

	// Sync and verify all data.
	if err := vol.SyncCache(); err != nil {
		t.Fatalf("SyncCache: %v", err)
	}

	// Read back every goroutine's region and check its unique fill byte —
	// any cross-contamination between regions would show up here.
	for g := 0; g < goroutines; g++ {
		baseLBA := uint64(g * blocksPerGoroutine)
		fill := byte(g + 1)
		for i := 0; i < blocksPerGoroutine; i++ {
			lba := baseLBA + uint64(i)
			got, err := vol.ReadLBA(lba, 4096)
			if err != nil {
				t.Fatalf("ReadLBA(%d): %v", lba, err)
			}
			for j, b := range got {
				if b != fill {
					t.Fatalf("LBA %d byte[%d] = 0x%02X, want 0x%02X (goroutine %d)",
						lba, j, b, fill, g)
				}
			}
		}
	}
}
|||
|
|||
// TestQA_Profile_SurvivesCrashRecovery writes data on a single-profile
|
|||
// volume, simulates a crash (close without clean shutdown), reopens, and
|
|||
// verifies that the profile metadata and data are intact.
|
|||
func TestQA_Profile_SurvivesCrashRecovery(t *testing.T) { |
|||
dir := t.TempDir() |
|||
path := filepath.Join(dir, "sp-crash.blk") |
|||
|
|||
vol, err := CreateBlockVol(path, CreateOptions{ |
|||
VolumeSize: 64 * 1024, |
|||
BlockSize: 4096, |
|||
WALSize: 32 * 1024, |
|||
StorageProfile: ProfileSingle, |
|||
}) |
|||
if err != nil { |
|||
t.Fatalf("Create: %v", err) |
|||
} |
|||
|
|||
// Write known data.
|
|||
data := make([]byte, 4096) |
|||
for i := range data { |
|||
data[i] = 0xEE |
|||
} |
|||
if err := vol.WriteLBA(0, data); err != nil { |
|||
t.Fatalf("WriteLBA: %v", err) |
|||
} |
|||
if err := vol.SyncCache(); err != nil { |
|||
t.Fatalf("SyncCache: %v", err) |
|||
} |
|||
|
|||
// Close normally (simulates a crash by just closing).
|
|||
vol.Close() |
|||
|
|||
// Reopen — crash recovery runs.
|
|||
vol2, err := OpenBlockVol(path) |
|||
if err != nil { |
|||
t.Fatalf("Reopen: %v", err) |
|||
} |
|||
defer vol2.Close() |
|||
|
|||
if vol2.Profile() != ProfileSingle { |
|||
t.Errorf("Profile after reopen = %v, want single", vol2.Profile()) |
|||
} |
|||
|
|||
got, err := vol2.ReadLBA(0, 4096) |
|||
if err != nil { |
|||
t.Fatalf("ReadLBA after reopen: %v", err) |
|||
} |
|||
if got[0] != 0xEE { |
|||
t.Errorf("data[0] = 0x%02X, want 0xEE", got[0]) |
|||
} |
|||
} |
|||
|
|||
// TestQA_Profile_CorruptByte_AllValues corrupts the StorageProfile byte on
|
|||
// disk to every value 2..255 and verifies that OpenBlockVol rejects each one.
|
|||
func TestQA_Profile_CorruptByte_AllValues(t *testing.T) { |
|||
dir := t.TempDir() |
|||
path := filepath.Join(dir, "sp-corrupt-all.blk") |
|||
|
|||
vol, err := CreateBlockVol(path, CreateOptions{ |
|||
VolumeSize: 64 * 1024, |
|||
BlockSize: 4096, |
|||
WALSize: 32 * 1024, |
|||
}) |
|||
if err != nil { |
|||
t.Fatalf("Create: %v", err) |
|||
} |
|||
vol.Close() |
|||
|
|||
// Read original file for restoration.
|
|||
original, err := os.ReadFile(path) |
|||
if err != nil { |
|||
t.Fatalf("read: %v", err) |
|||
} |
|||
|
|||
for corruptVal := byte(2); corruptVal != 0; corruptVal++ { // 2..255
|
|||
// Restore original, then corrupt.
|
|||
if err := os.WriteFile(path, original, 0644); err != nil { |
|||
t.Fatalf("restore: %v", err) |
|||
} |
|||
f, err := os.OpenFile(path, os.O_RDWR, 0644) |
|||
if err != nil { |
|||
t.Fatalf("open: %v", err) |
|||
} |
|||
if _, err := f.WriteAt([]byte{corruptVal}, 105); err != nil { |
|||
f.Close() |
|||
t.Fatalf("corrupt: %v", err) |
|||
} |
|||
f.Close() |
|||
|
|||
_, err = OpenBlockVol(path) |
|||
if err == nil { |
|||
t.Errorf("StorageProfile=%d: OpenBlockVol should fail", corruptVal) |
|||
} |
|||
} |
|||
} |
|||
|
|||
// TestQA_Profile_StripedReject_NoFileLeaked verifies that attempting to
|
|||
// create a striped volume does not leak partial files, even under different
|
|||
// config combinations.
|
|||
func TestQA_Profile_StripedReject_NoFileLeaked(t *testing.T) { |
|||
dir := t.TempDir() |
|||
|
|||
configs := []CreateOptions{ |
|||
{VolumeSize: 64 * 1024, StorageProfile: ProfileStriped}, |
|||
{VolumeSize: 1024 * 1024, StorageProfile: ProfileStriped, WALSize: 256 * 1024}, |
|||
{VolumeSize: 64 * 1024, StorageProfile: ProfileStriped, BlockSize: 512}, |
|||
} |
|||
|
|||
for i, opts := range configs { |
|||
path := filepath.Join(dir, fmt.Sprintf("striped-%d.blk", i)) |
|||
_, err := CreateBlockVol(path, opts) |
|||
if !errors.Is(err, ErrStripedNotImplemented) { |
|||
t.Errorf("config %d: error = %v, want ErrStripedNotImplemented", i, err) |
|||
} |
|||
if _, statErr := os.Stat(path); !os.IsNotExist(statErr) { |
|||
t.Errorf("config %d: file %s should not exist after rejected create", i, path) |
|||
} |
|||
} |
|||
} |
|||
|
|||
// TestQA_Profile_ConcurrentCreateSameFile races multiple goroutines trying
// to create a volume at the same path. Exactly one should succeed (O_EXCL),
// the rest should fail. No partial files should remain from losers.
func TestQA_Profile_ConcurrentCreateSameFile(t *testing.T) {
	dir := t.TempDir()
	path := filepath.Join(dir, "race.blk")

	const racers = 8
	var (
		wg       sync.WaitGroup
		wins     atomic.Int32 // racers whose CreateBlockVol succeeded
		errCount atomic.Int32 // racers that observed a create error
	)

	for i := 0; i < racers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			vol, err := CreateBlockVol(path, CreateOptions{
				VolumeSize:     64 * 1024,
				BlockSize:      4096,
				WALSize:        32 * 1024,
				StorageProfile: ProfileSingle,
			})
			if err != nil {
				errCount.Add(1)
				return
			}
			wins.Add(1)
			vol.Close()
		}()
	}
	wg.Wait()

	// Exclusive-create semantics: exactly one winner, all others error.
	// Counters are read only after wg.Wait(), so plain Load is safe.
	if wins.Load() != 1 {
		t.Errorf("winners = %d, want exactly 1", wins.Load())
	}
	if errCount.Load() != racers-1 {
		t.Errorf("errors = %d, want %d", errCount.Load(), racers-1)
	}

	// The winner's file should be valid.
	vol, err := OpenBlockVol(path)
	if err != nil {
		t.Fatalf("OpenBlockVol winner file: %v", err)
	}
	defer vol.Close()
	if vol.Profile() != ProfileSingle {
		t.Errorf("Profile() = %v, want single", vol.Profile())
	}
}
|||
|
|||
// TestQA_Profile_SuperblockByteOffset verifies the StorageProfile byte is
|
|||
// at the exact expected offset (105) in the on-disk format. This prevents
|
|||
// silent field-reorder regressions.
|
|||
func TestQA_Profile_SuperblockByteOffset(t *testing.T) { |
|||
sb, err := NewSuperblock(64*1024, CreateOptions{ |
|||
StorageProfile: ProfileSingle, |
|||
}) |
|||
if err != nil { |
|||
t.Fatalf("NewSuperblock: %v", err) |
|||
} |
|||
|
|||
// Write the superblock for single profile.
|
|||
var buf bytes.Buffer |
|||
sb.WriteTo(&buf) |
|||
data := buf.Bytes() |
|||
|
|||
if data[105] != 0 { |
|||
t.Errorf("offset 105 = %d, want 0 (ProfileSingle)", data[105]) |
|||
} |
|||
|
|||
// Now set striped and check the byte changed.
|
|||
sb.StorageProfile = uint8(ProfileStriped) |
|||
var buf2 bytes.Buffer |
|||
sb.WriteTo(&buf2) |
|||
data2 := buf2.Bytes() |
|||
|
|||
if data2[105] != 1 { |
|||
t.Errorf("offset 105 = %d, want 1 (ProfileStriped)", data2[105]) |
|||
} |
|||
|
|||
// Verify all other bytes are identical (only offset 105 changed).
|
|||
for i := range data { |
|||
if i == 105 { |
|||
continue |
|||
} |
|||
if data[i] != data2[i] { |
|||
t.Errorf("byte[%d] changed: 0x%02X -> 0x%02X (only offset 105 should differ)", i, data[i], data2[i]) |
|||
} |
|||
} |
|||
} |
|||
|
|||
// TestQA_Profile_MultiBlockWriteRead writes a multi-block (16KB) payload
|
|||
// at a non-zero LBA and reads it back on a single-profile volume.
|
|||
// Catches alignment and multi-block dirty-map consistency bugs.
|
|||
func TestQA_Profile_MultiBlockWriteRead(t *testing.T) { |
|||
dir := t.TempDir() |
|||
path := filepath.Join(dir, "sp-multi.blk") |
|||
|
|||
vol, err := CreateBlockVol(path, CreateOptions{ |
|||
VolumeSize: 512 * 1024, |
|||
BlockSize: 4096, |
|||
WALSize: 256 * 1024, |
|||
StorageProfile: ProfileSingle, |
|||
}) |
|||
if err != nil { |
|||
t.Fatalf("Create: %v", err) |
|||
} |
|||
defer vol.Close() |
|||
|
|||
// Write 4 blocks (16KB) of random data at LBA 20.
|
|||
payload := make([]byte, 16384) |
|||
if _, err := rand.Read(payload); err != nil { |
|||
t.Fatalf("rand: %v", err) |
|||
} |
|||
|
|||
if err := vol.WriteLBA(20, payload); err != nil { |
|||
t.Fatalf("WriteLBA: %v", err) |
|||
} |
|||
if err := vol.SyncCache(); err != nil { |
|||
t.Fatalf("SyncCache: %v", err) |
|||
} |
|||
|
|||
got, err := vol.ReadLBA(20, 16384) |
|||
if err != nil { |
|||
t.Fatalf("ReadLBA: %v", err) |
|||
} |
|||
if !bytes.Equal(got, payload) { |
|||
t.Error("multi-block payload mismatch") |
|||
} |
|||
} |
|||
|
|||
// TestQA_Profile_ExpandPreservesProfile verifies that expanding a
|
|||
// single-profile volume preserves the profile metadata.
|
|||
func TestQA_Profile_ExpandPreservesProfile(t *testing.T) { |
|||
dir := t.TempDir() |
|||
path := filepath.Join(dir, "sp-expand.blk") |
|||
|
|||
vol, err := CreateBlockVol(path, CreateOptions{ |
|||
VolumeSize: 64 * 1024, |
|||
BlockSize: 4096, |
|||
WALSize: 32 * 1024, |
|||
StorageProfile: ProfileSingle, |
|||
}) |
|||
if err != nil { |
|||
t.Fatalf("Create: %v", err) |
|||
} |
|||
|
|||
// Write at LBA 0 before expand.
|
|||
data := make([]byte, 4096) |
|||
for i := range data { |
|||
data[i] = 0x42 |
|||
} |
|||
if err := vol.WriteLBA(0, data); err != nil { |
|||
t.Fatalf("WriteLBA: %v", err) |
|||
} |
|||
|
|||
// Expand to 128KB.
|
|||
if err := vol.Expand(128 * 1024); err != nil { |
|||
t.Fatalf("Expand: %v", err) |
|||
} |
|||
|
|||
if vol.Profile() != ProfileSingle { |
|||
t.Errorf("Profile after expand = %v, want single", vol.Profile()) |
|||
} |
|||
|
|||
// Verify data at LBA 0 survived.
|
|||
got, err := vol.ReadLBA(0, 4096) |
|||
if err != nil { |
|||
t.Fatalf("ReadLBA(0): %v", err) |
|||
} |
|||
if got[0] != 0x42 { |
|||
t.Errorf("data[0] = 0x%02X, want 0x42", got[0]) |
|||
} |
|||
|
|||
// Write to new region (LBA 16+ is in expanded area).
|
|||
newData := make([]byte, 4096) |
|||
for i := range newData { |
|||
newData[i] = 0x99 |
|||
} |
|||
if err := vol.WriteLBA(20, newData); err != nil { |
|||
t.Fatalf("WriteLBA(20): %v", err) |
|||
} |
|||
|
|||
got2, err := vol.ReadLBA(20, 4096) |
|||
if err != nil { |
|||
t.Fatalf("ReadLBA(20): %v", err) |
|||
} |
|||
if got2[0] != 0x99 { |
|||
t.Errorf("expanded LBA 20 data[0] = 0x%02X, want 0x99", got2[0]) |
|||
} |
|||
|
|||
// Close and reopen — verify profile and data survive.
|
|||
vol.Close() |
|||
vol2, err := OpenBlockVol(path) |
|||
if err != nil { |
|||
t.Fatalf("Reopen: %v", err) |
|||
} |
|||
defer vol2.Close() |
|||
|
|||
if vol2.Profile() != ProfileSingle { |
|||
t.Errorf("Profile after reopen = %v, want single", vol2.Profile()) |
|||
} |
|||
if vol2.Info().VolumeSize != 128*1024 { |
|||
t.Errorf("VolumeSize = %d, want %d", vol2.Info().VolumeSize, 128*1024) |
|||
} |
|||
} |
|||
|
|||
// TestQA_Profile_SnapshotPreservesProfile creates a snapshot on a
|
|||
// single-profile volume, writes more data, restores the snapshot,
|
|||
// and verifies the profile metadata is unchanged.
|
|||
func TestQA_Profile_SnapshotPreservesProfile(t *testing.T) { |
|||
dir := t.TempDir() |
|||
path := filepath.Join(dir, "sp-snap.blk") |
|||
|
|||
vol, err := CreateBlockVol(path, CreateOptions{ |
|||
VolumeSize: 64 * 1024, |
|||
BlockSize: 4096, |
|||
WALSize: 32 * 1024, |
|||
StorageProfile: ProfileSingle, |
|||
}) |
|||
if err != nil { |
|||
t.Fatalf("Create: %v", err) |
|||
} |
|||
defer vol.Close() |
|||
|
|||
// Write block A.
|
|||
dataA := make([]byte, 4096) |
|||
for i := range dataA { |
|||
dataA[i] = 0xAA |
|||
} |
|||
if err := vol.WriteLBA(0, dataA); err != nil { |
|||
t.Fatalf("WriteLBA(A): %v", err) |
|||
} |
|||
|
|||
// Create snapshot.
|
|||
if err := vol.CreateSnapshot(1); err != nil { |
|||
t.Fatalf("CreateSnapshot: %v", err) |
|||
} |
|||
|
|||
// Write block B (overwrites A at LBA 0).
|
|||
dataB := make([]byte, 4096) |
|||
for i := range dataB { |
|||
dataB[i] = 0xBB |
|||
} |
|||
if err := vol.WriteLBA(0, dataB); err != nil { |
|||
t.Fatalf("WriteLBA(B): %v", err) |
|||
} |
|||
|
|||
// Verify live reads B.
|
|||
got, _ := vol.ReadLBA(0, 4096) |
|||
if got[0] != 0xBB { |
|||
t.Fatalf("live data[0] = 0x%02X, want 0xBB", got[0]) |
|||
} |
|||
|
|||
// Restore snapshot.
|
|||
if err := vol.RestoreSnapshot(1); err != nil { |
|||
t.Fatalf("RestoreSnapshot: %v", err) |
|||
} |
|||
|
|||
// Profile should be unchanged.
|
|||
if vol.Profile() != ProfileSingle { |
|||
t.Errorf("Profile after restore = %v, want single", vol.Profile()) |
|||
} |
|||
|
|||
// Data should be A again.
|
|||
got2, _ := vol.ReadLBA(0, 4096) |
|||
if got2[0] != 0xAA { |
|||
t.Errorf("restored data[0] = 0x%02X, want 0xAA", got2[0]) |
|||
} |
|||
} |
|||
Write
Preview
Loading…
Cancel
Save
Reference in new issue