Browse Source
feat: Phase 3 — performance tuning, iSCSI session refactor, store integration
Phase 3 delivers five checkpoints: CP1 Engine Tuning: BlockVolConfig tunables, 256-shard DirtyMap, adaptive group commit (low-watermark immediate flush), WAL pressure handling with backpressure and ErrWALFull timeout. CP2 iSCSI Session Refactor: RX/TX goroutine split with respCh (cap 64), txLoop for serialized response writes, StatSN assignment modes. Login phase stays single-goroutine; full-duplex after login. CP3 Store Integration: BlockVolAdapter (iscsi.BlockDevice interface), BlockVolumeStore management, BlockService in volume_server_block.go, CLI flags (--block.listen/dir/iqn.prefix), sw-block-attach.sh helper. CP5 Concurrency Hardening: WAL reuse guard (LSN validation in ReadLBA), opsOutstanding counter with beginOp/endOp + Close drain, appendWithRetry shared by WriteLBA and TrimLBA, flusher LSN guard in FlushOnce. Bug fixes (P3-BUG-2–11): unbounded pending queue cap, Data-Out timeout, flusher error logging, GroupCommitter panic recovery, Close vs concurrent ops guard, target shutdown race, WAL-full retry vs Close, WRITE SAME(16) for XFS, MODE SENSE(10) + VPD 0xB0/0xB2 for Linux kernel compatibility. 797 tests passing (517 engine + 280 iSCSI), go vet clean. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>feature/sw-block
37 changed files with 6566 additions and 458 deletions
-
12weed/command/volume.go
-
114weed/server/volume_server_block.go
-
59weed/server/volume_server_block_test.go
-
47weed/storage/blockvol/adapter.go
-
70weed/storage/blockvol/adapter_test.go
-
241weed/storage/blockvol/blockvol.go
-
221weed/storage/blockvol/blockvol_qa_test.go
-
1011weed/storage/blockvol/blockvol_test.go
-
49weed/storage/blockvol/bug_gc_panic_test.go
-
86weed/storage/blockvol/config.go
-
115weed/storage/blockvol/config_test.go
-
115weed/storage/blockvol/dirty_map.go
-
230weed/storage/blockvol/dirty_map_test.go
-
69weed/storage/blockvol/flusher.go
-
123weed/storage/blockvol/flusher_test.go
-
108weed/storage/blockvol/group_commit.go
-
297weed/storage/blockvol/group_commit_test.go
-
69weed/storage/blockvol/iscsi/bug_dataout_timeout_test.go
-
114weed/storage/blockvol/iscsi/bug_pending_unbounded_test.go
-
76weed/storage/blockvol/iscsi/dataio.go
-
5weed/storage/blockvol/iscsi/dataio_test.go
-
3weed/storage/blockvol/iscsi/login.go
-
634weed/storage/blockvol/iscsi/qa_rxtx_test.go
-
536weed/storage/blockvol/iscsi/qa_stability_test.go
-
188weed/storage/blockvol/iscsi/scsi.go
-
248weed/storage/blockvol/iscsi/scsi_test.go
-
306weed/storage/blockvol/iscsi/session.go
-
775weed/storage/blockvol/iscsi/session_test.go
-
7weed/storage/blockvol/iscsi/target.go
-
45weed/storage/blockvol/iscsi/target_test.go
-
503weed/storage/blockvol/qa_phase3_engine_test.go
-
77weed/storage/blockvol/recovery_test.go
-
185weed/storage/blockvol/scripts/sw-block-attach.sh
-
12weed/storage/blockvol/wal_writer.go
-
72weed/storage/blockvol/wal_writer_test.go
-
93weed/storage/store_blockvol.go
-
109weed/storage/store_blockvol_test.go
@ -0,0 +1,114 @@ |
|||
package weed_server |
|||
|
|||
import ( |
|||
"log" |
|||
"os" |
|||
"path/filepath" |
|||
"strings" |
|||
|
|||
"github.com/seaweedfs/seaweedfs/weed/glog" |
|||
"github.com/seaweedfs/seaweedfs/weed/storage" |
|||
"github.com/seaweedfs/seaweedfs/weed/storage/blockvol" |
|||
"github.com/seaweedfs/seaweedfs/weed/storage/blockvol/iscsi" |
|||
) |
|||
|
|||
// BlockService manages block volumes and the iSCSI target server.
// It is constructed by StartBlockService and torn down by Shutdown;
// a nil *BlockService represents the feature being disabled.
type BlockService struct {
	blockStore   *storage.BlockVolumeStore // open block volumes, keyed by file path
	targetServer *iscsi.TargetServer       // TCP iSCSI target serving the registered volumes
	iqnPrefix    string                    // prefix prepended to volume names to form IQNs
}
|||
|
|||
// StartBlockService scans blockDir for .blk files, opens them as block volumes,
// registers them with an iSCSI target server, and starts listening.
// Returns nil if blockDir is empty (feature disabled).
//
// Per-volume failures are logged and skipped rather than aborting startup,
// so the service comes up with whatever volumes are usable. The iSCSI
// listener runs in a background goroutine for the life of the process.
func StartBlockService(listenAddr, blockDir, iqnPrefix string) *BlockService {
	if blockDir == "" {
		return nil
	}

	if iqnPrefix == "" {
		iqnPrefix = "iqn.2024-01.com.seaweedfs:vol."
	}

	bs := &BlockService{
		blockStore: storage.NewBlockVolumeStore(),
		iqnPrefix:  iqnPrefix,
	}

	// The iscsi package takes a plain *log.Logger rather than glog.
	logger := log.New(os.Stderr, "iscsi: ", log.LstdFlags)

	config := iscsi.DefaultTargetConfig()
	config.TargetName = iqnPrefix + "default"

	bs.targetServer = iscsi.NewTargetServer(listenAddr, config, logger)

	// Scan blockDir for .blk files.
	entries, err := os.ReadDir(blockDir)
	if err != nil {
		// Directory unreadable: start the target anyway with no volumes.
		glog.Warningf("block service: cannot read dir %s: %v", blockDir, err)
		return bs
	}

	for _, entry := range entries {
		if entry.IsDir() || !strings.HasSuffix(entry.Name(), ".blk") {
			continue
		}
		path := filepath.Join(blockDir, entry.Name())
		vol, err := bs.blockStore.AddBlockVolume(path)
		if err != nil {
			// Auto-initialize raw files (e.g. created via truncate).
			// NOTE(review): any open failure on a non-empty file takes this
			// path and deletes the file before re-creating it. If
			// AddBlockVolume can fail on a genuine-but-corrupt volume, this
			// destroys its data — confirm the error taxonomy. The os.Remove
			// error is also ignored; if it fails, CreateBlockVol (O_EXCL)
			// will fail and the volume is skipped below.
			info, serr := entry.Info()
			if serr == nil && info.Size() > 0 {
				glog.V(0).Infof("block service: auto-creating blockvol %s (%d bytes)", path, info.Size())
				os.Remove(path) // remove raw file so CreateBlockVol can use O_EXCL
				created, cerr := blockvol.CreateBlockVol(path, blockvol.CreateOptions{
					VolumeSize: uint64(info.Size()),
				})
				if cerr != nil {
					glog.Warningf("block service: auto-create %s: %v", path, cerr)
					continue
				}
				created.Close()
				vol, err = bs.blockStore.AddBlockVolume(path)
				if err != nil {
					glog.Warningf("block service: skip %s after auto-create: %v", path, err)
					continue
				}
			} else {
				glog.Warningf("block service: skip %s: %v", path, err)
				continue
			}
		}

		// Derive IQN from filename: vol1.blk -> iqn.2024-01.com.seaweedfs:vol.vol1
		name := strings.TrimSuffix(entry.Name(), ".blk")
		iqn := iqnPrefix + name
		adapter := blockvol.NewBlockVolAdapter(vol)
		bs.targetServer.AddVolume(iqn, adapter)
		glog.V(0).Infof("block service: registered %s as %s", path, iqn)
	}

	// Start iSCSI target in background.
	go func() {
		if err := bs.targetServer.ListenAndServe(); err != nil {
			glog.Warningf("block service: iSCSI target stopped: %v", err)
		}
	}()

	glog.V(0).Infof("block service: iSCSI target started on %s", listenAddr)
	return bs
}
|||
|
|||
// Shutdown gracefully stops the iSCSI target and closes all block volumes.
|
|||
func (bs *BlockService) Shutdown() { |
|||
if bs == nil { |
|||
return |
|||
} |
|||
glog.V(0).Infof("block service: shutting down...") |
|||
if bs.targetServer != nil { |
|||
bs.targetServer.Close() |
|||
} |
|||
bs.blockStore.Close() |
|||
glog.V(0).Infof("block service: shut down") |
|||
} |
|||
@ -0,0 +1,59 @@ |
|||
package weed_server |
|||
|
|||
import ( |
|||
"path/filepath" |
|||
"testing" |
|||
|
|||
"github.com/seaweedfs/seaweedfs/weed/storage/blockvol" |
|||
) |
|||
|
|||
// createTestBlockVolFile creates a small, fully-initialized block volume file
// named name under dir and returns its path. The volume is 1024 blocks of
// 4 KiB (4 MiB total) with 64 KiB extents and a 1 MiB WAL, and is closed
// before returning so the caller (e.g. StartBlockService) can reopen it.
func createTestBlockVolFile(t *testing.T, dir, name string) string {
	t.Helper()
	path := filepath.Join(dir, name)
	vol, err := blockvol.CreateBlockVol(path, blockvol.CreateOptions{
		VolumeSize: 1024 * 4096,
		BlockSize:  4096,
		ExtentSize: 65536,
		WALSize:    1 << 20,
	})
	if err != nil {
		t.Fatal(err)
	}
	vol.Close()
	return path
}
|||
|
|||
func TestBlockServiceDisabledByDefault(t *testing.T) { |
|||
// Empty blockDir means feature is disabled.
|
|||
bs := StartBlockService("0.0.0.0:3260", "", "") |
|||
if bs != nil { |
|||
bs.Shutdown() |
|||
t.Fatal("expected nil BlockService when blockDir is empty") |
|||
} |
|||
|
|||
// Shutdown on nil should be safe (no panic).
|
|||
var nilBS *BlockService |
|||
nilBS.Shutdown() |
|||
} |
|||
|
|||
// TestBlockServiceStartAndShutdown starts the block service against a
// directory containing one pre-created .blk file and verifies the volume is
// picked up and registered in the store.
func TestBlockServiceStartAndShutdown(t *testing.T) {
	dir := t.TempDir()
	createTestBlockVolFile(t, dir, "testvol.blk")

	// Port 0 lets the OS pick a free port, so parallel runs don't collide.
	bs := StartBlockService("127.0.0.1:0", dir, "iqn.2024-01.com.test:vol.")
	if bs == nil {
		t.Fatal("expected non-nil BlockService")
	}
	defer bs.Shutdown()

	// Verify the volume was registered.
	paths := bs.blockStore.ListBlockVolumes()
	if len(paths) != 1 {
		t.Fatalf("expected 1 volume, got %d", len(paths))
	}

	expected := filepath.Join(dir, "testvol.blk")
	if paths[0] != expected {
		t.Fatalf("expected path %s, got %s", expected, paths[0])
	}
}
|||
@ -0,0 +1,47 @@ |
|||
package blockvol |
|||
|
|||
import ( |
|||
"github.com/seaweedfs/seaweedfs/weed/storage/blockvol/iscsi" |
|||
) |
|||
|
|||
// BlockVolAdapter wraps a *BlockVol to implement the iscsi.BlockDevice interface,
// bridging the BlockVol storage engine to iSCSI target sessions.
//
// Every method delegates directly to the wrapped volume; the adapter holds no
// state of its own and adds no locking, so its concurrency guarantees are
// exactly those of *BlockVol.
type BlockVolAdapter struct {
	vol *BlockVol
}

// NewBlockVolAdapter creates a BlockDevice adapter for the given BlockVol.
func NewBlockVolAdapter(vol *BlockVol) *BlockVolAdapter {
	return &BlockVolAdapter{vol: vol}
}

// ReadAt reads length units starting at lba via BlockVol.ReadLBA.
// (Units of lba/length — bytes vs. blocks — follow ReadLBA; confirm there.)
func (a *BlockVolAdapter) ReadAt(lba uint64, length uint32) ([]byte, error) {
	return a.vol.ReadLBA(lba, length)
}

// WriteAt writes data starting at lba via BlockVol.WriteLBA.
func (a *BlockVolAdapter) WriteAt(lba uint64, data []byte) error {
	return a.vol.WriteLBA(lba, data)
}

// Trim discards a range starting at lba via BlockVol.Trim.
func (a *BlockVolAdapter) Trim(lba uint64, length uint32) error {
	return a.vol.Trim(lba, length)
}

// SyncCache flushes pending writes via BlockVol.SyncCache.
func (a *BlockVolAdapter) SyncCache() error {
	return a.vol.SyncCache()
}

// BlockSize reports the volume's logical block size from its Info snapshot.
func (a *BlockVolAdapter) BlockSize() uint32 {
	return a.vol.Info().BlockSize
}

// VolumeSize reports the volume's total size from its Info snapshot.
func (a *BlockVolAdapter) VolumeSize() uint64 {
	return a.vol.Info().VolumeSize
}

// IsHealthy reports the volume's health flag from its Info snapshot.
func (a *BlockVolAdapter) IsHealthy() bool {
	return a.vol.Info().Healthy
}

// Compile-time check that BlockVolAdapter implements iscsi.BlockDevice.
var _ iscsi.BlockDevice = (*BlockVolAdapter)(nil)
|||
@ -0,0 +1,70 @@ |
|||
package blockvol |
|||
|
|||
import ( |
|||
"path/filepath" |
|||
"testing" |
|||
|
|||
"github.com/seaweedfs/seaweedfs/weed/storage/blockvol/iscsi" |
|||
) |
|||
|
|||
// TestAdapterImplementsInterface creates a real BlockVol on disk, wraps it in
// a BlockVolAdapter, and exercises every BlockDevice method through the
// adapter: geometry getters, a write/read round-trip, SyncCache, and Trim.
func TestAdapterImplementsInterface(t *testing.T) {
	dir := t.TempDir()
	path := filepath.Join(dir, "adapter_test.blk")

	vol, err := CreateBlockVol(path, CreateOptions{
		VolumeSize: 1024 * 4096, // 1024 blocks
		BlockSize:  4096,
		ExtentSize: 65536,
		WALSize:    1 << 20,
	})
	if err != nil {
		t.Fatal(err)
	}
	defer vol.Close()

	adapter := NewBlockVolAdapter(vol)

	// Verify it satisfies the interface.
	var _ iscsi.BlockDevice = adapter

	// Test basic operations through the adapter.
	if adapter.BlockSize() != 4096 {
		t.Fatalf("BlockSize: got %d, want 4096", adapter.BlockSize())
	}
	if adapter.VolumeSize() != 1024*4096 {
		t.Fatalf("VolumeSize: got %d, want %d", adapter.VolumeSize(), 1024*4096)
	}
	if !adapter.IsHealthy() {
		t.Fatal("expected healthy")
	}

	// Write and read back through adapter.
	data := make([]byte, 4096)
	for i := range data {
		data[i] = 0xAB
	}
	if err := adapter.WriteAt(0, data); err != nil {
		t.Fatal(err)
	}

	got, err := adapter.ReadAt(0, 4096)
	if err != nil {
		t.Fatal(err)
	}
	if len(got) != 4096 {
		t.Fatalf("ReadAt length: got %d, want 4096", len(got))
	}
	// Spot-check first and last byte of the pattern.
	if got[0] != 0xAB || got[4095] != 0xAB {
		t.Fatal("data mismatch")
	}

	// SyncCache through adapter.
	if err := adapter.SyncCache(); err != nil {
		t.Fatal(err)
	}

	// Trim through adapter.
	if err := adapter.Trim(0, 4096); err != nil {
		t.Fatal(err)
	}
}
|||
1011  weed/storage/blockvol/blockvol_test.go
File diff suppressed because it is too large — View File
@ -0,0 +1,49 @@ |
|||
package blockvol |
|||
|
|||
import ( |
|||
"testing" |
|||
"time" |
|||
) |
|||
|
|||
// TestBugGCPanicWaitersHung demonstrates that a panic in syncFunc
// leaves all Submit() waiters permanently blocked.
//
// BUG: Run() has no panic recovery. When syncFunc panics, the batch
// of waiters (each blocked on <-ch in Submit) are never notified.
// They hang forever, leaking goroutines.
//
// FIX: Add defer/recover in Run() that drains pending waiters with
// an error before exiting.
//
// This test FAILS until the bug is fixed.
func TestBugGCPanicWaitersHung(t *testing.T) {
	gc := NewGroupCommitter(GroupCommitterConfig{
		SyncFunc: func() error {
			panic("simulated disk panic")
		},
		MaxDelay: 10 * time.Millisecond,
	})

	// Wrap Run() so the panic doesn't kill the test process.
	go func() {
		defer func() { recover() }()
		gc.Run()
	}()

	// Submit should return an error (not hang forever).
	result := make(chan error, 1)
	go func() {
		result <- gc.Submit()
	}()

	select {
	case err := <-result:
		// GOOD: Submit returned (with an error, presumably).
		if err == nil {
			t.Error("Submit returned nil; expected a panic-related error")
		}
		t.Logf("Submit returned: %v", err)
	case <-time.After(3 * time.Second):
		// 3s is generous next to the 10ms MaxDelay: only the buggy
		// (hung-forever) path reaches this arm.
		t.Fatal("BUG: Submit hung forever after syncFunc panic -- waiters not drained")
	}
}
|||
@ -0,0 +1,86 @@ |
|||
package blockvol |
|||
|
|||
import ( |
|||
"errors" |
|||
"fmt" |
|||
"time" |
|||
) |
|||
|
|||
// BlockVolConfig configures tunable parameters for a BlockVol engine.
// A zero-value config is valid and will use defaults via applyDefaults().
type BlockVolConfig struct {
	GroupCommitMaxDelay     time.Duration // max wait before flushing a partial batch (default 1ms)
	GroupCommitMaxBatch     int           // flush immediately when this many waiters (default 64)
	GroupCommitLowWatermark int           // skip delay if fewer than this many pending (default 4)
	WALPressureThreshold    float64       // fraction of WAL used that triggers urgent flush (default 0.8)
	WALFullTimeout          time.Duration // max retry time when WAL is full (default 5s)
	FlushInterval           time.Duration // flusher periodic interval (default 100ms)
	DirtyMapShards          int           // number of dirty map shards, must be power-of-2 (default 256)
}

// DefaultConfig returns a BlockVolConfig with production defaults.
func DefaultConfig() BlockVolConfig {
	var c BlockVolConfig
	c.GroupCommitMaxDelay = 1 * time.Millisecond
	c.GroupCommitMaxBatch = 64
	c.GroupCommitLowWatermark = 4
	c.WALPressureThreshold = 0.8
	c.WALFullTimeout = 5 * time.Second
	c.FlushInterval = 100 * time.Millisecond
	c.DirtyMapShards = 256
	return c
}

// applyDefaults fills zero-value fields with production defaults.
//
// NOTE(review): a caller that deliberately sets GroupCommitLowWatermark to 0
// (a value Validate accepts) gets it overwritten with the default here —
// confirm 0 is never a meaningful explicit setting.
func (c *BlockVolConfig) applyDefaults() {
	def := DefaultConfig()
	if c.GroupCommitMaxDelay == 0 {
		c.GroupCommitMaxDelay = def.GroupCommitMaxDelay
	}
	if c.GroupCommitMaxBatch == 0 {
		c.GroupCommitMaxBatch = def.GroupCommitMaxBatch
	}
	if c.GroupCommitLowWatermark == 0 {
		c.GroupCommitLowWatermark = def.GroupCommitLowWatermark
	}
	if c.WALPressureThreshold == 0 {
		c.WALPressureThreshold = def.WALPressureThreshold
	}
	if c.WALFullTimeout == 0 {
		c.WALFullTimeout = def.WALFullTimeout
	}
	if c.FlushInterval == 0 {
		c.FlushInterval = def.FlushInterval
	}
	if c.DirtyMapShards == 0 {
		c.DirtyMapShards = def.DirtyMapShards
	}
}

// errInvalidConfig is the sentinel wrapped by every Validate failure,
// so callers can detect config errors with errors.Is.
var errInvalidConfig = errors.New("blockvol: invalid config")

// Validate checks that config values are within acceptable ranges.
// The first offending field (in declaration-check order) is reported.
func (c *BlockVolConfig) Validate() error {
	switch {
	case c.DirtyMapShards <= 0 || (c.DirtyMapShards&(c.DirtyMapShards-1)) != 0:
		return fmt.Errorf("%w: DirtyMapShards must be a positive power-of-2, got %d", errInvalidConfig, c.DirtyMapShards)
	case c.WALPressureThreshold <= 0 || c.WALPressureThreshold > 1:
		return fmt.Errorf("%w: WALPressureThreshold must be in (0,1], got %f", errInvalidConfig, c.WALPressureThreshold)
	case c.GroupCommitMaxDelay <= 0:
		return fmt.Errorf("%w: GroupCommitMaxDelay must be positive, got %v", errInvalidConfig, c.GroupCommitMaxDelay)
	case c.GroupCommitMaxBatch <= 0:
		return fmt.Errorf("%w: GroupCommitMaxBatch must be positive, got %d", errInvalidConfig, c.GroupCommitMaxBatch)
	case c.GroupCommitLowWatermark < 0:
		return fmt.Errorf("%w: GroupCommitLowWatermark must be >= 0, got %d", errInvalidConfig, c.GroupCommitLowWatermark)
	case c.WALFullTimeout <= 0:
		return fmt.Errorf("%w: WALFullTimeout must be positive, got %v", errInvalidConfig, c.WALFullTimeout)
	case c.FlushInterval <= 0:
		return fmt.Errorf("%w: FlushInterval must be positive, got %v", errInvalidConfig, c.FlushInterval)
	}
	return nil
}
|||
@ -0,0 +1,115 @@ |
|||
package blockvol |
|||
|
|||
import ( |
|||
"errors" |
|||
"testing" |
|||
"time" |
|||
) |
|||
|
|||
func TestBlockVolConfig(t *testing.T) { |
|||
tests := []struct { |
|||
name string |
|||
run func(t *testing.T) |
|||
}{ |
|||
{name: "config_defaults", run: testConfigDefaults}, |
|||
{name: "config_validate_good", run: testConfigValidateGood}, |
|||
{name: "config_validate_bad_shards", run: testConfigValidateBadShards}, |
|||
{name: "config_validate_bad_threshold", run: testConfigValidateBadThreshold}, |
|||
} |
|||
for _, tt := range tests { |
|||
t.Run(tt.name, func(t *testing.T) { |
|||
tt.run(t) |
|||
}) |
|||
} |
|||
} |
|||
|
|||
// testConfigDefaults checks every DefaultConfig() field against its documented
// production value and confirms the default config passes Validate().
func testConfigDefaults(t *testing.T) {
	cfg := DefaultConfig()

	if cfg.GroupCommitMaxDelay != 1*time.Millisecond {
		t.Errorf("GroupCommitMaxDelay = %v, want 1ms", cfg.GroupCommitMaxDelay)
	}
	if cfg.GroupCommitMaxBatch != 64 {
		t.Errorf("GroupCommitMaxBatch = %d, want 64", cfg.GroupCommitMaxBatch)
	}
	if cfg.GroupCommitLowWatermark != 4 {
		t.Errorf("GroupCommitLowWatermark = %d, want 4", cfg.GroupCommitLowWatermark)
	}
	if cfg.WALPressureThreshold != 0.8 {
		t.Errorf("WALPressureThreshold = %f, want 0.8", cfg.WALPressureThreshold)
	}
	if cfg.WALFullTimeout != 5*time.Second {
		t.Errorf("WALFullTimeout = %v, want 5s", cfg.WALFullTimeout)
	}
	if cfg.FlushInterval != 100*time.Millisecond {
		t.Errorf("FlushInterval = %v, want 100ms", cfg.FlushInterval)
	}
	if cfg.DirtyMapShards != 256 {
		t.Errorf("DirtyMapShards = %d, want 256", cfg.DirtyMapShards)
	}

	// Defaults must be self-consistent: Validate accepts its own output.
	if err := cfg.Validate(); err != nil {
		t.Errorf("DefaultConfig().Validate() = %v, want nil", err)
	}
}
|||
|
|||
// testConfigValidateGood exercises Validate() on configs at the legal
// boundaries: the defaults, watermark 0, threshold exactly 1.0, a single
// shard, a large power-of-2 shard count, and very small positive durations.
func testConfigValidateGood(t *testing.T) {
	cases := []BlockVolConfig{
		DefaultConfig(),
		{
			GroupCommitMaxDelay:     5 * time.Millisecond,
			GroupCommitMaxBatch:     128,
			GroupCommitLowWatermark: 0, // 0 is explicitly allowed by Validate
			WALPressureThreshold:    1.0,
			WALFullTimeout:          10 * time.Second,
			FlushInterval:           50 * time.Millisecond,
			DirtyMapShards:          1, // 1 is a power of two
		},
		{
			GroupCommitMaxDelay:     1 * time.Microsecond,
			GroupCommitMaxBatch:     1,
			GroupCommitLowWatermark: 100,
			WALPressureThreshold:    0.01,
			WALFullTimeout:          1 * time.Millisecond,
			FlushInterval:           1 * time.Millisecond,
			DirtyMapShards:          1024,
		},
	}
	for i, cfg := range cases {
		if err := cfg.Validate(); err != nil {
			t.Errorf("case %d: Validate() = %v, want nil", i, err)
		}
	}
}
|||
|
|||
// testConfigValidateBadShards verifies Validate() rejects shard counts that
// are zero, negative, or not a power of two, and that each error unwraps to
// errInvalidConfig via errors.Is.
func testConfigValidateBadShards(t *testing.T) {
	cases := []int{0, 3, 5, 7, 10, 15, -1}
	for _, shards := range cases {
		cfg := DefaultConfig()
		cfg.DirtyMapShards = shards
		err := cfg.Validate()
		if err == nil {
			t.Errorf("DirtyMapShards=%d: expected error, got nil", shards)
			continue
		}
		if !errors.Is(err, errInvalidConfig) {
			t.Errorf("DirtyMapShards=%d: expected errInvalidConfig, got %v", shards, err)
		}
	}
}
|||
|
|||
// testConfigValidateBadThreshold verifies Validate() rejects WAL pressure
// thresholds outside (0,1] — zero, negative, and above 1 — and that each
// error unwraps to errInvalidConfig via errors.Is.
func testConfigValidateBadThreshold(t *testing.T) {
	cases := []float64{0, -0.1, -1, 1.01, 2.0}
	for _, thresh := range cases {
		cfg := DefaultConfig()
		cfg.WALPressureThreshold = thresh
		err := cfg.Validate()
		if err == nil {
			t.Errorf("WALPressureThreshold=%f: expected error, got nil", thresh)
			continue
		}
		if !errors.Is(err, errInvalidConfig) {
			t.Errorf("WALPressureThreshold=%f: expected errInvalidConfig, got %v", thresh, err)
		}
	}
}
|||
@ -0,0 +1,69 @@ |
|||
package iscsi |
|||
|
|||
import ( |
|||
"encoding/binary" |
|||
"testing" |
|||
"time" |
|||
) |
|||
|
|||
|
|||
// TestBugCollectDataOutNoTimeout demonstrates that collectDataOut
// blocks forever if the initiator stops sending Data-Out PDUs.
//
// BUG: collectDataOut calls ReadPDU(s.conn) in a loop with no
// read deadline. If the initiator sends a WRITE requiring R2T,
// receives the R2T, but never sends Data-Out, the session is stuck
// forever (until TCP keepalive kills it, which could be minutes).
//
// FIX: Set a read deadline on s.conn before entering the Data-Out
// collection loop (e.g., 30 seconds), and return a timeout error
// if the deadline fires.
//
// This test FAILS until the bug is fixed.
func TestBugCollectDataOutNoTimeout(t *testing.T) {
	env := setupSessionWithConfig(t, func(cfg *TargetConfig) {
		cfg.MaxRecvDataSegmentLength = 4096
		cfg.FirstBurstLength = 0             // force R2T (no immediate data accepted)
		cfg.DataOutTimeout = 1 * time.Second // short timeout for test
	})
	doLogin(t, env.clientConn)

	// Send WRITE command that requires R2T (no immediate data).
	cmd := &PDU{}
	cmd.SetOpcode(OpSCSICmd)
	cmd.SetOpSpecific1(FlagF | FlagW)
	cmd.SetInitiatorTaskTag(0xBEEF)
	cmd.SetExpectedDataTransferLength(4096)
	cmd.SetCmdSN(2)
	var cdb [16]byte
	cdb[0] = ScsiWrite10
	binary.BigEndian.PutUint32(cdb[2:6], 0) // LBA 0
	binary.BigEndian.PutUint16(cdb[7:9], 1) // 1 block
	cmd.SetCDB(cdb)
	// No DataSegment — forces R2T.

	if err := WritePDU(env.clientConn, cmd); err != nil {
		t.Fatalf("WritePDU: %v", err)
	}

	// Read R2T from server.
	r2t, err := ReadPDU(env.clientConn)
	if err != nil {
		t.Fatalf("ReadPDU (R2T): %v", err)
	}
	if r2t.Opcode() != OpR2T {
		t.Fatalf("expected R2T, got %s", OpcodeName(r2t.Opcode()))
	}

	// DO NOT send Data-Out. The session should time out and close.
	// Currently it blocks forever in collectDataOut → ReadPDU(s.conn).

	// 5s bound is generous against the 1s DataOutTimeout above; only the
	// buggy (no-deadline) path reaches the timeout arm.
	select {
	case err := <-env.done:
		// GOOD: session exited (timed out or errored out).
		t.Logf("session exited: %v", err)
	case <-time.After(5 * time.Second):
		env.clientConn.Close()
		t.Fatal("collectDataOut did not time out within 5s")
	}
}
|||
@ -0,0 +1,114 @@ |
|||
package iscsi |
|||
|
|||
import ( |
|||
"encoding/binary" |
|||
"runtime" |
|||
"testing" |
|||
"time" |
|||
) |
|||
|
|||
// TestBugPendingQueueUnbounded demonstrates that the pending queue
// in collectDataOut grows without bound.
//
// BUG: During Data-Out collection, any non-DataOut PDU is appended
// to s.pending (session.go line 428) with no limit. A misbehaving
// initiator can send thousands of PDUs during a Data-Out exchange,
// causing unbounded memory growth (OOM risk).
//
// FIX: Cap s.pending at a reasonable limit (e.g., 64 entries).
// Drop or reject excess PDUs with a REJECT response.
//
// This test FAILS until the bug is fixed.
func TestBugPendingQueueUnbounded(t *testing.T) {
	env := setupSessionWithConfig(t, func(cfg *TargetConfig) {
		cfg.MaxRecvDataSegmentLength = 4096
		cfg.FirstBurstLength = 0 // force R2T
	})
	doLogin(t, env.clientConn)

	// Start WRITE requiring R2T.
	cmd := &PDU{}
	cmd.SetOpcode(OpSCSICmd)
	cmd.SetOpSpecific1(FlagF | FlagW)
	cmd.SetInitiatorTaskTag(0xAAAA)
	cmd.SetExpectedDataTransferLength(4096)
	cmd.SetCmdSN(2)
	var cdb [16]byte
	cdb[0] = ScsiWrite10
	binary.BigEndian.PutUint32(cdb[2:6], 0)
	binary.BigEndian.PutUint16(cdb[7:9], 1)
	cmd.SetCDB(cdb)
	if err := WritePDU(env.clientConn, cmd); err != nil {
		t.Fatal(err)
	}

	// Read R2T.
	r2t, err := ReadPDU(env.clientConn)
	if err != nil {
		t.Fatal(err)
	}
	if r2t.Opcode() != OpR2T {
		t.Fatalf("expected R2T, got %s", OpcodeName(r2t.Opcode()))
	}

	// Flood with 200 NOP-Out PDUs during Data-Out collection.
	// These all get queued in s.pending with no limit.
	memBefore := getAlloc()
	for i := 0; i < 200; i++ {
		nop := &PDU{}
		nop.SetOpcode(OpNOPOut)
		nop.SetOpSpecific1(FlagF)
		nop.SetInitiatorTaskTag(uint32(0xB000 + i))
		nop.SetImmediate(true)
		if err := WritePDU(env.clientConn, nop); err != nil {
			// If session closed or rejected, that's OK.
			t.Logf("NOP %d: write failed (session may have rejected): %v", i, err)
			break
		}
	}

	// Complete the Data-Out to let the session process the pending queue.
	dataOut := &PDU{}
	dataOut.SetOpcode(OpSCSIDataOut)
	dataOut.SetOpSpecific1(FlagF)
	dataOut.SetInitiatorTaskTag(0xAAAA)
	dataOut.SetTargetTransferTag(r2t.TargetTransferTag())
	dataOut.SetBufferOffset(0)
	dataOut.DataSegment = make([]byte, 4096)
	if err := WritePDU(env.clientConn, dataOut); err != nil {
		// Session may have closed if it correctly rejected the flood.
		t.Logf("Data-Out write: %v (may be expected if session rejected flood)", err)
	}

	// Read responses. Count how many NOP-In responses we get.
	env.clientConn.SetReadDeadline(time.Now().Add(2 * time.Second))
	nopResponses := 0
	for {
		resp, err := ReadPDU(env.clientConn)
		if err != nil {
			// Deadline or close terminates the drain loop.
			break
		}
		if resp.Opcode() == OpNOPIn {
			nopResponses++
		}
	}

	// NOTE(review): the memory delta is GC-noisy and logged for context
	// only; the assertion below is on the response count.
	memAfter := getAlloc()
	t.Logf("pending flood: 200 NOPs sent, %d NOP-In responses, mem delta ~%d KB",
		nopResponses, (memAfter-memBefore)/1024)

	// BUG: All 200 NOPs were queued in pending and processed.
	// A well-behaved server should cap the pending queue.
	// With a cap of 64, at most 64 NOP responses should arrive.
	const maxAcceptable = 64
	if nopResponses > maxAcceptable {
		t.Fatalf("BUG: received %d NOP-In responses (all 200 queued in pending) -- "+
			"pending queue is unbounded, should be capped at %d", nopResponses, maxAcceptable)
	}
}
|||
|
|||
// getAlloc returns the number of heap-allocated bytes currently in use,
// as reported by runtime.ReadMemStats. Used to estimate memory growth
// across a flood of PDUs in tests.
func getAlloc() uint64 {
	var stats runtime.MemStats
	runtime.ReadMemStats(&stats)
	return stats.Alloc
}
|||
@ -0,0 +1,634 @@ |
|||
package iscsi |
|||
|
|||
import ( |
|||
"encoding/binary" |
|||
"io" |
|||
"log" |
|||
"net" |
|||
"runtime" |
|||
"sync" |
|||
"testing" |
|||
"time" |
|||
) |
|||
|
|||
// TestQAPhase3RXTX tests Phase 3 iSCSI RX/TX split adversarial scenarios.
|
|||
func TestQAPhase3RXTX(t *testing.T) { |
|||
tests := []struct { |
|||
name string |
|||
run func(t *testing.T) |
|||
}{ |
|||
// QA-2.1: Channel & Goroutine Safety
|
|||
{name: "rxtx_respch_full_backpressure", run: testQARXTXRespChFullBackpressure}, |
|||
{name: "rxtx_double_close_session", run: testQARXTXDoubleCloseSession}, |
|||
{name: "rxtx_txloop_goroutine_leak", run: testQARXTXTxLoopGoroutineLeak}, |
|||
{name: "rxtx_concurrent_session_50", run: testQARXTXConcurrentSession50}, |
|||
|
|||
// QA-2.2: Pending Queue & Data-Out
|
|||
{name: "rxtx_scsi_cmd_during_dataout", run: testQARXTXSCSICmdDuringDataOut}, |
|||
{name: "rxtx_nop_response_timing", run: testQARXTXNOPResponseTiming}, |
|||
|
|||
// QA-2.3: StatSN & Sequence Edge Cases
|
|||
{name: "rxtx_statsn_wrap", run: testQARXTXStatSNWrap}, |
|||
{name: "rxtx_expstsn_mismatch", run: testQARXTXExpStatSNMismatch}, |
|||
{name: "rxtx_statsn_after_error_response", run: testQARXTXStatSNAfterErrorResponse}, |
|||
|
|||
// QA-2.4: Shutdown Ordering
|
|||
{name: "rxtx_shutdown_writer_queued", run: testQARXTXShutdownWriterQueued}, |
|||
{name: "rxtx_target_close_active_io", run: testQARXTXTargetCloseActiveIO}, |
|||
{name: "rxtx_session_after_target_close", run: testQARXTXSessionAfterTargetClose}, |
|||
} |
|||
for _, tt := range tests { |
|||
t.Run(tt.name, func(t *testing.T) { |
|||
tt.run(t) |
|||
}) |
|||
} |
|||
} |
|||
|
|||
// --- QA-2.1: Channel & Goroutine Safety ---
|
|||
|
|||
// testQARXTXRespChFullBackpressure floods a session with 100 NOP-Outs while
// a background reader drains responses, then closes the client connection and
// verifies both the reader and the session goroutine exit cleanly.
func testQARXTXRespChFullBackpressure(t *testing.T) {
	// With net.Pipe, writes block when reader isn't consuming, so we can't
	// truly fill respCh without reading. Instead, test that closing the
	// connection while enqueue is potentially blocked works cleanly.
	env := setupSession(t)
	doLogin(t, env.clientConn)

	// Start reading responses in background so writes don't block on net.Pipe.
	readDone := make(chan int, 1)
	go func() {
		count := 0
		for {
			_, err := ReadPDU(env.clientConn)
			if err != nil {
				break
			}
			count++
		}
		readDone <- count
	}()

	// Send 100 NOPs rapidly.
	sent := 0
	for i := 0; i < 100; i++ {
		nop := &PDU{}
		nop.SetOpcode(OpNOPOut)
		nop.SetOpSpecific1(FlagF)
		nop.SetInitiatorTaskTag(uint32(0x1000 + i))
		nop.SetImmediate(true)
		if err := WritePDU(env.clientConn, nop); err != nil {
			break
		}
		sent++
	}

	// Close conn to trigger cleanup.
	env.clientConn.Close()

	select {
	case count := <-readDone:
		t.Logf("backpressure: sent %d NOPs, received %d responses", sent, count)
	case <-time.After(3 * time.Second):
		t.Fatal("reader did not exit after conn close")
	}

	select {
	case <-env.done:
	case <-time.After(3 * time.Second):
		t.Fatal("session did not exit after conn close during backpressure")
	}
}
|
|||
// testQARXTXDoubleCloseSession verifies that Session.Close is idempotent:
// calling it twice must not panic, and the session goroutine must still exit.
func testQARXTXDoubleCloseSession(t *testing.T) {
	env := setupSession(t)
	doLogin(t, env.clientConn)

	// Close session twice -- should not panic.
	env.session.Close()
	env.session.Close()

	select {
	case <-env.done:
	case <-time.After(2 * time.Second):
		t.Fatal("session did not exit after double close")
	}
}
|||
|
|||
// testQARXTXTxLoopGoroutineLeak churns 50 login-then-close session cycles and
// checks that the process goroutine count does not grow, i.e. the RX/TX
// goroutines of each session are reaped.
//
// NOTE(review): NumGoroutine-based checks are inherently heuristic; the +10
// slack and settle sleeps below absorb runtime/GC goroutine noise.
func testQARXTXTxLoopGoroutineLeak(t *testing.T) {
	// Create and destroy 50 sessions rapidly. Goroutine count should not grow.
	baseGoroutines := runtime.NumGoroutine()

	for i := 0; i < 50; i++ {
		client, server := net.Pipe()
		dev := newMockDevice(256 * 4096)
		config := DefaultTargetConfig()
		config.TargetName = testTargetName
		resolver := newTestResolverWithDevice(dev)
		logger := log.New(io.Discard, "", 0)

		sess := NewSession(server, config, resolver, resolver, logger)
		done := make(chan error, 1)
		go func() {
			done <- sess.HandleConnection()
		}()

		// Login then immediately close.
		doLogin(t, client)
		client.Close()

		select {
		case <-done:
		case <-time.After(2 * time.Second):
			t.Fatalf("session %d did not exit", i)
		}
		server.Close()
	}

	// Allow goroutines to settle.
	time.Sleep(100 * time.Millisecond)
	runtime.GC()
	time.Sleep(50 * time.Millisecond)

	finalGoroutines := runtime.NumGoroutine()
	// Allow some slack for GC/runtime goroutines.
	if finalGoroutines > baseGoroutines+10 {
		t.Errorf("goroutine leak: started with %d, ended with %d after 50 sessions",
			baseGoroutines, finalGoroutines)
	}
	t.Logf("goroutine leak check: base=%d, final=%d", baseGoroutines, finalGoroutines)
}
|||
|
|||
// testQARXTXConcurrentSession50 opens 50 concurrent sessions against a single
// TargetServer; each logs in and issues one single-block WRITE. Failures
// inside worker goroutines are reported via t.Logf (not Fatal) so one bad
// session does not abort the rest, and the whole run is bounded by a 10s
// deadline so a hang fails the test instead of wedging the suite.
func testQARXTXConcurrentSession50(t *testing.T) {
	// 50 concurrent sessions on same TargetServer, each doing I/O.
	ts, addr := qaSetupTarget(t)
	_ = ts // server lifetime is managed by qaSetupTarget's cleanup

	var wg sync.WaitGroup
	wg.Add(50)
	for i := 0; i < 50; i++ {
		go func(id int) {
			defer wg.Done()
			conn, err := net.DialTimeout("tcp", addr, 2*time.Second)
			if err != nil {
				t.Logf("session %d: dial failed: %v", id, err)
				return
			}
			defer conn.Close()

			// NOTE(review): doLogin appears to call t.Fatal* on failure;
			// calling that from a non-test goroutine is undefined per the
			// testing docs -- consider an error-returning variant here.
			doLogin(t, conn)

			// Write 1 block. Each session uses a distinct ITT (id+100) and
			// its own CmdSN sequence starting at 2 (1 was the login).
			data := make([]byte, 4096)
			data[0] = byte(id)
			lba := uint32(id % 256)
			sendSCSIWriteImmediate(t, conn, lba, data, uint32(id+100), 2)
			resp, err := ReadPDU(conn)
			if err != nil {
				t.Logf("session %d: write response read failed: %v", id, err)
				return
			}
			if resp.Opcode() != OpSCSIResp {
				t.Logf("session %d: expected SCSI-Response, got %s", id, OpcodeName(resp.Opcode()))
			}
		}(i)
	}

	// Bound the overall wait so a hung session fails the test.
	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()

	select {
	case <-done:
	case <-time.After(10 * time.Second):
		t.Fatal("50 concurrent sessions did not complete in 10s")
	}
}
|
|||
// --- QA-2.2: Pending Queue & Data-Out ---
|
|||
|
|||
// testQARXTXSCSICmdDuringDataOut verifies command queuing during an R2T
// Data-Out exchange: a READ arriving while the target is collecting Data-Out
// for an outstanding WRITE must be held in the pending queue, and the WRITE's
// response must be delivered before the READ's.
func testQARXTXSCSICmdDuringDataOut(t *testing.T) {
	// Start WRITE requiring R2T, send a READ_10 during Data-Out exchange.
	// The READ should be queued in pending and executed after Data-Out completes.
	env := setupSessionWithConfig(t, func(cfg *TargetConfig) {
		cfg.MaxRecvDataSegmentLength = 4096
		cfg.FirstBurstLength = 0 // force R2T
	})
	doLogin(t, env.clientConn)

	// Pre-write data to LBA 10 so we can read it back.
	preData := make([]byte, 4096)
	for i := range preData {
		preData[i] = 0xBB
	}
	sendSCSIWriteImmediate(t, env.clientConn, 10, preData, 0x50, 2)
	resp, err := ReadPDU(env.clientConn)
	if err != nil {
		t.Fatalf("pre-write read response: %v", err)
	}
	if resp.Opcode() != OpSCSIResp {
		t.Fatalf("pre-write: expected SCSI-Response, got %s", OpcodeName(resp.Opcode()))
	}

	// Start WRITE to LBA 0 with no immediate data (requires R2T).
	writeData := make([]byte, 4096)
	for i := range writeData {
		writeData[i] = 0xAA
	}
	cmd := &PDU{}
	cmd.SetOpcode(OpSCSICmd)
	cmd.SetOpSpecific1(FlagF | FlagW)
	cmd.SetInitiatorTaskTag(0x100)
	cmd.SetExpectedDataTransferLength(4096)
	cmd.SetCmdSN(3)
	var cdb [16]byte
	cdb[0] = ScsiWrite10
	binary.BigEndian.PutUint32(cdb[2:6], 0) // LBA 0
	binary.BigEndian.PutUint16(cdb[7:9], 1) // 1 block
	cmd.SetCDB(cdb)
	// No DataSegment (no immediate data).
	if err := WritePDU(env.clientConn, cmd); err != nil {
		t.Fatalf("write cmd: %v", err)
	}

	// Read R2T.
	r2t, err := ReadPDU(env.clientConn)
	if err != nil {
		t.Fatalf("read R2T: %v", err)
	}
	if r2t.Opcode() != OpR2T {
		t.Fatalf("expected R2T, got %s", OpcodeName(r2t.Opcode()))
	}

	// While Data-Out is expected, send a READ_10 for LBA 10. The WRITE is
	// still outstanding, so the target should queue this command.
	sendSCSIRead(t, env.clientConn, 10, 1, 0x200, 4)

	// Now send Data-Out to complete the WRITE.
	dataOut := &PDU{}
	dataOut.SetOpcode(OpSCSIDataOut)
	dataOut.SetOpSpecific1(FlagF)
	dataOut.SetInitiatorTaskTag(0x100)
	dataOut.SetTargetTransferTag(r2t.TargetTransferTag())
	dataOut.SetBufferOffset(0)
	dataOut.DataSegment = writeData
	if err := WritePDU(env.clientConn, dataOut); err != nil {
		t.Fatalf("data-out: %v", err)
	}

	// Should receive WRITE response first, then READ response.
	resp1, err := ReadPDU(env.clientConn)
	if err != nil {
		t.Fatalf("resp1: %v", err)
	}

	resp2, err := ReadPDU(env.clientConn)
	if err != nil {
		t.Fatalf("resp2: %v", err)
	}

	// First response should be WRITE (ITT=0x100).
	if resp1.InitiatorTaskTag() != 0x100 {
		t.Errorf("first response ITT=0x%x, want 0x100 (WRITE)", resp1.InitiatorTaskTag())
	}

	// Second response should be READ Data-In or SCSI-Response (ITT=0x200).
	if resp2.InitiatorTaskTag() != 0x200 {
		t.Errorf("second response ITT=0x%x, want 0x200 (READ)", resp2.InitiatorTaskTag())
	}
}
|
|||
// testQARXTXNOPResponseTiming checks response ordering when a NOP-Out
// arrives in the middle of an R2T Data-Out exchange: the WRITE's SCSI
// response (ITT 0x300) must be sent first, then the NOP-In (ITT 0x400).
func testQARXTXNOPResponseTiming(t *testing.T) {
	// Send NOP-Out during R2T exchange. NOP-In should arrive after WRITE response.
	env := setupSessionWithConfig(t, func(cfg *TargetConfig) {
		cfg.MaxRecvDataSegmentLength = 4096
		cfg.FirstBurstLength = 0 // force R2T
	})
	doLogin(t, env.clientConn)

	// Start WRITE requiring R2T (no immediate data attached).
	cmd := &PDU{}
	cmd.SetOpcode(OpSCSICmd)
	cmd.SetOpSpecific1(FlagF | FlagW)
	cmd.SetInitiatorTaskTag(0x300)
	cmd.SetExpectedDataTransferLength(4096)
	cmd.SetCmdSN(2)
	var cdb [16]byte
	cdb[0] = ScsiWrite10
	binary.BigEndian.PutUint32(cdb[2:6], 0) // LBA 0
	binary.BigEndian.PutUint16(cdb[7:9], 1) // 1 block
	cmd.SetCDB(cdb)
	if err := WritePDU(env.clientConn, cmd); err != nil {
		t.Fatal(err)
	}

	// Read R2T.
	r2t, err := ReadPDU(env.clientConn)
	if err != nil {
		t.Fatal(err)
	}
	if r2t.Opcode() != OpR2T {
		t.Fatalf("expected R2T, got %s", OpcodeName(r2t.Opcode()))
	}

	// Send NOP-Out during Data-Out collection.
	nop := &PDU{}
	nop.SetOpcode(OpNOPOut)
	nop.SetOpSpecific1(FlagF)
	nop.SetInitiatorTaskTag(0x400)
	nop.SetImmediate(true)
	if err := WritePDU(env.clientConn, nop); err != nil {
		t.Fatal(err)
	}

	// Send Data-Out to complete WRITE.
	dataOut := &PDU{}
	dataOut.SetOpcode(OpSCSIDataOut)
	dataOut.SetOpSpecific1(FlagF)
	dataOut.SetInitiatorTaskTag(0x300)
	dataOut.SetTargetTransferTag(r2t.TargetTransferTag())
	dataOut.SetBufferOffset(0)
	dataOut.DataSegment = make([]byte, 4096)
	if err := WritePDU(env.clientConn, dataOut); err != nil {
		t.Fatal(err)
	}

	// Collect responses: WRITE response (0x300) and NOP-In (0x400).
	var responses []*PDU
	env.clientConn.SetReadDeadline(time.Now().Add(2 * time.Second))
	for i := 0; i < 2; i++ {
		resp, err := ReadPDU(env.clientConn)
		if err != nil {
			t.Fatalf("response %d: %v", i, err)
		}
		responses = append(responses, resp)
	}

	// WRITE response should come first (Data-Out completed, then pending NOP processed).
	if responses[0].InitiatorTaskTag() != 0x300 {
		t.Errorf("first response ITT=0x%x, want 0x300 (WRITE)", responses[0].InitiatorTaskTag())
	}
	if responses[1].InitiatorTaskTag() != 0x400 {
		t.Errorf("second response ITT=0x%x, want 0x400 (NOP)", responses[1].InitiatorTaskTag())
	}
	if responses[1].Opcode() != OpNOPIn {
		t.Errorf("second response opcode=%s, want NOP-In", OpcodeName(responses[1].Opcode()))
	}
}
|
|||
// --- QA-2.3: StatSN & Sequence Edge Cases ---
|
|||
|
|||
func testQARXTXStatSNWrap(t *testing.T) { |
|||
// Drive StatSN close to 0xFFFFFFFF and verify it wraps to 0.
|
|||
env := setupSession(t) |
|||
doLogin(t, env.clientConn) |
|||
|
|||
// We can't easily set the initial StatSN, but we can observe the
|
|||
// current StatSN from a response and then verify monotonic increment.
|
|||
// Send 5 NOP-Outs and verify StatSN increases by 1 each time.
|
|||
var statSNs []uint32 |
|||
for i := 0; i < 5; i++ { |
|||
nop := &PDU{} |
|||
nop.SetOpcode(OpNOPOut) |
|||
nop.SetOpSpecific1(FlagF) |
|||
nop.SetInitiatorTaskTag(uint32(0x1000 + i)) |
|||
nop.SetImmediate(true) |
|||
if err := WritePDU(env.clientConn, nop); err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
resp, err := ReadPDU(env.clientConn) |
|||
if err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
statSNs = append(statSNs, resp.StatSN()) |
|||
} |
|||
|
|||
// Verify monotonic increment.
|
|||
for i := 1; i < len(statSNs); i++ { |
|||
expected := statSNs[i-1] + 1 |
|||
if statSNs[i] != expected { |
|||
t.Errorf("StatSN[%d] = %d, want %d (monotonic)", i, statSNs[i], expected) |
|||
} |
|||
} |
|||
t.Logf("StatSN sequence: %v", statSNs) |
|||
} |
|||
|
|||
func testQARXTXExpStatSNMismatch(t *testing.T) { |
|||
// Send command with ExpStatSN != server's StatSN. Per RFC 7143,
|
|||
// ExpStatSN is advisory and should not cause rejection.
|
|||
env := setupSession(t) |
|||
doLogin(t, env.clientConn) |
|||
|
|||
// Send NOP with wrong ExpStatSN.
|
|||
nop := &PDU{} |
|||
nop.SetOpcode(OpNOPOut) |
|||
nop.SetOpSpecific1(FlagF) |
|||
nop.SetInitiatorTaskTag(0x5000) |
|||
nop.SetImmediate(true) |
|||
// Set ExpStatSN to a wrong value.
|
|||
binary.BigEndian.PutUint32(nop.BHS[28:32], 0xDEADBEEF) |
|||
|
|||
if err := WritePDU(env.clientConn, nop); err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
|
|||
resp, err := ReadPDU(env.clientConn) |
|||
if err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
|
|||
// Should still get NOP-In response (ExpStatSN mismatch is not an error).
|
|||
if resp.Opcode() != OpNOPIn { |
|||
t.Errorf("expected NOP-In, got %s", OpcodeName(resp.Opcode())) |
|||
} |
|||
if resp.InitiatorTaskTag() != 0x5000 { |
|||
t.Errorf("ITT = 0x%x, want 0x5000", resp.InitiatorTaskTag()) |
|||
} |
|||
} |
|||
|
|||
// testQARXTXStatSNAfterErrorResponse verifies that a SCSI error response
// (CHECK_CONDITION provoked by an out-of-range READ) consumes a StatSN
// exactly like a successful response: NOP -> error READ -> NOP must yield
// three consecutive StatSN values.
func testQARXTXStatSNAfterErrorResponse(t *testing.T) {
	// SCSI error response should still increment StatSN.
	env := setupSession(t)
	doLogin(t, env.clientConn)

	// First: send a NOP to get current StatSN.
	nop := &PDU{}
	nop.SetOpcode(OpNOPOut)
	nop.SetOpSpecific1(FlagF)
	nop.SetInitiatorTaskTag(0x6000)
	nop.SetImmediate(true)
	if err := WritePDU(env.clientConn, nop); err != nil {
		t.Fatal(err)
	}
	nopResp, err := ReadPDU(env.clientConn)
	if err != nil {
		t.Fatal(err)
	}
	baseSN := nopResp.StatSN()

	// Second: send READ to out-of-range LBA (causes CHECK_CONDITION).
	var cdb [16]byte
	cdb[0] = ScsiRead10
	binary.BigEndian.PutUint32(cdb[2:6], 0xFFFFFFF0) // huge LBA
	binary.BigEndian.PutUint16(cdb[7:9], 1)
	cmd := &PDU{}
	cmd.SetOpcode(OpSCSICmd)
	cmd.SetOpSpecific1(FlagF | FlagR)
	cmd.SetInitiatorTaskTag(0x6001)
	cmd.SetExpectedDataTransferLength(4096)
	cmd.SetCmdSN(2)
	cmd.SetCDB(cdb)
	if err := WritePDU(env.clientConn, cmd); err != nil {
		t.Fatal(err)
	}

	errResp, err := ReadPDU(env.clientConn)
	if err != nil {
		t.Fatal(err)
	}
	if errResp.Opcode() != OpSCSIResp {
		t.Fatalf("expected SCSI-Response, got %s", OpcodeName(errResp.Opcode()))
	}
	errSN := errResp.StatSN()

	// Third: send another NOP.
	nop2 := &PDU{}
	nop2.SetOpcode(OpNOPOut)
	nop2.SetOpSpecific1(FlagF)
	nop2.SetInitiatorTaskTag(0x6002)
	nop2.SetImmediate(true)
	if err := WritePDU(env.clientConn, nop2); err != nil {
		t.Fatal(err)
	}
	nop2Resp, err := ReadPDU(env.clientConn)
	if err != nil {
		t.Fatal(err)
	}
	afterSN := nop2Resp.StatSN()

	// Error response should have incremented StatSN: base+1 for the error,
	// base+2 for the trailing NOP.
	if errSN != baseSN+1 {
		t.Errorf("error StatSN = %d, want %d (baseSN+1)", errSN, baseSN+1)
	}
	if afterSN != baseSN+2 {
		t.Errorf("after error StatSN = %d, want %d (baseSN+2)", afterSN, baseSN+2)
	}
	t.Logf("StatSN: base=%d, error=%d, after=%d", baseSN, errSN, afterSN)
}
|
|||
// --- QA-2.4: Shutdown Ordering ---
|
|||
|
|||
func testQARXTXShutdownWriterQueued(t *testing.T) { |
|||
// Enqueue multiple responses, then disconnect.
|
|||
// Writer should drain all queued responses before exiting.
|
|||
env := setupSession(t) |
|||
doLogin(t, env.clientConn) |
|||
|
|||
// Send 10 NOP-Outs rapidly.
|
|||
for i := 0; i < 10; i++ { |
|||
nop := &PDU{} |
|||
nop.SetOpcode(OpNOPOut) |
|||
nop.SetOpSpecific1(FlagF) |
|||
nop.SetInitiatorTaskTag(uint32(0x7000 + i)) |
|||
nop.SetImmediate(true) |
|||
if err := WritePDU(env.clientConn, nop); err != nil { |
|||
t.Fatalf("NOP %d: %v", i, err) |
|||
} |
|||
} |
|||
|
|||
// Read all 10 responses.
|
|||
env.clientConn.SetReadDeadline(time.Now().Add(2 * time.Second)) |
|||
count := 0 |
|||
for i := 0; i < 10; i++ { |
|||
_, err := ReadPDU(env.clientConn) |
|||
if err != nil { |
|||
break |
|||
} |
|||
count++ |
|||
} |
|||
if count != 10 { |
|||
t.Errorf("received %d NOP-In responses, want 10", count) |
|||
} |
|||
} |
|||
|
|||
// testQARXTXTargetCloseActiveIO closes the TargetServer while four sessions
// are mid-write and asserts every session goroutine unwinds within 5s.
// I/O errors after the close are the expected termination signal and are
// deliberately swallowed by the workers.
func testQARXTXTargetCloseActiveIO(t *testing.T) {
	// Heavy I/O on multiple sessions, target.Close() mid-flight.
	ts, addr := qaSetupTarget(t)

	var wg sync.WaitGroup
	wg.Add(4)

	// Start 4 sessions doing I/O.
	for i := 0; i < 4; i++ {
		go func(id int) {
			defer wg.Done()
			conn, err := net.DialTimeout("tcp", addr, 2*time.Second)
			if err != nil {
				return
			}
			defer conn.Close()

			// NOTE(review): doLogin appears to call t.Fatal* on failure,
			// which is undefined off the test goroutine -- consider an
			// error-returning variant for worker use.
			doLogin(t, conn)

			// Write loop until connection closes.
			for j := 0; j < 100; j++ {
				data := make([]byte, 4096)
				data[0] = byte(id)
				sendSCSIWriteImmediate(t, conn, uint32(id), data, uint32(j+100), uint32(j+2))
				_, err := ReadPDU(conn)
				if err != nil {
					return // expected: target closed
				}
			}
		}(i)
	}

	// Give sessions time to start I/O.
	time.Sleep(50 * time.Millisecond)

	// Close target while I/O is in progress.
	ts.Close()

	// All goroutines should exit cleanly.
	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()

	select {
	case <-done:
	case <-time.After(5 * time.Second):
		t.Fatal("sessions did not exit after target.Close()")
	}
}
|
|||
func testQARXTXSessionAfterTargetClose(t *testing.T) { |
|||
// Close target, then try to connect -- should fail or close immediately.
|
|||
ts, addr := qaSetupTarget(t) |
|||
|
|||
// Verify one session works.
|
|||
conn, err := net.DialTimeout("tcp", addr, 2*time.Second) |
|||
if err != nil { |
|||
t.Fatalf("initial dial: %v", err) |
|||
} |
|||
doLogin(t, conn) |
|||
conn.Close() |
|||
|
|||
// Close target.
|
|||
ts.Close() |
|||
|
|||
// New connection should fail.
|
|||
conn2, err := net.DialTimeout("tcp", addr, 500*time.Millisecond) |
|||
if err != nil { |
|||
// Expected: listener closed, dial fails.
|
|||
return |
|||
} |
|||
defer conn2.Close() |
|||
|
|||
// If dial succeeded (unlikely), the connection should be closed by server.
|
|||
conn2.SetReadDeadline(time.Now().Add(500 * time.Millisecond)) |
|||
_, err = ReadPDU(conn2) |
|||
if err == nil { |
|||
t.Error("expected error reading from connection after target close") |
|||
} |
|||
} |
|||
@ -0,0 +1,536 @@ |
|||
package iscsi_test |
|||
|
|||
import ( |
|||
"bytes" |
|||
"encoding/binary" |
|||
"io" |
|||
"log" |
|||
"net" |
|||
"path/filepath" |
|||
"sync" |
|||
"testing" |
|||
"time" |
|||
|
|||
"github.com/seaweedfs/seaweedfs/weed/storage/blockvol" |
|||
"github.com/seaweedfs/seaweedfs/weed/storage/blockvol/iscsi" |
|||
) |
|||
|
|||
// TestQAPhase3Stability tests Phase 3 integration stability scenarios:
|
|||
// WAL pressure through iSCSI, crash recovery with sessions, lifecycle stress.
|
|||
func TestQAPhase3Stability(t *testing.T) { |
|||
tests := []struct { |
|||
name string |
|||
run func(t *testing.T) |
|||
}{ |
|||
// QA-3.1: WAL Pressure + iSCSI E2E
|
|||
{name: "e2e_sustained_write_1000", run: testE2ESustainedWrite1000}, |
|||
{name: "e2e_write_read_under_pressure", run: testE2EWriteReadUnderPressure}, |
|||
{name: "e2e_synccache_under_pressure", run: testE2ESyncCacheUnderPressure}, |
|||
{name: "e2e_multiple_sessions_shared_vol", run: testE2EMultipleSessionsSharedVol}, |
|||
|
|||
// QA-3.2: Crash Recovery + Session Reconnect
|
|||
{name: "e2e_crash_recovery_via_iscsi", run: testE2ECrashRecoveryViaISCSI}, |
|||
{name: "e2e_session_reconnect", run: testE2ESessionReconnect}, |
|||
|
|||
// QA-3.3: Lifecycle & Resource Stress
|
|||
{name: "e2e_rapid_open_close_target", run: testE2ERapidOpenCloseTarget}, |
|||
{name: "e2e_config_extreme_values", run: testE2EConfigExtremeValues}, |
|||
} |
|||
for _, tt := range tests { |
|||
t.Run(tt.name, func(t *testing.T) { |
|||
tt.run(t) |
|||
}) |
|||
} |
|||
} |
|||
|
|||
// --- Helpers ---
|
|||
|
|||
// setupStabilityTarget creates a BlockVol at a temp path (applying the first
// of cfgs, if supplied), wraps it in a blockVolAdapter, serves it over a
// loopback iSCSI TargetServer, then dials and logs in one session. Cleanup
// tears things down in dependency order: connection, then server, then
// volume. Returns the volume, the logged-in conn, the server, and its addr.
func setupStabilityTarget(t *testing.T, opts blockvol.CreateOptions, cfgs ...blockvol.BlockVolConfig) (*blockvol.BlockVol, net.Conn, *iscsi.TargetServer, string) {
	t.Helper()
	path := filepath.Join(t.TempDir(), "stability.blk")

	// Variadic cfgs emulates an optional config argument; only cfgs[0] is used.
	var vol *blockvol.BlockVol
	var err error
	if len(cfgs) > 0 {
		vol, err = blockvol.CreateBlockVol(path, opts, cfgs[0])
	} else {
		vol, err = blockvol.CreateBlockVol(path, opts)
	}
	if err != nil {
		t.Fatal(err)
	}

	adapter := &blockVolAdapter{vol: vol}
	config := iscsi.DefaultTargetConfig()
	config.TargetName = intTargetName
	logger := log.New(io.Discard, "", 0)
	// NOTE(review): the "127.0.0.1:0" passed to NewTargetServer looks unused
	// here since Serve is handed an explicit listener below -- confirm.
	ts := iscsi.NewTargetServer("127.0.0.1:0", config, logger)
	ts.AddVolume(intTargetName, adapter)

	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}
	addr := ln.Addr().String()
	go ts.Serve(ln)

	conn, err := net.DialTimeout("tcp", addr, 2*time.Second)
	if err != nil {
		t.Fatal(err)
	}

	// Login
	stabilityDoLogin(t, conn)

	// Teardown order matters: drop the client connection first, stop the
	// server (so no session still touches vol), then close the volume.
	t.Cleanup(func() {
		conn.Close()
		ts.Close()
		vol.Close()
	})

	return vol, conn, ts, addr
}
|
|||
func stabilityDoLogin(t *testing.T, conn net.Conn) { |
|||
t.Helper() |
|||
params := iscsi.NewParams() |
|||
params.Set("InitiatorName", intInitiatorName) |
|||
params.Set("TargetName", intTargetName) |
|||
params.Set("SessionType", "Normal") |
|||
|
|||
loginReq := &iscsi.PDU{} |
|||
loginReq.SetOpcode(iscsi.OpLoginReq) |
|||
loginReq.SetLoginStages(iscsi.StageSecurityNeg, iscsi.StageFullFeature) |
|||
loginReq.SetLoginTransit(true) |
|||
loginReq.SetISID([6]byte{0x00, 0x02, 0x3D, 0x00, 0x00, 0x01}) |
|||
loginReq.SetCmdSN(1) |
|||
loginReq.DataSegment = params.Encode() |
|||
|
|||
if err := iscsi.WritePDU(conn, loginReq); err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
resp, err := iscsi.ReadPDU(conn) |
|||
if err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
if resp.LoginStatusClass() != iscsi.LoginStatusSuccess { |
|||
t.Fatalf("login failed: %d/%d", resp.LoginStatusClass(), resp.LoginStatusDetail()) |
|||
} |
|||
} |
|||
|
|||
func stabilityWrite(t *testing.T, conn net.Conn, lba uint32, data []byte, cmdSN uint32) { |
|||
t.Helper() |
|||
var cdb [16]byte |
|||
cdb[0] = iscsi.ScsiWrite10 |
|||
binary.BigEndian.PutUint32(cdb[2:6], lba) |
|||
blocks := uint16(len(data) / 4096) |
|||
binary.BigEndian.PutUint16(cdb[7:9], blocks) |
|||
resp := sendSCSICmd(t, conn, cdb, cmdSN, false, true, data, uint32(len(data))) |
|||
if resp.SCSIStatus() != iscsi.SCSIStatusGood { |
|||
t.Fatalf("write LBA %d failed: status %d", lba, resp.SCSIStatus()) |
|||
} |
|||
} |
|||
|
|||
func stabilityRead(t *testing.T, conn net.Conn, lba uint32, cmdSN uint32) []byte { |
|||
t.Helper() |
|||
var cdb [16]byte |
|||
cdb[0] = iscsi.ScsiRead10 |
|||
binary.BigEndian.PutUint32(cdb[2:6], lba) |
|||
binary.BigEndian.PutUint16(cdb[7:9], 1) |
|||
resp := sendSCSICmd(t, conn, cdb, cmdSN, true, false, nil, 4096) |
|||
if resp.Opcode() != iscsi.OpSCSIDataIn { |
|||
t.Fatalf("read LBA %d: expected Data-In, got %s", lba, iscsi.OpcodeName(resp.Opcode())) |
|||
} |
|||
return resp.DataSegment |
|||
} |
|||
|
|||
// --- QA-3.1: WAL Pressure + iSCSI E2E ---
|
|||
|
|||
func testE2ESustainedWrite1000(t *testing.T) { |
|||
// 1000 sequential WRITEs via iSCSI, flusher running.
|
|||
_, conn, _, _ := setupStabilityTarget(t, blockvol.CreateOptions{ |
|||
VolumeSize: 1024 * 4096, |
|||
BlockSize: 4096, |
|||
WALSize: 256 * 1024, // 256KB WAL
|
|||
}) |
|||
|
|||
cmdSN := uint32(2) |
|||
for i := 0; i < 1000; i++ { |
|||
lba := uint32(i % 1024) |
|||
data := make([]byte, 4096) |
|||
data[0] = byte(i % 256) |
|||
data[1] = byte(i / 256) |
|||
|
|||
var cdb [16]byte |
|||
cdb[0] = iscsi.ScsiWrite10 |
|||
binary.BigEndian.PutUint32(cdb[2:6], lba) |
|||
binary.BigEndian.PutUint16(cdb[7:9], 1) |
|||
|
|||
resp := sendSCSICmd(t, conn, cdb, cmdSN, false, true, data, 4096) |
|||
if resp.SCSIStatus() != iscsi.SCSIStatusGood { |
|||
t.Fatalf("write %d (LBA %d) failed: status %d", i, lba, resp.SCSIStatus()) |
|||
} |
|||
cmdSN++ |
|||
} |
|||
t.Log("sustained: 1000 WRITEs completed successfully") |
|||
} |
|||
|
|||
func testE2EWriteReadUnderPressure(t *testing.T) { |
|||
// Heavy writes to create WAL pressure, concurrent reads should still work.
|
|||
cfg := blockvol.DefaultConfig() |
|||
cfg.WALPressureThreshold = 0.3 |
|||
cfg.FlushInterval = 5 * time.Millisecond |
|||
|
|||
_, conn, _, _ := setupStabilityTarget(t, blockvol.CreateOptions{ |
|||
VolumeSize: 256 * 4096, |
|||
BlockSize: 4096, |
|||
WALSize: 64 * 1024, // small WAL for pressure
|
|||
}, cfg) |
|||
|
|||
// Write to first 50 LBAs.
|
|||
cmdSN := uint32(2) |
|||
for i := 0; i < 50; i++ { |
|||
data := bytes.Repeat([]byte{byte(i)}, 4096) |
|||
stabilityWrite(t, conn, uint32(i), data, cmdSN) |
|||
cmdSN++ |
|||
} |
|||
|
|||
// Read them back under potential WAL pressure.
|
|||
for i := 0; i < 50; i++ { |
|||
readData := stabilityRead(t, conn, uint32(i), cmdSN) |
|||
cmdSN++ |
|||
expected := bytes.Repeat([]byte{byte(i)}, 4096) |
|||
if !bytes.Equal(readData, expected) { |
|||
t.Fatalf("LBA %d: data mismatch under pressure", i) |
|||
} |
|||
} |
|||
} |
|||
|
|||
func testE2ESyncCacheUnderPressure(t *testing.T) { |
|||
// SYNC_CACHE while WAL is under pressure.
|
|||
cfg := blockvol.DefaultConfig() |
|||
cfg.WALPressureThreshold = 0.3 |
|||
cfg.FlushInterval = 5 * time.Millisecond |
|||
|
|||
_, conn, _, _ := setupStabilityTarget(t, blockvol.CreateOptions{ |
|||
VolumeSize: 256 * 4096, |
|||
BlockSize: 4096, |
|||
WALSize: 64 * 1024, |
|||
}, cfg) |
|||
|
|||
// Write enough to create pressure.
|
|||
cmdSN := uint32(2) |
|||
for i := 0; i < 30; i++ { |
|||
data := bytes.Repeat([]byte{byte(i)}, 4096) |
|||
stabilityWrite(t, conn, uint32(i%256), data, cmdSN) |
|||
cmdSN++ |
|||
} |
|||
|
|||
// SYNC_CACHE should succeed even under pressure.
|
|||
var syncCDB [16]byte |
|||
syncCDB[0] = iscsi.ScsiSyncCache10 |
|||
resp := sendSCSICmd(t, conn, syncCDB, cmdSN, false, false, nil, 0) |
|||
if resp.SCSIStatus() != iscsi.SCSIStatusGood { |
|||
t.Fatalf("SYNC_CACHE under pressure failed: status %d", resp.SCSIStatus()) |
|||
} |
|||
} |
|||
|
|||
// testE2EMultipleSessionsSharedVol runs 4 concurrent iSCSI sessions over a
// single shared BlockVol, each writing then reading back its own disjoint
// 10-LBA range (id*10 .. id*10+9). Worker failures use t.Logf/t.Errorf so
// one session cannot abort the others mid-flight.
func testE2EMultipleSessionsSharedVol(t *testing.T) {
	// 4 sessions on same BlockVol, each writing to different LBA ranges.
	path := filepath.Join(t.TempDir(), "shared.blk")
	vol, err := blockvol.CreateBlockVol(path, blockvol.CreateOptions{
		VolumeSize: 1024 * 4096,
		BlockSize:  4096,
		WALSize:    512 * 1024,
	})
	if err != nil {
		t.Fatal(err)
	}
	defer vol.Close()

	adapter := &blockVolAdapter{vol: vol}
	config := iscsi.DefaultTargetConfig()
	config.TargetName = intTargetName
	logger := log.New(io.Discard, "", 0)
	ts := iscsi.NewTargetServer("127.0.0.1:0", config, logger)
	ts.AddVolume(intTargetName, adapter)

	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}
	addr := ln.Addr().String()
	go ts.Serve(ln)
	defer ts.Close()

	var wg sync.WaitGroup
	wg.Add(4)
	for sess := 0; sess < 4; sess++ {
		go func(id int) {
			defer wg.Done()
			conn, err := net.DialTimeout("tcp", addr, 2*time.Second)
			if err != nil {
				t.Logf("session %d: dial failed: %v", id, err)
				return
			}
			defer conn.Close()

			// NOTE(review): stabilityDoLogin calls t.Fatal* on failure, which
			// is undefined off the test goroutine -- consider an
			// error-returning variant for worker use.
			stabilityDoLogin(t, conn)

			// Each session writes to its own LBA range (id*10 to id*10+9).
			cmdSN := uint32(2)
			baseLBA := uint32(id * 10)
			for i := 0; i < 10; i++ {
				data := make([]byte, 4096)
				data[0] = byte(id) // tag payload with session id...
				data[1] = byte(i)  // ...and block index for verification

				var cdb [16]byte
				cdb[0] = iscsi.ScsiWrite10
				binary.BigEndian.PutUint32(cdb[2:6], baseLBA+uint32(i))
				binary.BigEndian.PutUint16(cdb[7:9], 1)

				resp := sendSCSICmd(t, conn, cdb, cmdSN, false, true, data, 4096)
				if resp.SCSIStatus() != iscsi.SCSIStatusGood {
					t.Logf("session %d: write %d failed", id, i)
					return
				}
				cmdSN++
			}

			// Read back and verify.
			for i := 0; i < 10; i++ {
				var cdb [16]byte
				cdb[0] = iscsi.ScsiRead10
				binary.BigEndian.PutUint32(cdb[2:6], baseLBA+uint32(i))
				binary.BigEndian.PutUint16(cdb[7:9], 1)

				resp := sendSCSICmd(t, conn, cdb, cmdSN, true, false, nil, 4096)
				if resp.Opcode() != iscsi.OpSCSIDataIn {
					t.Logf("session %d: read %d: expected Data-In", id, i)
					return
				}
				if resp.DataSegment[0] != byte(id) || resp.DataSegment[1] != byte(i) {
					t.Errorf("session %d: LBA %d data mismatch: got [%d,%d], want [%d,%d]",
						id, baseLBA+uint32(i), resp.DataSegment[0], resp.DataSegment[1], id, i)
				}
				cmdSN++
			}
		}(sess)
	}
	wg.Wait()
}
|||
|
|||
// --- QA-3.2: Crash Recovery + Session Reconnect ---
|
|||
|
|||
func testE2ECrashRecoveryViaISCSI(t *testing.T) { |
|||
// Write via iSCSI, close vol (simulating crash), reopen and verify data via direct read.
|
|||
dir := t.TempDir() |
|||
path := filepath.Join(dir, "crash.blk") |
|||
|
|||
vol, err := blockvol.CreateBlockVol(path, blockvol.CreateOptions{ |
|||
VolumeSize: 256 * 4096, |
|||
BlockSize: 4096, |
|||
WALSize: 128 * 1024, |
|||
}) |
|||
if err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
|
|||
adapter := &blockVolAdapter{vol: vol} |
|||
config := iscsi.DefaultTargetConfig() |
|||
config.TargetName = intTargetName |
|||
logger := log.New(io.Discard, "", 0) |
|||
ts := iscsi.NewTargetServer("127.0.0.1:0", config, logger) |
|||
ts.AddVolume(intTargetName, adapter) |
|||
|
|||
ln, err := net.Listen("tcp", "127.0.0.1:0") |
|||
if err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
go ts.Serve(ln) |
|||
|
|||
conn, err := net.DialTimeout("tcp", ln.Addr().String(), 2*time.Second) |
|||
if err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
stabilityDoLogin(t, conn) |
|||
|
|||
// Write 10 blocks via iSCSI.
|
|||
cmdSN := uint32(2) |
|||
for i := 0; i < 10; i++ { |
|||
data := bytes.Repeat([]byte{byte(i + 0xA0)}, 4096) |
|||
stabilityWrite(t, conn, uint32(i), data, cmdSN) |
|||
cmdSN++ |
|||
} |
|||
|
|||
// SYNC_CACHE to ensure WAL is durable.
|
|||
var syncCDB [16]byte |
|||
syncCDB[0] = iscsi.ScsiSyncCache10 |
|||
sendSCSICmd(t, conn, syncCDB, cmdSN, false, false, nil, 0) |
|||
|
|||
// Close everything (simulate crash).
|
|||
conn.Close() |
|||
ts.Close() |
|||
vol.Close() |
|||
|
|||
// Reopen and verify data.
|
|||
vol2, err := blockvol.OpenBlockVol(path) |
|||
if err != nil { |
|||
t.Fatalf("reopen: %v", err) |
|||
} |
|||
defer vol2.Close() |
|||
|
|||
for i := 0; i < 10; i++ { |
|||
data, err := vol2.ReadLBA(uint64(i), 4096) |
|||
if err != nil { |
|||
t.Fatalf("ReadLBA(%d) after recovery: %v", i, err) |
|||
} |
|||
expected := bytes.Repeat([]byte{byte(i + 0xA0)}, 4096) |
|||
if !bytes.Equal(data, expected) { |
|||
t.Fatalf("LBA %d: data mismatch after crash recovery", i) |
|||
} |
|||
} |
|||
} |
|||
|
|||
func testE2ESessionReconnect(t *testing.T) { |
|||
// Write via session 1, disconnect, reconnect as session 2, read back.
|
|||
dir := t.TempDir() |
|||
path := filepath.Join(dir, "reconnect.blk") |
|||
|
|||
vol, err := blockvol.CreateBlockVol(path, blockvol.CreateOptions{ |
|||
VolumeSize: 256 * 4096, |
|||
BlockSize: 4096, |
|||
WALSize: 128 * 1024, |
|||
}) |
|||
if err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
defer vol.Close() |
|||
|
|||
adapter := &blockVolAdapter{vol: vol} |
|||
config := iscsi.DefaultTargetConfig() |
|||
config.TargetName = intTargetName |
|||
logger := log.New(io.Discard, "", 0) |
|||
ts := iscsi.NewTargetServer("127.0.0.1:0", config, logger) |
|||
ts.AddVolume(intTargetName, adapter) |
|||
|
|||
ln, err := net.Listen("tcp", "127.0.0.1:0") |
|||
if err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
addr := ln.Addr().String() |
|||
go ts.Serve(ln) |
|||
defer ts.Close() |
|||
|
|||
// Session 1: write data.
|
|||
conn1, err := net.DialTimeout("tcp", addr, 2*time.Second) |
|||
if err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
stabilityDoLogin(t, conn1) |
|||
|
|||
writeData := bytes.Repeat([]byte{0xDE}, 4096) |
|||
stabilityWrite(t, conn1, 5, writeData, 2) |
|||
conn1.Close() |
|||
|
|||
// Session 2: read back.
|
|||
conn2, err := net.DialTimeout("tcp", addr, 2*time.Second) |
|||
if err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
defer conn2.Close() |
|||
stabilityDoLogin(t, conn2) |
|||
|
|||
readData := stabilityRead(t, conn2, 5, 2) |
|||
if !bytes.Equal(readData, writeData) { |
|||
t.Fatal("data mismatch after session reconnect") |
|||
} |
|||
} |
|||
|
|||
// --- QA-3.3: Lifecycle & Resource Stress ---
|
|||
|
|||
// testE2ERapidOpenCloseTarget brings a TargetServer up and down 10 times
// over one long-lived BlockVol, writing a single block per cycle, then
// verifies all 10 blocks directly via ReadLBA. Stresses listener/session
// teardown across repeated server lifecycles.
func testE2ERapidOpenCloseTarget(t *testing.T) {
	// Open TargetServer, do I/O, close. Repeat 10 times. No fd/goroutine leak.
	dir := t.TempDir()
	path := filepath.Join(dir, "rapid.blk")

	vol, err := blockvol.CreateBlockVol(path, blockvol.CreateOptions{
		VolumeSize: 256 * 4096,
		BlockSize:  4096,
		WALSize:    128 * 1024,
	})
	if err != nil {
		t.Fatal(err)
	}
	defer vol.Close()

	for cycle := 0; cycle < 10; cycle++ {
		adapter := &blockVolAdapter{vol: vol}
		config := iscsi.DefaultTargetConfig()
		config.TargetName = intTargetName
		logger := log.New(io.Discard, "", 0)
		ts := iscsi.NewTargetServer("127.0.0.1:0", config, logger)
		ts.AddVolume(intTargetName, adapter)

		ln, err := net.Listen("tcp", "127.0.0.1:0")
		if err != nil {
			t.Fatalf("cycle %d: listen: %v", cycle, err)
		}
		go ts.Serve(ln)

		conn, err := net.DialTimeout("tcp", ln.Addr().String(), 2*time.Second)
		if err != nil {
			t.Fatalf("cycle %d: dial: %v", cycle, err)
		}
		stabilityDoLogin(t, conn)

		// Write one block, filled with the cycle number, at LBA == cycle.
		data := bytes.Repeat([]byte{byte(cycle)}, 4096)
		stabilityWrite(t, conn, uint32(cycle), data, 2)

		conn.Close()
		ts.Close()
	}

	// Verify all 10 LBAs have correct data.
	for i := 0; i < 10; i++ {
		data, err := vol.ReadLBA(uint64(i), 4096)
		if err != nil {
			t.Fatalf("ReadLBA(%d) after 10 cycles: %v", i, err)
		}
		if data[0] != byte(i) {
			t.Errorf("LBA %d: byte[0] = %d, want %d", i, data[0], i)
		}
	}
}
|
|||
func testE2EConfigExtremeValues(t *testing.T) { |
|||
// Create BlockVol with extreme but valid config, use via iSCSI.
|
|||
cfg := blockvol.DefaultConfig() |
|||
cfg.GroupCommitMaxDelay = 100 * time.Microsecond // very fast
|
|||
cfg.DirtyMapShards = 1 // minimum
|
|||
cfg.FlushInterval = 1 * time.Millisecond |
|||
|
|||
_, conn, _, _ := setupStabilityTarget(t, blockvol.CreateOptions{ |
|||
VolumeSize: 64 * 4096, |
|||
BlockSize: 4096, |
|||
WALSize: 32 * 1024, // small WAL
|
|||
}, cfg) |
|||
|
|||
// Write + read should work.
|
|||
cmdSN := uint32(2) |
|||
for i := 0; i < 20; i++ { |
|||
data := bytes.Repeat([]byte{byte(i)}, 4096) |
|||
stabilityWrite(t, conn, uint32(i%64), data, cmdSN) |
|||
cmdSN++ |
|||
} |
|||
|
|||
// SYNC_CACHE.
|
|||
var syncCDB [16]byte |
|||
syncCDB[0] = iscsi.ScsiSyncCache10 |
|||
resp := sendSCSICmd(t, conn, syncCDB, cmdSN, false, false, nil, 0) |
|||
if resp.SCSIStatus() != iscsi.SCSIStatusGood { |
|||
t.Fatalf("SYNC_CACHE with extreme config failed: %d", resp.SCSIStatus()) |
|||
} |
|||
} |
|||
@ -0,0 +1,503 @@ |
|||
package blockvol |
|||
|
|||
import ( |
|||
"errors" |
|||
"math" |
|||
"path/filepath" |
|||
"sync" |
|||
"sync/atomic" |
|||
"testing" |
|||
"time" |
|||
) |
|||
|
|||
// TestQAPhase3Engine tests Phase 3 engine adversarial scenarios:
|
|||
// adaptive group commit, sharded dirty map, WAL pressure.
|
|||
func TestQAPhase3Engine(t *testing.T) { |
|||
tests := []struct { |
|||
name string |
|||
run func(t *testing.T) |
|||
}{ |
|||
// QA-1.1: Group Commit Adversarial
|
|||
{name: "gc_syncfunc_panic", run: testQAGCSyncFuncPanic}, |
|||
{name: "gc_thundering_herd_1000", run: testQAGCThunderingHerd1000}, |
|||
{name: "gc_lowwatermark_equals_maxbatch", run: testQAGCLowWatermarkEqualsMaxBatch}, |
|||
|
|||
// QA-1.2: Sharded Dirty Map Adversarial
|
|||
{name: "dm_snapshot_during_heavy_write", run: testQADMSnapshotDuringHeavyWrite}, |
|||
{name: "dm_put_delete_put_same_lba", run: testQADMPutDeletePutSameLBA}, |
|||
{name: "dm_max_uint64_lba", run: testQADMMaxUint64LBA}, |
|||
{name: "dm_non_power_of_2_shards", run: testQADMNonPowerOf2Shards}, |
|||
{name: "dm_zero_shards", run: testQADMZeroShards}, |
|||
{name: "dm_len_during_concurrent_writes", run: testQADMLenDuringConcurrentWrites}, |
|||
|
|||
// QA-1.3: WAL Pressure Adversarial
|
|||
{name: "wal_pressure_flusher_slow", run: testQAWALPressureFlusherSlow}, |
|||
{name: "wal_pressure_threshold_0", run: testQAWALPressureThreshold0}, |
|||
{name: "wal_pressure_threshold_1", run: testQAWALPressureThreshold1}, |
|||
{name: "wal_close_during_pressure_block", run: testQAWALCloseDuringPressureBlock}, |
|||
} |
|||
for _, tt := range tests { |
|||
t.Run(tt.name, func(t *testing.T) { |
|||
tt.run(t) |
|||
}) |
|||
} |
|||
} |
|||
|
|||
// --- QA-1.1: Group Commit Adversarial ---
|
|||
|
|||
// testQAGCSyncFuncPanic verifies that a panic inside syncFunc is contained:
// either Submit() returns, or Run() exits cleanly through the recovery
// wrapper -- but the test must never hang past its 3s deadline.
func testQAGCSyncFuncPanic(t *testing.T) {
	// syncFunc panics: Run() goroutine crashes, pending waiters must not hang forever.
	gc := NewGroupCommitter(GroupCommitterConfig{
		SyncFunc: func() error {
			panic("simulated disk panic")
		},
		MaxDelay: 10 * time.Millisecond,
	})

	// Wrap Run() with panic recovery.
	// runDone closes when Run() returns -- here, when the panic propagates
	// out of Run() and is swallowed by the deferred recover().
	runDone := make(chan struct{})
	go func() {
		defer close(runDone)
		defer func() { recover() }()
		gc.Run()
	}()

	// Submit should either return an error or hang. Use a timeout.
	result := make(chan error, 1)
	go func() {
		result <- gc.Submit()
	}()

	// Three outcomes are acceptable; only a full 3s stall is a failure.
	select {
	case <-result:
		// Got a result (error or nil) -- panic was recovered, or waiter unblocked.
	case <-runDone:
		// Run() exited due to panic. Submit will hang on channel read.
		// This is expected behavior: panic in syncFunc is fatal.
		// Test passes: no goroutine leak from Run(), panic was contained.
	case <-time.After(3 * time.Second):
		t.Fatal("syncFunc panic: Run() and Submit both hung for 3s")
	}
}
|||
|
|||
func testQAGCThunderingHerd1000(t *testing.T) { |
|||
// 1000 goroutines Submit() simultaneously: all must return, batching should work.
|
|||
var syncCalls atomic.Uint64 |
|||
gc := NewGroupCommitter(GroupCommitterConfig{ |
|||
SyncFunc: func() error { |
|||
syncCalls.Add(1) |
|||
return nil |
|||
}, |
|||
MaxDelay: 50 * time.Millisecond, |
|||
MaxBatch: 64, |
|||
LowWatermark: 4, |
|||
}) |
|||
go gc.Run() |
|||
defer gc.Stop() |
|||
|
|||
const n = 1000 |
|||
var wg sync.WaitGroup |
|||
errs := make([]error, n) |
|||
wg.Add(n) |
|||
for i := 0; i < n; i++ { |
|||
go func(idx int) { |
|||
defer wg.Done() |
|||
errs[idx] = gc.Submit() |
|||
}(i) |
|||
} |
|||
|
|||
done := make(chan struct{}) |
|||
go func() { |
|||
wg.Wait() |
|||
close(done) |
|||
}() |
|||
|
|||
select { |
|||
case <-done: |
|||
case <-time.After(10 * time.Second): |
|||
t.Fatal("thundering herd: 1000 submits hung for 10s") |
|||
} |
|||
|
|||
for i, err := range errs { |
|||
if err != nil { |
|||
t.Errorf("Submit[%d]: %v", i, err) |
|||
} |
|||
} |
|||
|
|||
c := syncCalls.Load() |
|||
if c >= 1000 { |
|||
t.Errorf("syncCalls = %d, expected batching (< 1000)", c) |
|||
} |
|||
t.Logf("thundering herd: 1000 submits, %d fsyncs", c) |
|||
} |
|||
|
|||
func testQAGCLowWatermarkEqualsMaxBatch(t *testing.T) { |
|||
// lowWatermark = maxBatch = 8: skipDelay always true when < 8 pending.
|
|||
var syncCalls atomic.Uint64 |
|||
gc := NewGroupCommitter(GroupCommitterConfig{ |
|||
SyncFunc: func() error { |
|||
syncCalls.Add(1) |
|||
return nil |
|||
}, |
|||
MaxDelay: 5 * time.Second, // should never wait this long
|
|||
MaxBatch: 8, |
|||
LowWatermark: 8, |
|||
}) |
|||
go gc.Run() |
|||
defer gc.Stop() |
|||
|
|||
// Single serial submits: since pending < lowWatermark (8), skipDelay=true.
|
|||
for i := 0; i < 5; i++ { |
|||
if err := gc.Submit(); err != nil { |
|||
t.Fatalf("Submit %d: %v", i, err) |
|||
} |
|||
} |
|||
|
|||
// Each serial submit should have triggered its own fsync (no batching).
|
|||
if c := syncCalls.Load(); c != 5 { |
|||
t.Errorf("syncCalls = %d, want 5 (skipDelay always true)", c) |
|||
} |
|||
} |
|||
|
|||
// --- QA-1.2: Sharded Dirty Map Adversarial ---
|
|||
|
|||
func testQADMSnapshotDuringHeavyWrite(t *testing.T) { |
|||
dm := NewDirtyMap(256) |
|||
const numWriters = 16 |
|||
const opsPerWriter = 1000 |
|||
|
|||
var wg sync.WaitGroup |
|||
|
|||
// Writers: continuously Put entries.
|
|||
wg.Add(numWriters) |
|||
for g := 0; g < numWriters; g++ { |
|||
go func(id int) { |
|||
defer wg.Done() |
|||
base := uint64(id * opsPerWriter) |
|||
for i := uint64(0); i < opsPerWriter; i++ { |
|||
dm.Put(base+i, (base+i)*10, base+i+1, 4096) |
|||
} |
|||
}(g) |
|||
} |
|||
|
|||
// Concurrent snapshots.
|
|||
wg.Add(1) |
|||
go func() { |
|||
defer wg.Done() |
|||
for round := 0; round < 20; round++ { |
|||
snap := dm.Snapshot() |
|||
// Each snapshot entry should have valid fields.
|
|||
for _, e := range snap { |
|||
if e.Length != 4096 { |
|||
t.Errorf("snapshot entry LBA %d has length %d, want 4096", e.Lba, e.Length) |
|||
return |
|||
} |
|||
} |
|||
} |
|||
}() |
|||
|
|||
wg.Wait() |
|||
// Test passes if no race/panic.
|
|||
} |
|||
|
|||
func testQADMPutDeletePutSameLBA(t *testing.T) { |
|||
dm := NewDirtyMap(256) |
|||
const iterations = 1000 |
|||
|
|||
for i := 0; i < iterations; i++ { |
|||
dm.Put(100, uint64(i)*10, uint64(i)+1, 4096) |
|||
dm.Delete(100) |
|||
dm.Put(100, uint64(i)*10+5, uint64(i)+2, 4096) |
|||
} |
|||
|
|||
// Final state: should have the last Put's entry.
|
|||
off, lsn, length, ok := dm.Get(100) |
|||
if !ok { |
|||
t.Fatal("Get(100) not found after Put-Delete-Put cycle") |
|||
} |
|||
expectedOff := uint64(999)*10 + 5 |
|||
expectedLsn := uint64(999) + 2 |
|||
if off != expectedOff { |
|||
t.Errorf("walOffset = %d, want %d", off, expectedOff) |
|||
} |
|||
if lsn != expectedLsn { |
|||
t.Errorf("lsn = %d, want %d", lsn, expectedLsn) |
|||
} |
|||
if length != 4096 { |
|||
t.Errorf("length = %d, want 4096", length) |
|||
} |
|||
} |
|||
|
|||
func testQADMMaxUint64LBA(t *testing.T) { |
|||
dm := NewDirtyMap(256) |
|||
maxLBA := uint64(math.MaxUint64) |
|||
|
|||
dm.Put(maxLBA, 12345, 1, 4096) |
|||
|
|||
off, lsn, length, ok := dm.Get(maxLBA) |
|||
if !ok { |
|||
t.Fatal("Get(MaxUint64) not found") |
|||
} |
|||
if off != 12345 || lsn != 1 || length != 4096 { |
|||
t.Errorf("got (%d, %d, %d), want (12345, 1, 4096)", off, lsn, length) |
|||
} |
|||
|
|||
dm.Delete(maxLBA) |
|||
_, _, _, ok = dm.Get(maxLBA) |
|||
if ok { |
|||
t.Error("Get(MaxUint64) should not be found after delete") |
|||
} |
|||
} |
|||
|
|||
func testQADMNonPowerOf2Shards(t *testing.T) { |
|||
// Non-power-of-2: mask will be wrong (7-1=6=0b110), but should not panic.
|
|||
// This tests robustness, not correctness of shard distribution.
|
|||
dm := NewDirtyMap(7) |
|||
|
|||
// Should not panic on basic operations.
|
|||
for i := uint64(0); i < 100; i++ { |
|||
dm.Put(i, i*10, i+1, 4096) |
|||
} |
|||
|
|||
// All entries should be retrievable (mask-based routing is deterministic).
|
|||
for i := uint64(0); i < 100; i++ { |
|||
_, _, _, ok := dm.Get(i) |
|||
if !ok { |
|||
t.Errorf("Get(%d) not found with 7 shards", i) |
|||
} |
|||
} |
|||
|
|||
if dm.Len() != 100 { |
|||
t.Errorf("Len() = %d, want 100", dm.Len()) |
|||
} |
|||
} |
|||
|
|||
func testQADMZeroShards(t *testing.T) { |
|||
// NewDirtyMap(0) should default to 1 shard (no panic, no divide-by-zero).
|
|||
dm := NewDirtyMap(0) |
|||
|
|||
dm.Put(42, 100, 1, 4096) |
|||
off, _, _, ok := dm.Get(42) |
|||
if !ok { |
|||
t.Fatal("Get(42) not found with 0 shards (defaulted to 1)") |
|||
} |
|||
if off != 100 { |
|||
t.Errorf("walOffset = %d, want 100", off) |
|||
} |
|||
} |
|||
|
|||
func testQADMLenDuringConcurrentWrites(t *testing.T) { |
|||
dm := NewDirtyMap(256) |
|||
const numWriters = 32 |
|||
const opsPerWriter = 100 |
|||
|
|||
var wg sync.WaitGroup |
|||
|
|||
// Writers.
|
|||
wg.Add(numWriters) |
|||
for g := 0; g < numWriters; g++ { |
|||
go func(id int) { |
|||
defer wg.Done() |
|||
base := uint64(id * opsPerWriter) |
|||
for i := uint64(0); i < opsPerWriter; i++ { |
|||
dm.Put(base+i, (base+i)*10, base+i+1, 4096) |
|||
} |
|||
}(g) |
|||
} |
|||
|
|||
// Concurrent Len() reader.
|
|||
wg.Add(1) |
|||
go func() { |
|||
defer wg.Done() |
|||
for round := 0; round < 100; round++ { |
|||
n := dm.Len() |
|||
if n < 0 { |
|||
t.Errorf("Len() = %d, must be non-negative", n) |
|||
return |
|||
} |
|||
} |
|||
}() |
|||
|
|||
wg.Wait() |
|||
|
|||
// Final Len should be numWriters * opsPerWriter (each LBA unique).
|
|||
if dm.Len() != numWriters*opsPerWriter { |
|||
t.Errorf("final Len() = %d, want %d", dm.Len(), numWriters*opsPerWriter) |
|||
} |
|||
} |
|||
|
|||
// --- QA-1.3: WAL Pressure Adversarial ---
|
|||
|
|||
func testQAWALPressureFlusherSlow(t *testing.T) { |
|||
dir := t.TempDir() |
|||
path := filepath.Join(dir, "slow_flush.blockvol") |
|||
|
|||
cfg := DefaultConfig() |
|||
cfg.WALPressureThreshold = 0.3 |
|||
cfg.WALFullTimeout = 3 * time.Second |
|||
cfg.FlushInterval = 5 * time.Millisecond |
|||
|
|||
v, err := CreateBlockVol(path, CreateOptions{ |
|||
VolumeSize: 1 * 1024 * 1024, |
|||
BlockSize: 4096, |
|||
WALSize: 64 * 1024, |
|||
}, cfg) |
|||
if err != nil { |
|||
t.Fatalf("CreateBlockVol: %v", err) |
|||
} |
|||
defer v.Close() |
|||
|
|||
// Write many blocks. With a small WAL and active flusher, some may succeed
|
|||
// and some may encounter WAL pressure. None should hang forever.
|
|||
var succeeded, walFull int |
|||
for i := 0; i < 30; i++ { |
|||
err := v.WriteLBA(uint64(i%256), makeBlock(byte('A'+i%26))) |
|||
if err == nil { |
|||
succeeded++ |
|||
} else if errors.Is(err, ErrWALFull) { |
|||
walFull++ |
|||
} else { |
|||
t.Fatalf("WriteLBA(%d): unexpected error: %v", i, err) |
|||
} |
|||
} |
|||
t.Logf("flusher_slow: %d succeeded, %d WAL full", succeeded, walFull) |
|||
if succeeded == 0 { |
|||
t.Error("no writes succeeded -- flusher not draining?") |
|||
} |
|||
} |
|||
|
|||
func testQAWALPressureThreshold0(t *testing.T) { |
|||
// threshold=0: urgent flush on every write. Should still work.
|
|||
dir := t.TempDir() |
|||
path := filepath.Join(dir, "thresh0.blockvol") |
|||
|
|||
cfg := DefaultConfig() |
|||
cfg.WALPressureThreshold = 0.0 // always urgent
|
|||
cfg.FlushInterval = 5 * time.Millisecond |
|||
|
|||
v, err := CreateBlockVol(path, CreateOptions{ |
|||
VolumeSize: 1 * 1024 * 1024, |
|||
BlockSize: 4096, |
|||
WALSize: 256 * 1024, |
|||
}, cfg) |
|||
if err != nil { |
|||
t.Fatalf("CreateBlockVol: %v", err) |
|||
} |
|||
defer v.Close() |
|||
|
|||
// Every write triggers urgent flush. Should still succeed.
|
|||
for i := 0; i < 20; i++ { |
|||
if err := v.WriteLBA(uint64(i), makeBlock(byte('A'+i%26))); err != nil { |
|||
t.Fatalf("WriteLBA(%d): %v", i, err) |
|||
} |
|||
} |
|||
|
|||
// Verify data readable.
|
|||
for i := 0; i < 20; i++ { |
|||
data, err := v.ReadLBA(uint64(i), 4096) |
|||
if err != nil { |
|||
t.Fatalf("ReadLBA(%d): %v", i, err) |
|||
} |
|||
if data[0] != byte('A'+i%26) { |
|||
t.Errorf("LBA %d: first byte = %c, want %c", i, data[0], byte('A'+i%26)) |
|||
} |
|||
} |
|||
} |
|||
|
|||
func testQAWALPressureThreshold1(t *testing.T) { |
|||
// threshold=1.0: never urgent. WAL fills, eventually ErrWALFull.
|
|||
dir := t.TempDir() |
|||
path := filepath.Join(dir, "thresh1.blockvol") |
|||
|
|||
cfg := DefaultConfig() |
|||
cfg.WALPressureThreshold = 1.0 // never urgent
|
|||
cfg.WALFullTimeout = 100 * time.Millisecond |
|||
cfg.FlushInterval = 1 * time.Hour // disable periodic flush
|
|||
|
|||
entrySize := uint64(walEntryHeaderSize + 4096) |
|||
walSize := entrySize * 4 // tiny: 4 entries
|
|||
|
|||
v, err := CreateBlockVol(path, CreateOptions{ |
|||
VolumeSize: 1 * 1024 * 1024, |
|||
BlockSize: 4096, |
|||
WALSize: walSize, |
|||
}, cfg) |
|||
if err != nil { |
|||
t.Fatalf("CreateBlockVol: %v", err) |
|||
} |
|||
defer v.Close() |
|||
|
|||
// Stop flusher to prevent any drainage.
|
|||
v.flusher.Stop() |
|||
|
|||
// Fill WAL.
|
|||
var gotWALFull bool |
|||
for i := 0; i < 10; i++ { |
|||
err := v.WriteLBA(uint64(i%256), makeBlock(byte('X'))) |
|||
if errors.Is(err, ErrWALFull) { |
|||
gotWALFull = true |
|||
break |
|||
} else if err != nil { |
|||
t.Fatalf("WriteLBA(%d): unexpected error: %v", i, err) |
|||
} |
|||
} |
|||
|
|||
if !gotWALFull { |
|||
t.Error("expected ErrWALFull with threshold=1.0 and no flusher") |
|||
} |
|||
} |
|||
|
|||
// testQAWALCloseDuringPressureBlock verifies that Close() unblocks a writer
// parked waiting for WAL space. WALFullTimeout is 10s, so without the
// unblock the writer would outlast the test's 5s deadline.
func testQAWALCloseDuringPressureBlock(t *testing.T) {
	// Writer blocked on WAL full, Close() called -- writer should unblock.
	dir := t.TempDir()
	path := filepath.Join(dir, "close_pressure.blockvol")

	cfg := DefaultConfig()
	cfg.WALFullTimeout = 10 * time.Second // long timeout
	cfg.FlushInterval = 1 * time.Hour     // disable flusher

	entrySize := uint64(walEntryHeaderSize + 4096)
	walSize := entrySize * 2 // tiny: 2 entries

	v, err := CreateBlockVol(path, CreateOptions{
		VolumeSize: 1 * 1024 * 1024,
		BlockSize:  4096,
		WALSize:    walSize,
	}, cfg)
	if err != nil {
		t.Fatalf("CreateBlockVol: %v", err)
	}
	// Deliberately no `defer v.Close()`: the explicit mid-test Close()
	// below is the scenario under test.

	// Stop flusher.
	v.flusher.Stop()

	// Fill WAL (exactly two entries fit, flusher stopped -- no drainage).
	for i := 0; i < 2; i++ {
		if err := v.WriteLBA(uint64(i), makeBlock(byte('A'+i))); err != nil {
			t.Fatalf("WriteLBA(%d): %v", i, err)
		}
	}

	// Start a writer that will block on WAL full.
	writeResult := make(chan error, 1)
	go func() {
		writeResult <- v.WriteLBA(2, makeBlock('Z'))
	}()

	// Give it time to block.
	time.Sleep(50 * time.Millisecond)

	// Close the volume -- should unblock the writer.
	v.Close()

	select {
	case err := <-writeResult:
		// Writer unblocked (error expected: ErrWALFull or closed fd).
		if err == nil {
			t.Error("expected error from WriteLBA after Close, got nil")
		}
		t.Logf("close_during_pressure: writer unblocked with: %v", err)
	case <-time.After(5 * time.Second):
		t.Fatal("writer still blocked 5s after Close()")
	}
}
|||
@ -0,0 +1,185 @@ |
|||
#!/usr/bin/env bash
# sw-block-attach.sh — Discover and attach a SeaweedFS block volume via iSCSI
#
# Prerequisites:
#   - Linux host with iscsiadm (open-iscsi) installed
#   - Root access (for iscsiadm login)
#   - SeaweedFS volume server running with --block.dir set
#
# Usage:
#   sudo ./sw-block-attach.sh <target-addr> [volume-name]
#
# Examples:
#   sudo ./sw-block-attach.sh 10.0.0.1:3260         # discover all, attach first
#   sudo ./sw-block-attach.sh 10.0.0.1:3260 myvol   # attach specific volume
#   sudo ./sw-block-attach.sh 10.0.0.1:3260 --list  # list available targets
#   sudo ./sw-block-attach.sh --detach <target-iqn> <portal>  # detach a volume
#
# The script will:
#   1. Run iscsiadm discovery against the target portal
#   2. Find the matching volume IQN (or list all)
#   3. Login (attach) the iSCSI target
#   4. Print the attached block device path

set -euo pipefail

RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'

log()  { echo -e "${GREEN}[sw-block]${NC} $*"; }
warn() { echo -e "${YELLOW}[sw-block]${NC} $*"; }
fail() { echo -e "${RED}[sw-block]${NC} $*" >&2; exit 1; }

# find_attached_dev <target-iqn>
# Print the kernel disk name (e.g. "sdb") attached for the given IQN, or
# nothing if it cannot be determined. The trailing `|| true` keeps a
# no-match grep from aborting the whole script under `set -e -o pipefail`.
find_attached_dev() {
    iscsiadm -m session -P 3 2>/dev/null \
        | grep -A 20 "$1" \
        | grep "Attached scsi disk" \
        | awk '{print $4}' \
        | head -1 || true
}

usage() {
    cat <<'EOF'
Usage:
  sw-block-attach.sh <target-addr:port> [volume-name]   Attach a block volume
  sw-block-attach.sh <target-addr:port> --list          List available targets
  sw-block-attach.sh --detach <target-iqn> <portal>     Detach a block volume
  sw-block-attach.sh --status                           Show active sessions

Examples:
  sudo ./sw-block-attach.sh 10.0.0.1:3260
  sudo ./sw-block-attach.sh 10.0.0.1:3260 myvol
  sudo ./sw-block-attach.sh --detach iqn.2024-01.com.seaweedfs:vol.myvol 10.0.0.1:3260
EOF
    exit 1
}

# -------------------------------------------------------
# Preflight
# -------------------------------------------------------
if [[ $EUID -ne 0 ]]; then
    fail "This script must be run as root (for iscsiadm)"
fi

if ! command -v iscsiadm &>/dev/null; then
    fail "iscsiadm not found. Install open-iscsi: apt install open-iscsi"
fi

if [[ $# -lt 1 ]]; then
    usage
fi

# -------------------------------------------------------
# --status: show active sessions
# -------------------------------------------------------
if [[ "$1" == "--status" ]]; then
    log "Active iSCSI sessions:"
    iscsiadm -m session 2>/dev/null || echo " (none)"
    exit 0
fi

# -------------------------------------------------------
# --detach: logout from a target
# -------------------------------------------------------
if [[ "$1" == "--detach" ]]; then
    if [[ $# -lt 3 ]]; then
        fail "Usage: sw-block-attach.sh --detach <target-iqn> <portal>"
    fi
    TARGET_IQN="$2"
    PORTAL="$3"

    log "Logging out from $TARGET_IQN at $PORTAL..."
    iscsiadm -m node -T "$TARGET_IQN" -p "$PORTAL" --logout || {
        fail "Logout failed"
    }
    # Remove the node record too; ignore errors (record may already be gone).
    iscsiadm -m node -T "$TARGET_IQN" -p "$PORTAL" -o delete 2>/dev/null || true
    log "Detached successfully"
    exit 0
fi

# -------------------------------------------------------
# Attach mode
# -------------------------------------------------------
PORTAL="$1"
VOL_NAME="${2:-}"

# Add default port if not specified.
if [[ "$PORTAL" != *:* ]]; then
    PORTAL="${PORTAL}:3260"
fi

# -------------------------------------------------------
# Step 1: Discovery
# -------------------------------------------------------
log "Discovering targets at $PORTAL..."
DISCOVERY=$(iscsiadm -m discovery -t sendtargets -p "$PORTAL" 2>&1) || {
    fail "Discovery failed: $DISCOVERY"
}

if [[ -z "$DISCOVERY" ]]; then
    fail "No targets found at $PORTAL"
fi

# -------------------------------------------------------
# --list: just show targets
# -------------------------------------------------------
if [[ "$VOL_NAME" == "--list" ]]; then
    log "Available targets:"
    echo "$DISCOVERY"
    exit 0
fi

# -------------------------------------------------------
# Step 2: Find target IQN
# Discovery lines look like "<portal>,<tpgt> <iqn>"; field 2 is the IQN.
# BUG FIX (two related issues in the original):
#   1. Under `set -e -o pipefail`, a no-match grep exits 1 and aborts the
#      script inside the $(...) assignment before the empty-check could
#      run — hence the `|| true` guards.
#   2. The `echo "$DISCOVERY" >&2` used to sit AFTER fail(), which exits,
#      making it unreachable; the list is now printed before failing.
# -------------------------------------------------------
if [[ -n "$VOL_NAME" ]]; then
    # Match by volume name suffix.
    TARGET_IQN=$(echo "$DISCOVERY" | awk '{print $2}' | grep -F "$VOL_NAME" | head -1 || true)
    if [[ -z "$TARGET_IQN" ]]; then
        echo "$DISCOVERY" >&2
        fail "No target found matching '$VOL_NAME' (available targets listed above)"
    fi
else
    # Use the first target found.
    TARGET_IQN=$(echo "$DISCOVERY" | awk '{print $2}' | head -1 || true)
    if [[ -z "$TARGET_IQN" ]]; then
        fail "No targets found"
    fi
fi

log "Target: $TARGET_IQN"

# -------------------------------------------------------
# Step 3: Check if already logged in
# -------------------------------------------------------
if iscsiadm -m session 2>/dev/null | grep -q "$TARGET_IQN"; then
    warn "Already logged in to $TARGET_IQN"
    # Show the device
    DEV=$(find_attached_dev "$TARGET_IQN")
    if [[ -n "$DEV" ]]; then
        log "Device: /dev/$DEV"
    fi
    exit 0
fi

# -------------------------------------------------------
# Step 4: Login
# -------------------------------------------------------
log "Logging in to $TARGET_IQN..."
iscsiadm -m node -T "$TARGET_IQN" -p "$PORTAL" --login || {
    fail "Login failed"
}

# Wait for udev to create the block device node.
sleep 2

# -------------------------------------------------------
# Step 5: Find attached device
# -------------------------------------------------------
DEV=$(find_attached_dev "$TARGET_IQN")
if [[ -z "$DEV" ]]; then
    warn "Login succeeded but could not determine device path"
    warn "Check: lsblk, or iscsiadm -m session -P 3"
else
    log "Attached: /dev/$DEV"
    echo ""
    echo -e "  Block device: ${GREEN}/dev/$DEV${NC}"
    echo -e "  Target IQN:   $TARGET_IQN"
    echo -e "  Portal:       $PORTAL"
    echo ""
    echo "  To detach: sudo $0 --detach $TARGET_IQN $PORTAL"
fi
|||
@ -0,0 +1,93 @@ |
|||
package storage |
|||
|
|||
import ( |
|||
"fmt" |
|||
"sync" |
|||
|
|||
"github.com/seaweedfs/seaweedfs/weed/glog" |
|||
"github.com/seaweedfs/seaweedfs/weed/storage/blockvol" |
|||
) |
|||
|
|||
// BlockVolumeStore manages block volumes (iSCSI-backed).
|
|||
// It is a standalone component held by VolumeServer, not embedded into Store,
|
|||
// to keep the existing Store codebase unchanged.
|
|||
type BlockVolumeStore struct {
	mu      sync.RWMutex                  // guards volumes for all Add/Remove/Get/List/Close calls
	volumes map[string]*blockvol.BlockVol // open volumes, keyed by volume file path
}
|||
|
|||
// NewBlockVolumeStore creates a new block volume manager.
|
|||
func NewBlockVolumeStore() *BlockVolumeStore { |
|||
return &BlockVolumeStore{ |
|||
volumes: make(map[string]*blockvol.BlockVol), |
|||
} |
|||
} |
|||
|
|||
// AddBlockVolume opens and registers a block volume.
|
|||
func (bs *BlockVolumeStore) AddBlockVolume(path string, cfgs ...blockvol.BlockVolConfig) (*blockvol.BlockVol, error) { |
|||
bs.mu.Lock() |
|||
defer bs.mu.Unlock() |
|||
|
|||
if _, ok := bs.volumes[path]; ok { |
|||
return nil, fmt.Errorf("block volume already registered: %s", path) |
|||
} |
|||
|
|||
vol, err := blockvol.OpenBlockVol(path, cfgs...) |
|||
if err != nil { |
|||
return nil, fmt.Errorf("open block volume %s: %w", path, err) |
|||
} |
|||
|
|||
bs.volumes[path] = vol |
|||
glog.V(0).Infof("block volume registered: %s", path) |
|||
return vol, nil |
|||
} |
|||
|
|||
// RemoveBlockVolume closes and unregisters a block volume.
|
|||
func (bs *BlockVolumeStore) RemoveBlockVolume(path string) error { |
|||
bs.mu.Lock() |
|||
defer bs.mu.Unlock() |
|||
|
|||
vol, ok := bs.volumes[path] |
|||
if !ok { |
|||
return fmt.Errorf("block volume not found: %s", path) |
|||
} |
|||
|
|||
if err := vol.Close(); err != nil { |
|||
glog.Warningf("error closing block volume %s: %v", path, err) |
|||
} |
|||
delete(bs.volumes, path) |
|||
glog.V(0).Infof("block volume removed: %s", path) |
|||
return nil |
|||
} |
|||
|
|||
// GetBlockVolume returns a registered block volume by path.
|
|||
func (bs *BlockVolumeStore) GetBlockVolume(path string) (*blockvol.BlockVol, bool) { |
|||
bs.mu.RLock() |
|||
defer bs.mu.RUnlock() |
|||
vol, ok := bs.volumes[path] |
|||
return vol, ok |
|||
} |
|||
|
|||
// ListBlockVolumes returns the paths of all registered block volumes.
|
|||
func (bs *BlockVolumeStore) ListBlockVolumes() []string { |
|||
bs.mu.RLock() |
|||
defer bs.mu.RUnlock() |
|||
paths := make([]string, 0, len(bs.volumes)) |
|||
for p := range bs.volumes { |
|||
paths = append(paths, p) |
|||
} |
|||
return paths |
|||
} |
|||
|
|||
// Close closes all block volumes.
|
|||
func (bs *BlockVolumeStore) Close() { |
|||
bs.mu.Lock() |
|||
defer bs.mu.Unlock() |
|||
for path, vol := range bs.volumes { |
|||
if err := vol.Close(); err != nil { |
|||
glog.Warningf("error closing block volume %s: %v", path, err) |
|||
} |
|||
delete(bs.volumes, path) |
|||
} |
|||
glog.V(0).Infof("all block volumes closed") |
|||
} |
|||
@ -0,0 +1,109 @@ |
|||
package storage |
|||
|
|||
import ( |
|||
"path/filepath" |
|||
"testing" |
|||
|
|||
"github.com/seaweedfs/seaweedfs/weed/storage/blockvol" |
|||
) |
|||
|
|||
func createTestBlockVol(t *testing.T, dir, name string) string { |
|||
t.Helper() |
|||
path := filepath.Join(dir, name) |
|||
vol, err := blockvol.CreateBlockVol(path, blockvol.CreateOptions{ |
|||
VolumeSize: 1024 * 4096, |
|||
BlockSize: 4096, |
|||
ExtentSize: 65536, |
|||
WALSize: 1 << 20, |
|||
}) |
|||
if err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
vol.Close() |
|||
return path |
|||
} |
|||
|
|||
func TestStoreAddBlockVolume(t *testing.T) { |
|||
dir := t.TempDir() |
|||
path := createTestBlockVol(t, dir, "vol1.blk") |
|||
|
|||
bs := NewBlockVolumeStore() |
|||
defer bs.Close() |
|||
|
|||
vol, err := bs.AddBlockVolume(path) |
|||
if err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
if vol == nil { |
|||
t.Fatal("expected non-nil volume") |
|||
} |
|||
|
|||
// Duplicate add should fail.
|
|||
_, err = bs.AddBlockVolume(path) |
|||
if err == nil { |
|||
t.Fatal("expected error on duplicate add") |
|||
} |
|||
|
|||
// Should be listed.
|
|||
paths := bs.ListBlockVolumes() |
|||
if len(paths) != 1 || paths[0] != path { |
|||
t.Fatalf("ListBlockVolumes: got %v", paths) |
|||
} |
|||
|
|||
// Should be gettable.
|
|||
got, ok := bs.GetBlockVolume(path) |
|||
if !ok || got != vol { |
|||
t.Fatal("GetBlockVolume failed") |
|||
} |
|||
} |
|||
|
|||
func TestStoreRemoveBlockVolume(t *testing.T) { |
|||
dir := t.TempDir() |
|||
path := createTestBlockVol(t, dir, "vol1.blk") |
|||
|
|||
bs := NewBlockVolumeStore() |
|||
defer bs.Close() |
|||
|
|||
if _, err := bs.AddBlockVolume(path); err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
|
|||
if err := bs.RemoveBlockVolume(path); err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
|
|||
// Should be gone.
|
|||
if _, ok := bs.GetBlockVolume(path); ok { |
|||
t.Fatal("volume should have been removed") |
|||
} |
|||
|
|||
// Remove again should fail.
|
|||
if err := bs.RemoveBlockVolume(path); err == nil { |
|||
t.Fatal("expected error on double remove") |
|||
} |
|||
} |
|||
|
|||
func TestStoreCloseAllBlockVolumes(t *testing.T) { |
|||
dir := t.TempDir() |
|||
path1 := createTestBlockVol(t, dir, "vol1.blk") |
|||
path2 := createTestBlockVol(t, dir, "vol2.blk") |
|||
|
|||
bs := NewBlockVolumeStore() |
|||
|
|||
if _, err := bs.AddBlockVolume(path1); err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
if _, err := bs.AddBlockVolume(path2); err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
|
|||
if len(bs.ListBlockVolumes()) != 2 { |
|||
t.Fatalf("expected 2 volumes, got %d", len(bs.ListBlockVolumes())) |
|||
} |
|||
|
|||
bs.Close() |
|||
|
|||
if len(bs.ListBlockVolumes()) != 0 { |
|||
t.Fatalf("expected 0 volumes after close, got %d", len(bs.ListBlockVolumes())) |
|||
} |
|||
} |
|||
Write
Preview
Loading…
Cancel
Save
Reference in new issue