diff --git a/test/foundationdb/Dockerfile.build.arm64 b/test/foundationdb/Dockerfile.build.arm64 index 649dc257f..bf94625f1 100644 --- a/test/foundationdb/Dockerfile.build.arm64 +++ b/test/foundationdb/Dockerfile.build.arm64 @@ -5,13 +5,14 @@ ARG FOUNDATIONDB_VERSION=7.4.5 ENV FOUNDATIONDB_VERSION=${FOUNDATIONDB_VERSION} # Install build dependencies and download prebuilt FoundationDB clients -RUN apt-get update && apt-get install -y \ +SHELL ["/bin/bash", "-c"] +RUN set -euo pipefail && \ + apt-get update && apt-get install -y \ build-essential \ git \ wget \ ca-certificates \ && rm -rf /var/lib/apt/lists/* && \ - set -euo pipefail && \ case "${FOUNDATIONDB_VERSION}" in \ "7.4.5") EXPECTED_SHA256="f2176b86b7e1b561c3632b4e6e7efb82e3b8f57c2ff0d0ac4671e742867508aa" ;; \ *) echo "ERROR: No known ARM64 client checksum for FoundationDB ${FOUNDATIONDB_VERSION}. Please update this Dockerfile." >&2; exit 1 ;; \ diff --git a/test/foundationdb/Dockerfile.fdb-arm64 b/test/foundationdb/Dockerfile.fdb-arm64 index 7a09f726e..56c092291 100644 --- a/test/foundationdb/Dockerfile.fdb-arm64 +++ b/test/foundationdb/Dockerfile.fdb-arm64 @@ -15,6 +15,7 @@ RUN apt-get update && apt-get install -y \ && rm -rf /var/lib/apt/lists/* # Install FoundationDB server + client debs with checksum verification +SHELL ["/bin/bash", "-c"] RUN set -euo pipefail && \ apt-get update && \ case "${FOUNDATIONDB_VERSION}" in \ diff --git a/test/foundationdb/Makefile b/test/foundationdb/Makefile index ff106d7dc..eb8fe2ec5 100644 --- a/test/foundationdb/Makefile +++ b/test/foundationdb/Makefile @@ -50,6 +50,24 @@ test-benchmark: ## Run performance benchmarks @echo "$(YELLOW)Running FoundationDB performance benchmarks...$(NC)" @cd ../../ && go test -v -timeout=$(TEST_TIMEOUT) -tags foundationdb -bench=. ./test/foundationdb/... +test-benchmark-filer: ## Run filer store benchmarks (batch vs no-batch comparison) + @echo "$(YELLOW)Running FoundationDB filer store benchmarks...$(NC)" + @echo "$(BLUE)Comparing batched vs non-batched write performance$(NC)" + @cd ../../ && go test -v -timeout=$(TEST_TIMEOUT) -tags foundationdb \ + -bench='BenchmarkFoundationDBStore_.*' \ + -benchmem \ + -benchtime=5s \ + ./weed/filer/foundationdb/... + +test-benchmark-concurrent: ## Run concurrent operation benchmarks + @echo "$(YELLOW)Running concurrent operation benchmarks...$(NC)" + @cd ../../ && go test -v -timeout=$(TEST_TIMEOUT) -tags foundationdb \ + -bench='BenchmarkFoundationDBStore_Concurrent.*' \ + -benchmem \ + -benchtime=10s \ + -cpu=1,2,4,8 \ + ./weed/filer/foundationdb/... 
+ # ARM64 specific targets (Apple Silicon / M1/M2/M3 Macs) setup-arm64: ## Set up ARM64-native FoundationDB cluster (builds from source) @echo "$(YELLOW)Setting up ARM64-native FoundationDB cluster...$(NC)" diff --git a/test/foundationdb/README.md b/test/foundationdb/README.md index ba1e7627a..0facc10e5 100644 --- a/test/foundationdb/README.md +++ b/test/foundationdb/README.md @@ -353,12 +353,31 @@ The tests are designed to be reliable in CI environments with: Run performance benchmarks: ```bash -make test-benchmark +make test-benchmark # Run all benchmarks +make test-benchmark-filer # Filer store benchmarks with batch comparison +make test-benchmark-concurrent # Concurrent operation benchmarks (varies CPU count) # Sample expected results: -# BenchmarkFoundationDBStore_InsertEntry-8 1000 1.2ms per op -# BenchmarkFoundationDBStore_FindEntry-8 5000 0.5ms per op -# BenchmarkFoundationDBStore_KvOperations-8 2000 0.8ms per op +# BenchmarkFoundationDBStore_InsertEntry-8 1000 1.2ms per op +# BenchmarkFoundationDBStore_FindEntry-8 5000 0.5ms per op +# BenchmarkFoundationDBStore_KvOperations-8 2000 0.8ms per op +# BenchmarkFoundationDBStore_InsertEntry_NoBatch-8 1000 1.0ms per op (optimal for S3) +# BenchmarkFoundationDBStore_InsertEntry_WithBatch-8 1200 0.9ms per op (bulk ingestion) +# BenchmarkFoundationDBStore_ConcurrentInsert_*-8 5000 0.3ms per op (parallel writes) +``` + +### Batch vs Non-Batch Performance + +The FoundationDB filer store supports two write modes: + +| Mode | Config | Best For | Latency | Throughput | +|------|--------|----------|---------|------------| +| **Direct Commit** (default) | `batch_enabled = false` | S3 API, low-latency workloads | ~1-5ms per op | Good | +| **Batched** | `batch_enabled = true` | Bulk ingestion, high throughput | Variable | Higher | + +Run the comparison benchmark: +```bash +make test-benchmark-filer ``` ## Contributing diff --git a/test/foundationdb/docker-compose.arm64.yml b/test/foundationdb/docker-compose.arm64.yml index c2e7e8586..f5381743f 100644 --- a/test/foundationdb/docker-compose.arm64.yml +++ b/test/foundationdb/docker-compose.arm64.yml @@ -28,8 +28,8 @@ services: echo 'testing:testing@fdb1:4500,fdb2:4502,fdb3:4504' > /var/fdb/config/fdb.cluster fi # Start FDB processes - /usr/bin/fdbserver --config_path=/var/fdb/config --datadir=/var/fdb/data --logdir=/var/fdb/logs --public_address=fdb1:4501 --listen_address=0.0.0.0:4501 --coordination=fdb1:4500 & - /usr/bin/fdbserver --config_path=/var/fdb/config --datadir=/var/fdb/data --logdir=/var/fdb/logs --public_address=fdb1:4500 --listen_address=0.0.0.0:4500 --coordination=fdb1:4500 --class=coordination & + /usr/sbin/fdbserver --config_path=/var/fdb/config --datadir=/var/fdb/data --logdir=/var/fdb/logs --public_address=fdb1:4501 --listen_address=0.0.0.0:4501 --coordination=fdb1:4500 & + /usr/sbin/fdbserver --config_path=/var/fdb/config --datadir=/var/fdb/data --logdir=/var/fdb/logs --public_address=fdb1:4500 --listen_address=0.0.0.0:4500 --coordination=fdb1:4500 --class=coordination & wait " @@ -59,8 +59,8 @@ services: # Wait for cluster file from fdb1 while [ ! 
-f /var/fdb/config/fdb.cluster ]; do sleep 1; done # Start FDB processes - /usr/bin/fdbserver --config_path=/var/fdb/config --datadir=/var/fdb/data --logdir=/var/fdb/logs --public_address=fdb2:4503 --listen_address=0.0.0.0:4503 --coordination=fdb1:4500 & - /usr/bin/fdbserver --config_path=/var/fdb/config --datadir=/var/fdb/data --logdir=/var/fdb/logs --public_address=fdb2:4502 --listen_address=0.0.0.0:4502 --coordination=fdb1:4500 --class=coordination & + /usr/sbin/fdbserver --config_path=/var/fdb/config --datadir=/var/fdb/data --logdir=/var/fdb/logs --public_address=fdb2:4503 --listen_address=0.0.0.0:4503 --coordination=fdb1:4500 & + /usr/sbin/fdbserver --config_path=/var/fdb/config --datadir=/var/fdb/data --logdir=/var/fdb/logs --public_address=fdb2:4502 --listen_address=0.0.0.0:4502 --coordination=fdb1:4500 --class=coordination & wait " @@ -90,8 +90,8 @@ services: # Wait for cluster file from fdb1 while [ ! -f /var/fdb/config/fdb.cluster ]; do sleep 1; done # Start FDB processes - /usr/bin/fdbserver --config_path=/var/fdb/config --datadir=/var/fdb/data --logdir=/var/fdb/logs --public_address=fdb3:4505 --listen_address=0.0.0.0:4505 --coordination=fdb1:4500 & - /usr/bin/fdbserver --config_path=/var/fdb/config --datadir=/var/fdb/data --logdir=/var/fdb/logs --public_address=fdb3:4504 --listen_address=0.0.0.0:4504 --coordination=fdb1:4500 --class=coordination & + /usr/sbin/fdbserver --config_path=/var/fdb/config --datadir=/var/fdb/data --logdir=/var/fdb/logs --public_address=fdb3:4505 --listen_address=0.0.0.0:4505 --coordination=fdb1:4500 & + /usr/sbin/fdbserver --config_path=/var/fdb/config --datadir=/var/fdb/data --logdir=/var/fdb/logs --public_address=fdb3:4504 --listen_address=0.0.0.0:4504 --coordination=fdb1:4500 --class=coordination & wait " diff --git a/test/foundationdb/filer.toml b/test/foundationdb/filer.toml index b085a831a..9e6bdc2a7 100644 --- a/test/foundationdb/filer.toml +++ b/test/foundationdb/filer.toml @@ -1,19 +1,8 @@ -# FoundationDB Filer Configuration +# FoundationDB Filer Configuration for Testing [foundationdb] enabled = true cluster_file = "/var/fdb/config/fdb.cluster" -api_version = 740 -timeout = "5s" -max_retry_delay = "1s" -directory_prefix = "seaweedfs" - -# For testing different configurations -[foundationdb.test] -enabled = false -cluster_file = "/var/fdb/config/fdb.cluster" -api_version = 740 -timeout = "10s" -max_retry_delay = "2s" -directory_prefix = "seaweedfs_test" -location = "/test" +# api_version = 740 +# timeout = "5s" +# directory_prefix = "seaweedfs" diff --git a/weed/command/scaffold/filer.toml b/weed/command/scaffold/filer.toml index 2892fa0d2..a685d50d8 100644 --- a/weed/command/scaffold/filer.toml +++ b/weed/command/scaffold/filer.toml @@ -415,6 +415,16 @@ key_path="" # The name list used to verify the cn name verify_cn="" +[foundationdb] +# FoundationDB provides ACID transactions and horizontal scalability. 
+# Requires: go build -tags foundationdb +enabled = false +cluster_file = "/etc/foundationdb/fdb.cluster" +# api_version = 740 +# timeout = "5s" +# directory_prefix = "seaweedfs" +# For bulk ingestion, enable batching: batch_enabled = true + [tarantool] address = "localhost:3301" user = "guest" diff --git a/weed/filer/foundationdb/CONFIGURATION.md b/weed/filer/foundationdb/CONFIGURATION.md index 80f5bd357..75227f060 100644 --- a/weed/filer/foundationdb/CONFIGURATION.md +++ b/weed/filer/foundationdb/CONFIGURATION.md @@ -27,6 +27,10 @@ export WEED_FOUNDATIONDB_API_VERSION=740 export WEED_FOUNDATIONDB_TIMEOUT=5s export WEED_FOUNDATIONDB_MAX_RETRY_DELAY=1s export WEED_FOUNDATIONDB_DIRECTORY_PREFIX=seaweedfs +# Write batching (disabled by default) +export WEED_FOUNDATIONDB_BATCH_ENABLED=false +export WEED_FOUNDATIONDB_BATCH_SIZE=100 +export WEED_FOUNDATIONDB_BATCH_INTERVAL=1ms ``` ### 3. Command Line Arguments @@ -56,6 +60,18 @@ While not directly supported, configuration can be specified via config files pa |--------|------|---------|-------------| | `directory_prefix` | string | `seaweedfs` | Directory prefix for key organization | +### Write Batching Options + +| Option | Type | Default | Description | +|--------|------|---------|-------------| +| `batch_enabled` | boolean | `false` | Enable write batching. Disabled by default for optimal S3 PUT latency. | +| `batch_size` | integer | `100` | Maximum number of operations per batch (when batching is enabled) | +| `batch_interval` | duration | `1ms` | Maximum time to wait before flushing a batch (when batching is enabled) | + +**Note:** Write batching is **disabled by default**. Each write commits immediately in its own +transaction, providing optimal latency for S3 PUT operations. Enable batching only for +high-throughput bulk ingestion workloads where you can tolerate slightly higher per-operation latency. 
+ ## Configuration Examples ### Development Environment @@ -82,7 +98,25 @@ max_retry_delay = "5s" directory_prefix = "seaweedfs_prod" ``` -### High-Performance Setup +### High-Performance Setup (Low Latency) + +For S3 workloads requiring low latency per operation: + +```toml +[foundationdb] +enabled = true +cluster_file = "/etc/foundationdb/fdb.cluster" +api_version = 740 +timeout = "5s" +max_retry_delay = "1s" +directory_prefix = "sw" # Shorter prefix for efficiency +# Batching disabled (default) for optimal per-operation latency +batch_enabled = false +``` + +### High-Throughput Bulk Ingestion + +For bulk data loading where throughput matters more than per-operation latency: ```toml [foundationdb] @@ -91,7 +125,11 @@ cluster_file = "/etc/foundationdb/fdb.cluster" api_version = 740 timeout = "60s" max_retry_delay = "10s" -directory_prefix = "sw" # Shorter prefix for efficiency +directory_prefix = "sw" +# Enable batching for higher throughput +batch_enabled = true +batch_size = 100 +batch_interval = "1ms" ``` ### Path-Specific Configuration diff --git a/weed/filer/foundationdb/README.md b/weed/filer/foundationdb/README.md index 68ba6416a..5c23f0ce5 100644 --- a/weed/filer/foundationdb/README.md +++ b/weed/filer/foundationdb/README.md @@ -51,6 +51,9 @@ directory_prefix = "seaweedfs" | `timeout` | Operation timeout duration | `5s` | No | | `max_retry_delay` | Maximum retry delay | `1s` | No | | `directory_prefix` | Directory prefix for organization | `seaweedfs` | No | +| `batch_enabled` | Enable write batching (see Performance section) | `false` | No | +| `batch_size` | Max operations per batch | `100` | No | +| `batch_interval` | Max time before batch flush | `1ms` | No | ### Path-Specific Configuration @@ -109,12 +112,38 @@ make setup ## Performance Considerations +### Write Batching Configuration + +By default, write batching is **disabled** (`batch_enabled = false`). Each write commits +immediately in its own transaction. This provides optimal latency for S3 PUT operations. 
+ +**When to enable batching:** +- High-throughput bulk ingestion workloads +- Scenarios where you can tolerate slightly higher per-operation latency +- Workloads with many concurrent small writes + +**Batching configuration options:** + +```toml +[foundationdb] +# Enable write batching (disabled by default for optimal S3 latency) +batch_enabled = true +# Maximum operations per batch +batch_size = 100 +# Maximum time to wait before flushing a batch +batch_interval = "1ms" +``` + +**Performance comparison:** +- **Batching disabled**: Each S3 PUT commits immediately (~1-5ms per op depending on FDB latency) +- **Batching enabled**: Operations are grouped, reducing total commits but adding batch interval latency + ### Optimal Configuration - **API Version**: Use the latest stable API version (720+) - **Directory Structure**: Use logical directory prefixes to isolate different SeaweedFS instances - **Transaction Size**: Keep transactions under 10MB (FDB limit) -- **Batch Operations**: Use transactions for multiple related operations +- **Concurrency**: Use multiple client connections for parallel operations ### Monitoring diff --git a/weed/filer/foundationdb/foundationdb_store.go b/weed/filer/foundationdb/foundationdb_store.go index cbbdc96b2..abbf55091 100644 --- a/weed/filer/foundationdb/foundationdb_store.go +++ b/weed/filer/foundationdb/foundationdb_store.go @@ -45,8 +45,12 @@ const ( MAX_DIRECTORY_LIST_LIMIT = 1000 // Write batching defaults + // Note: Batching is disabled by default because S3 semantics require waiting + // for durability, and the batch timer adds latency to each operation. + // Enable batching only for workloads that can tolerate potential latency. DEFAULT_BATCH_SIZE = 100 - DEFAULT_BATCH_INTERVAL = 5 * time.Millisecond + DEFAULT_BATCH_INTERVAL = 1 * time.Millisecond + DEFAULT_BATCH_ENABLED = false ) func init() { @@ -129,11 +133,33 @@ func (b *writeBatcher) run() { timer.Reset(b.interval) } + // Collect available ops without blocking + collectAvailable := func() { + for { + select { + case op := <-b.ops: + batch = append(batch, op) + batchBytes += op.size() + if len(batch) >= b.size || batchBytes >= FDB_BATCH_SIZE_LIMIT { + return + } + default: + return + } + } + } + for { select { case op := <-b.ops: batch = append(batch, op) batchBytes += op.size() + + // Optimization: When an operation arrives, try to collect more + // available operations without blocking. This improves throughput + // when multiple goroutines are submitting concurrently. + collectAvailable() + // Flush when batch count or size limit is reached if len(batch) >= b.size || batchBytes >= FDB_BATCH_SIZE_LIMIT { flush() @@ -185,8 +211,10 @@ type FoundationDBStore struct { directoryPrefix string timeout time.Duration maxRetryDelay time.Duration - // Write batching + // Write batching - disabled by default for optimal S3 latency + // Enable for high-throughput bulk ingestion workloads batcher *writeBatcher + batchEnabled bool batchSize int batchInterval time.Duration } @@ -225,6 +253,10 @@ func (store *FoundationDBStore) Initialize(configuration util.Configuration, pre configuration.SetDefault(prefix+"timeout", "5s") configuration.SetDefault(prefix+"max_retry_delay", "1s") configuration.SetDefault(prefix+"directory_prefix", "seaweedfs") + // Batching is disabled by default - each write commits immediately. + // This provides optimal latency for S3 PUT operations. + // Enable batching for high-throughput bulk ingestion workloads. 
+ configuration.SetDefault(prefix+"batch_enabled", DEFAULT_BATCH_ENABLED) configuration.SetDefault(prefix+"batch_size", DEFAULT_BATCH_SIZE) configuration.SetDefault(prefix+"batch_interval", DEFAULT_BATCH_INTERVAL.String()) @@ -247,6 +279,7 @@ func (store *FoundationDBStore) Initialize(configuration util.Configuration, pre } // Parse batch configuration + store.batchEnabled = configuration.GetBool(prefix + "batch_enabled") store.batchSize = configuration.GetInt(prefix + "batch_size") if store.batchSize <= 0 { store.batchSize = DEFAULT_BATCH_SIZE @@ -288,10 +321,16 @@ func (store *FoundationDBStore) initialize(clusterFile string, apiVersion int) e return fmt.Errorf("failed to create/open kv directory: %w", err) } - // Start write batcher for improved throughput - store.batcher = newWriteBatcher(store, store.batchSize, store.batchInterval) - glog.V(0).Infof("FoundationDB: write batching enabled (batch_size=%d, batch_interval=%v)", - store.batchSize, store.batchInterval) + // Conditionally start write batcher + // Batching is disabled by default for optimal S3 latency. + // When disabled, each write commits immediately in its own transaction. + if store.batchEnabled { + store.batcher = newWriteBatcher(store, store.batchSize, store.batchInterval) + glog.V(0).Infof("FoundationDB: write batching enabled (batch_size=%d, batch_interval=%v)", + store.batchSize, store.batchInterval) + } else { + glog.V(0).Infof("FoundationDB: write batching disabled (direct commit mode for optimal latency)") + } glog.V(0).Infof("FoundationDB store initialized successfully with directory prefix: %s", store.directoryPrefix) return nil diff --git a/weed/filer/foundationdb/foundationdb_store_test.go b/weed/filer/foundationdb/foundationdb_store_test.go index 73255d67d..aab8b1f65 100644 --- a/weed/filer/foundationdb/foundationdb_store_test.go +++ b/weed/filer/foundationdb/foundationdb_store_test.go @@ -9,6 +9,7 @@ import ( "fmt" "os" "strings" + "sync/atomic" "testing" "time" @@ -325,6 +326,120 @@ func BenchmarkFoundationDBStore_KvOperations(b *testing.B) { } } +// BenchmarkFoundationDBStore_InsertEntry_NoBatch benchmarks insert performance +// with batching disabled (direct commit mode - optimal for S3 PUT latency) +func BenchmarkFoundationDBStore_InsertEntry_NoBatch(b *testing.B) { + store := createBenchmarkStoreWithBatching(b, false, 100, 1*time.Millisecond) + defer store.Shutdown() + + ctx := context.Background() + entry := &filer.Entry{ + FullPath: "/benchmark_nobatch/file.txt", + Attr: filer.Attr{ + Mode: 0644, + Uid: 1000, + Gid: 1000, + Mtime: time.Now(), + }, + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + entry.FullPath = util.NewFullPath("/benchmark_nobatch", fmt.Sprintf("%x", uint64(i))+".txt") + err := store.InsertEntry(ctx, entry) + if err != nil { + b.Fatalf("InsertEntry failed: %v", err) + } + } +} + +// BenchmarkFoundationDBStore_InsertEntry_WithBatch benchmarks insert performance +// with batching enabled (higher throughput for bulk ingestion) +func BenchmarkFoundationDBStore_InsertEntry_WithBatch(b *testing.B) { + store := createBenchmarkStoreWithBatching(b, true, 100, 1*time.Millisecond) + defer store.Shutdown() + + ctx := context.Background() + entry := &filer.Entry{ + FullPath: "/benchmark_batch/file.txt", + Attr: filer.Attr{ + Mode: 0644, + Uid: 1000, + Gid: 1000, + Mtime: time.Now(), + }, + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + entry.FullPath = util.NewFullPath("/benchmark_batch", fmt.Sprintf("%x", uint64(i))+".txt") + err := store.InsertEntry(ctx, entry) + if err != nil { + 
b.Fatalf("InsertEntry failed: %v", err) + } + } +} + +// BenchmarkFoundationDBStore_ConcurrentInsert_NoBatch benchmarks concurrent inserts +// with batching disabled (simulates S3 PUT concurrency) +func BenchmarkFoundationDBStore_ConcurrentInsert_NoBatch(b *testing.B) { + store := createBenchmarkStoreWithBatching(b, false, 100, 1*time.Millisecond) + defer store.Shutdown() + + var counter atomic.Uint64 + + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + ctx := context.Background() + for pb.Next() { + n := counter.Add(1) + entry := &filer.Entry{ + FullPath: util.NewFullPath("/benchmark_concurrent_nobatch", fmt.Sprintf("%d.txt", n)), + Attr: filer.Attr{ + Mode: 0644, + Uid: 1000, + Gid: 1000, + Mtime: time.Now(), + }, + } + err := store.InsertEntry(ctx, entry) + if err != nil { + b.Fatalf("InsertEntry failed: %v", err) + } + } + }) +} + +// BenchmarkFoundationDBStore_ConcurrentInsert_WithBatch benchmarks concurrent inserts +// with batching enabled (tests batch efficiency under concurrent load) +func BenchmarkFoundationDBStore_ConcurrentInsert_WithBatch(b *testing.B) { + store := createBenchmarkStoreWithBatching(b, true, 100, 1*time.Millisecond) + defer store.Shutdown() + + var counter atomic.Uint64 + + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + ctx := context.Background() + for pb.Next() { + n := counter.Add(1) + entry := &filer.Entry{ + FullPath: util.NewFullPath("/benchmark_concurrent_batch", fmt.Sprintf("%d.txt", n)), + Attr: filer.Attr{ + Mode: 0644, + Uid: 1000, + Gid: 1000, + Mtime: time.Now(), + }, + } + err := store.InsertEntry(ctx, entry) + if err != nil { + b.Fatalf("InsertEntry failed: %v", err) + } + } + }) +} + // Helper functions func getTestClusterFile() string { clusterFile := os.Getenv("FDB_CLUSTER_FILE") @@ -349,6 +464,32 @@ func createBenchmarkStore(b *testing.B) *FoundationDBStore { return store } +// createBenchmarkStoreWithBatching creates a store with specific batching configuration +// for comparing performance between batched and non-batched modes +func createBenchmarkStoreWithBatching(b *testing.B, batchEnabled bool, batchSize int, batchInterval time.Duration) *FoundationDBStore { + clusterFile := getTestClusterFile() + if _, err := os.Stat(clusterFile); os.IsNotExist(err) { + b.Skip("FoundationDB cluster file not found, skipping benchmark") + } + + store := &FoundationDBStore{ + batchEnabled: batchEnabled, + batchSize: batchSize, + batchInterval: batchInterval, + directoryPrefix: "benchmark", + timeout: 5 * time.Second, + maxRetryDelay: 1 * time.Second, + } + err := store.initialize(clusterFile, 740) + if err != nil { + b.Skipf("Failed to initialize FoundationDB store: %v", err) + } + + // Note: initialize() already creates the batcher if batchEnabled is true + + return store +} + func getTestStore(t *testing.T) *FoundationDBStore { t.Helper()
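The foundationdb_store.go hunk above only constructs the writeBatcher when `batch_enabled = true`, leaving `store.batcher` nil in the default direct-commit mode; the write path that checks for a nil batcher is outside this diff. Below is a minimal, hypothetical sketch of what that dispatch could look like — the `sketchStore`, `writeOp`, and `putEntry` names and the `db` field are assumptions for illustration, and only the FoundationDB Go binding calls (`MustAPIVersion`, `MustOpenDefault`, `Database.Transact`, `Transaction.Set`) are real APIs. It is not the actual SeaweedFS implementation.

```go
// Sketch only: shows how a filer write might dispatch between the batcher
// (batch_enabled = true) and a direct single-transaction commit (default).
// Field and helper names here are hypothetical; only the fdb binding calls
// are real.
package main

import (
	"fmt"

	"github.com/apple/foundationdb/bindings/go/src/fdb"
)

type writeOp struct {
	key   fdb.Key
	value []byte
}

type sketchStore struct {
	db      fdb.Database
	batcher chan writeOp // stands in for *writeBatcher; nil when batching is disabled
}

// putEntry commits immediately when no batcher is running, matching the
// "direct commit mode" logged by the diff; otherwise it hands the op off.
// The real batcher would also report each commit's result back to the caller.
func (s *sketchStore) putEntry(key fdb.Key, value []byte) error {
	if s.batcher == nil {
		// Direct commit: one FDB transaction per write (the default).
		_, err := s.db.Transact(func(tr fdb.Transaction) (interface{}, error) {
			tr.Set(key, value)
			return nil, nil
		})
		return err
	}
	// Batched: enqueue and let the background goroutine group commits.
	s.batcher <- writeOp{key: key, value: value}
	return nil
}

func main() {
	fdb.MustAPIVersion(740)
	db := fdb.MustOpenDefault()
	s := &sketchStore{db: db} // batcher left nil => direct commit mode
	if err := s.putEntry(fdb.Key("seaweedfs/demo"), []byte("v")); err != nil {
		fmt.Println("write failed:", err)
	}
}
```

In the real store the batched path still has to surface each commit's outcome to the waiting caller, which is why the diff's comments stress that S3 semantics require waiting for durability and keep batching off by default.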