Browse Source

Add volume server integration test suite and CI workflow (#8322)

* docs(volume_server): add integration test development plan

* test(volume_server): add integration harness and profile matrix

* test(volume_server/http): add admin and options integration coverage

* test(volume_server/grpc): add state and status integration coverage

* test(volume_server): auto-build weed binary and harden cluster startup

* test(volume_server/http): add upload read range head delete coverage

* test(volume_server/grpc): expand admin lifecycle and state coverage

* docs(volume_server): update progress tracker for implemented tests

* test(volume_server/http): cover if-none-match and invalid-range branches

* test(volume_server/grpc): add batch delete integration coverage

* docs(volume_server): log latest HTTP and gRPC test coverage

* ci(volume_server): run volume server integration tests in github actions

* test(volume_server/grpc): add needle status configure ping and leave coverage

* docs(volume_server): record additional grpc coverage progress

* test(volume_server/grpc): add vacuum integration coverage

* docs(volume_server): record vacuum test coverage progress

* test(volume_server/grpc): add read and write needle blob error-path coverage

* docs(volume_server): record data rw grpc coverage progress

* test(volume_server/http): add jwt auth integration coverage

* test(volume_server/grpc): add sync copy and stream error-path coverage

* docs(volume_server): record jwt and sync/copy test coverage

* test(volume_server/grpc): add scrub and query integration coverage

* test(volume_server/grpc): add volume tail sender and receiver coverage

* docs(volume_server): record scrub query and tail test progress

* test(volume_server/grpc): add readonly writable and collection lifecycle coverage

* test(volume_server/http): add public-port cors and method parity coverage

* test(volume_server/grpc): add blob meta and read-all success path coverage

* test(volume_server/grpc): expand scrub and query variation coverage

* test(volume_server/grpc): add tiering and remote fetch error-path coverage

* test(volume_server/http): add unchanged write and delete edge-case coverage

* test(volume_server/grpc): add ping unknown and unreachable target coverage

* test(volume_server/grpc): add volume delete only-empty variation coverage

* test(volume_server/http): add jwt fid-mismatch auth coverage

* test(volume_server/grpc): add scrub ec auto-select empty coverage

* test(volume_server/grpc): stabilize ping timestamp assertion

* docs(volume_server): update integration coverage progress log

* test(volume_server/grpc): add tier remote backend and config variation coverage

* docs(volume_server): record tier remote variation progress

* test(volume_server/grpc): add incremental copy and receive-file protocol coverage

* test(volume_server/http): add read path shape and if-modified-since coverage

* test(volume_server/grpc): add copy-file compaction and receive-file success coverage

* test(volume_server/http): add passthrough headers and static asset coverage

* test(volume_server/grpc): add ping filer unreachable coverage

* docs(volume_server): record copy receive and http variant progress

* test(volume_server/grpc): add erasure coding maintenance and missing-path coverage

* docs(volume_server): record initial erasure coding rpc coverage

* test(volume_server/http): add multi-range multipart response coverage

* docs(volume_server): record multi-range http coverage progress

* test(volume_server/grpc): add query empty-stripe no-match coverage

* docs(volume_server): record query no-match stream behavior coverage

* test(volume_server/http): add upload throttling timeout and replicate bypass coverage

* docs(volume_server): record upload throttling coverage progress

* test(volume_server/http): add download throttling timeout coverage

* docs(volume_server): record download throttling coverage progress

* test(volume_server/http): add jwt wrong-cookie fid mismatch coverage

* docs(volume_server): record jwt wrong-cookie mismatch coverage

* test(volume_server/http): add jwt expired-token rejection coverage

* docs(volume_server): record jwt expired-token coverage

* test(volume_server/http): add jwt query and cookie transport coverage

* docs(volume_server): record jwt token transport coverage

* test(volume_server/http): add jwt token-source precedence coverage

* docs(volume_server): record jwt token-source precedence coverage

* test(volume_server/http): add jwt header-over-cookie precedence coverage

* docs(volume_server): record jwt header cookie precedence coverage

* test(volume_server/http): add jwt query-over-cookie precedence coverage

* docs(volume_server): record jwt query cookie precedence coverage

* test(volume_server/grpc): add setstate version mismatch and nil-state coverage

* docs(volume_server): record setstate validation coverage

* test(volume_server/grpc): add readonly persist-true lifecycle coverage

* docs(volume_server): record readonly persist variation coverage

* test(volume_server/http): add options origin cors header coverage

* docs(volume_server): record options origin cors coverage

* test(volume_server/http): add trace unsupported-method parity coverage

* docs(volume_server): record trace method parity coverage

* test(volume_server/grpc): add batch delete cookie-check variation coverage

* docs(volume_server): record batch delete cookie-check coverage

* test(volume_server/grpc): add admin lifecycle missing and maintenance variants

* docs(volume_server): record admin lifecycle edge-case coverage

* test(volume_server/grpc): add mixed batch delete status matrix coverage

* docs(volume_server): record mixed batch delete matrix coverage

* test(volume_server/http): add jwt-profile ui access gating coverage

* docs(volume_server): record jwt ui-gating http coverage

* test(volume_server/http): add propfind unsupported-method parity coverage

* docs(volume_server): record propfind method parity coverage

* test(volume_server/grpc): add volume configure success and rollback-path coverage

* docs(volume_server): record volume configure branch coverage

* test(volume_server/grpc): add volume needle status missing-path coverage

* docs(volume_server): record volume needle status error-path coverage

* test(volume_server/http): add readDeleted query behavior coverage

* docs(volume_server): record readDeleted http behavior coverage

* test(volume_server/http): add delete ts override parity coverage

* docs(volume_server): record delete ts parity coverage

* test(volume_server/grpc): add invalid blob/meta offset coverage

* docs(volume_server): record invalid blob/meta offset coverage

* test(volume_server/grpc): add read-all mixed volume abort coverage

* docs(volume_server): record read-all mixed-volume abort coverage

* test(volume_server/http): assert head response body parity

* docs(volume_server): record head body parity assertion

* test(volume_server/grpc): assert status state and memory payload completeness

* docs(volume_server): record volume server status payload coverage

* test(volume_server/grpc): add batch delete chunk-manifest rejection coverage

* docs(volume_server): record batch delete chunk-manifest coverage

* test(volume_server/grpc): add query cookie-mismatch eof parity coverage

* docs(volume_server): record query cookie-mismatch parity coverage

* test(volume_server/grpc): add ping master success target coverage

* docs(volume_server): record ping master success coverage

* test(volume_server/http): add head if-none-match conditional parity

* docs(volume_server): record head if-none-match parity coverage

* test(volume_server/http): add head if-modified-since parity coverage

* docs(volume_server): record head if-modified-since parity coverage

* test(volume_server/http): add connect unsupported-method parity coverage

* docs(volume_server): record connect method parity coverage

* test(volume_server/http): assert options allow-headers cors parity

* docs(volume_server): record options allow-headers coverage

* test(volume_server/framework): add dual volume cluster integration harness

* test(volume_server/http): add missing-local read mode proxy redirect local coverage

* docs(volume_server): record read mode missing-local matrix coverage

* test(volume_server/http): add download over-limit replica proxy fallback coverage

* docs(volume_server): record download replica fallback coverage

* test(volume_server/http): add missing-local readDeleted proxy redirect parity coverage

* docs(volume_server): record missing-local readDeleted mode coverage

* test(volume_server/framework): add single-volume cluster with filer harness

* test(volume_server/grpc): add ping filer success target coverage

* docs(volume_server): record ping filer success coverage

* test(volume_server/http): add proxied-loop guard download timeout coverage

* docs(volume_server): record proxied-loop download coverage

* test(volume_server/http): add disabled upload and download limit coverage

* docs(volume_server): record disabled throttling path coverage

* test(volume_server/grpc): add idempotent volume server leave coverage

* docs(volume_server): record leave idempotence coverage

* test(volume_server/http): add redirect collection query preservation coverage

* docs(volume_server): record redirect collection query coverage

* test(volume_server/http): assert admin server headers on status and health

* docs(volume_server): record admin server header coverage

* test(volume_server/http): assert healthz request-id echo parity

* docs(volume_server): record healthz request-id parity coverage

* test(volume_server/http): add over-limit invalid-vid download branch coverage

* docs(volume_server): record over-limit invalid-vid branch coverage

* test(volume_server/http): add public-port static asset coverage

* docs(volume_server): record public static endpoint coverage

* test(volume_server/http): add public head method parity coverage

* docs(volume_server): record public head parity coverage

* test(volume_server/http): add throttling wait-then-proceed path coverage

* docs(volume_server): record throttling wait-then-proceed coverage

* test(volume_server/http): add read cookie-mismatch not-found coverage

* docs(volume_server): record read cookie-mismatch coverage

* test(volume_server/http): add throttling timeout-recovery coverage

* docs(volume_server): record throttling timeout-recovery coverage

* test(volume_server/grpc): add ec generate mount info unmount lifecycle coverage

* docs(volume_server): record ec positive lifecycle coverage

* test(volume_server/grpc): add ec shard read and blob delete lifecycle coverage

* docs(volume_server): record ec shard read/blob delete lifecycle coverage

* test(volume_server/grpc): add ec rebuild and to-volume error branch coverage

* docs(volume_server): record ec rebuild and to-volume branch coverage

* test(volume_server/grpc): add ec shards-to-volume success roundtrip coverage

* docs(volume_server): record ec shards-to-volume success coverage

* test(volume_server/grpc): add ec receive and copy-file missing-source coverage

* docs(volume_server): record ec receive and copy-file coverage

* test(volume_server/grpc): add ec last-shard delete cleanup coverage

* docs(volume_server): record ec last-shard delete cleanup coverage

* test(volume_server/grpc): add volume copy success path coverage

* docs(volume_server): record volume copy success coverage

* test(volume_server/grpc): add volume copy overwrite-destination coverage

* docs(volume_server): record volume copy overwrite coverage

* test(volume_server/http): add write error-path variant coverage

* docs(volume_server): record http write error-path coverage

* test(volume_server/http): add conditional header precedence coverage

* docs(volume_server): record conditional header precedence coverage

* test(volume_server/http): add oversized combined range guard coverage

* docs(volume_server): record oversized range guard coverage

* test(volume_server/http): add image resize and crop read coverage

* docs(volume_server): record image transform coverage

* test(volume_server/http): add chunk-manifest expansion and bypass coverage

* docs(volume_server): record chunk-manifest read coverage

* test(volume_server/http): add compressed read encoding matrix coverage

* docs(volume_server): record compressed read matrix coverage

* test(volume_server/grpc): add tail receiver source replication coverage

* docs(volume_server): record tail receiver replication coverage

* test(volume_server/grpc): add tail sender large-needle chunking coverage

* docs(volume_server): record tail sender chunking coverage

* test(volume_server/grpc): add ec-backed volume needle status coverage

* docs(volume_server): record ec-backed needle status coverage

* test(volume_server/grpc): add ec shard copy from peer success coverage

* docs(volume_server): record ec shard copy success coverage

* test(volume_server/http): add chunk-manifest delete child cleanup coverage

* docs(volume_server): record chunk-manifest delete cleanup coverage

* test(volume_server/http): add chunk-manifest delete failure-path coverage

* docs(volume_server): record chunk-manifest delete failure coverage

* test(volume_server/grpc): add ec shard copy source-unavailable coverage

* docs(volume_server): record ec shard copy source-unavailable coverage

* parallel
pull/8339/head
Chris Lu 4 weeks ago
committed by GitHub
parent
commit
beeb375a88
No known key found for this signature in database GPG Key ID: B5690EEEBB952194
  1. 122
      .github/workflows/volume-server-integration-tests.yml
  2. 1129
      test/volume_server/DEV_PLAN.md
  3. 7
      test/volume_server/Makefile
  4. 27
      test/volume_server/README.md
  5. 442
      test/volume_server/framework/cluster.go
  6. 293
      test/volume_server/framework/cluster_dual.go
  7. 91
      test/volume_server/framework/cluster_with_filer.go
  8. 8
      test/volume_server/framework/fault_injection.go
  9. 28
      test/volume_server/framework/grpc_client.go
  10. 34
      test/volume_server/framework/http_client.go
  11. 56
      test/volume_server/framework/volume_fixture.go
  12. 445
      test/volume_server/grpc/admin_extra_test.go
  13. 215
      test/volume_server/grpc/admin_lifecycle_test.go
  14. 177
      test/volume_server/grpc/admin_readonly_collection_test.go
  15. 264
      test/volume_server/grpc/batch_delete_test.go
  16. 431
      test/volume_server/grpc/copy_receive_variants_test.go
  17. 284
      test/volume_server/grpc/copy_sync_test.go
  18. 146
      test/volume_server/grpc/data_rw_test.go
  19. 273
      test/volume_server/grpc/data_stream_success_test.go
  20. 777
      test/volume_server/grpc/erasure_coding_test.go
  21. 139
      test/volume_server/grpc/health_state_test.go
  22. 385
      test/volume_server/grpc/scrub_query_test.go
  23. 206
      test/volume_server/grpc/tail_test.go
  24. 236
      test/volume_server/grpc/tiering_remote_test.go
  25. 87
      test/volume_server/grpc/vacuum_test.go
  26. 174
      test/volume_server/http/admin_test.go
  27. 419
      test/volume_server/http/auth_test.go
  28. 232
      test/volume_server/http/chunk_manifest_test.go
  29. 97
      test/volume_server/http/compressed_read_test.go
  30. 102
      test/volume_server/http/headers_static_test.go
  31. 92
      test/volume_server/http/image_transform_test.go
  32. 287
      test/volume_server/http/public_cors_methods_test.go
  33. 82
      test/volume_server/http/range_variants_test.go
  34. 54
      test/volume_server/http/read_deleted_test.go
  35. 319
      test/volume_server/http/read_mode_proxy_redirect_test.go
  36. 191
      test/volume_server/http/read_path_variants_test.go
  37. 123
      test/volume_server/http/read_write_delete_test.go
  38. 730
      test/volume_server/http/throttling_test.go
  39. 118
      test/volume_server/http/write_delete_variants_test.go
  40. 74
      test/volume_server/http/write_error_variants_test.go
  41. 63
      test/volume_server/matrix/config_profiles.go

122
.github/workflows/volume-server-integration-tests.yml

@ -0,0 +1,122 @@
name: "Volume Server Integration Tests"

on:
  pull_request:
    branches: [ master ]
    paths:
      - 'test/volume_server/**'
      - 'weed/server/**'
      - 'weed/storage/**'
      - 'weed/pb/volume_server.proto'
      - 'weed/pb/volume_server_pb/**'
      - '.github/workflows/volume-server-integration-tests.yml'
  push:
    branches: [ master, main ]
    paths:
      - 'test/volume_server/**'
      - 'weed/server/**'
      - 'weed/storage/**'
      - 'weed/pb/volume_server.proto'
      - 'weed/pb/volume_server_pb/**'
      - '.github/workflows/volume-server-integration-tests.yml'

concurrency:
  group: ${{ github.head_ref || github.ref }}/volume-server-integration-tests
  cancel-in-progress: true

permissions:
  contents: read

env:
  GO_VERSION: '1.24'
  TEST_TIMEOUT: '30m'

jobs:
  volume-server-integration-tests:
    name: Volume Server Integration Tests (${{ matrix.test-type }} - Shard ${{ matrix.shard }})
    runs-on: ubuntu-22.04
    timeout-minutes: 45
    strategy:
      fail-fast: false
      matrix:
        test-type: [grpc, http]
        shard: [1, 2, 3]
    steps:
      - name: Checkout code
        uses: actions/checkout@v6

      - name: Set up Go ${{ env.GO_VERSION }}
        uses: actions/setup-go@v6
        with:
          go-version: ${{ env.GO_VERSION }}

      # Compute the shard's -run pattern once and export it via GITHUB_ENV so
      # the run and summary steps stay in sync (previously duplicated inline).
      - name: Compute shard test pattern
        run: |
          if [ "${{ matrix.test-type }}" == "grpc" ]; then
            if [ "${{ matrix.shard }}" == "1" ]; then
              TEST_PATTERN="^Test[A-H]"
            elif [ "${{ matrix.shard }}" == "2" ]; then
              TEST_PATTERN="^Test[I-S]"
            else
              TEST_PATTERN="^Test[T-Z]"
            fi
          else
            if [ "${{ matrix.shard }}" == "1" ]; then
              TEST_PATTERN="^Test[A-G]"
            elif [ "${{ matrix.shard }}" == "2" ]; then
              TEST_PATTERN="^Test[H-R]"
            else
              TEST_PATTERN="^Test[S-Z]"
            fi
          fi
          echo "TEST_PATTERN=${TEST_PATTERN}" >> "$GITHUB_ENV"

      - name: Build SeaweedFS binary
        run: |
          cd weed
          go build -o weed .
          chmod +x weed
          ./weed version

      - name: Run volume server integration tests
        env:
          WEED_BINARY: ${{ github.workspace }}/weed/weed
        run: |
          echo "Running volume server integration tests for ${{ matrix.test-type }} (Shard ${{ matrix.shard }}, pattern: ${TEST_PATTERN})..."
          go test -v -count=1 -timeout=${{ env.TEST_TIMEOUT }} ./test/volume_server/${{ matrix.test-type }}/... -run "${TEST_PATTERN}"

      - name: Collect logs on failure
        if: failure()
        run: |
          mkdir -p /tmp/volume-server-it-logs
          find /tmp -maxdepth 1 -type d -name "seaweedfs_volume_server_it_*" -print -exec cp -r {} /tmp/volume-server-it-logs/ \; || true

      # Artifact names must be unique per matrix job on upload-artifact v4+,
      # otherwise parallel shards fail with a name conflict.
      - name: Archive logs on failure
        if: failure()
        uses: actions/upload-artifact@v6
        with:
          name: volume-server-integration-test-logs-${{ matrix.test-type }}-shard-${{ matrix.shard }}
          path: /tmp/volume-server-it-logs/
          if-no-files-found: warn
          retention-days: 7

      - name: Test summary
        if: always()
        run: |
          echo "## Volume Server Integration Test Summary (${{ matrix.test-type }} - Shard ${{ matrix.shard }})" >> "$GITHUB_STEP_SUMMARY"
          echo "- Suite: test/volume_server/${{ matrix.test-type }} (Pattern: ${TEST_PATTERN})" >> "$GITHUB_STEP_SUMMARY"
          echo "- Command: go test -v -count=1 -timeout=${{ env.TEST_TIMEOUT }} ./test/volume_server/${{ matrix.test-type }}/... -run \"${TEST_PATTERN}\"" >> "$GITHUB_STEP_SUMMARY"

1129
test/volume_server/DEV_PLAN.md
File diff suppressed because it is too large
View File

7
test/volume_server/Makefile

@ -0,0 +1,7 @@
.PHONY: test-volume-server test-volume-server-short

# Run the full volume server integration suite (invoke from the repo root).
# Recipe lines must be indented with hard tabs, which the pasted copy lost.
test-volume-server:
	go test ./test/volume_server/... -v

# Same suite with -short, letting long-running cases skip themselves.
test-volume-server-short:
	go test ./test/volume_server/... -short -v

27
test/volume_server/README.md

@ -0,0 +1,27 @@
# Volume Server Integration Tests
This package contains integration tests for SeaweedFS volume server HTTP and gRPC APIs.
## Run Tests
Run tests from repo root:
```bash
go test ./test/volume_server/... -v
```
If a `weed` binary is not found, the harness will build one automatically.
## Optional environment variables
- `WEED_BINARY`: explicit path to the `weed` executable (disables auto-build).
- `VOLUME_SERVER_IT_KEEP_LOGS=1`: keep temporary test directories and process logs.
## Current scope (Phase 0)
- Shared cluster/framework utilities
- Matrix profile definitions
- Initial HTTP admin endpoint checks
- Initial gRPC state/status checks
More API coverage is tracked in `test/volume_server/DEV_PLAN.md`.

442
test/volume_server/framework/cluster.go

@ -0,0 +1,442 @@
package framework
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"net"
"net/http"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
)
const (
	// defaultWaitTimeout bounds how long readiness polling (HTTP and TCP) may run.
	defaultWaitTimeout = 30 * time.Second
	// defaultWaitTick is the interval between readiness polling attempts.
	defaultWaitTick = 200 * time.Millisecond
	// testVolumeSizeLimitMB keeps test volumes small (passed to the master as
	// -volumeSizeLimitMB) so size-related behavior is reachable in tests.
	testVolumeSizeLimitMB = 32
)

// Cluster is a lightweight SeaweedFS master + one volume server test harness.
type Cluster struct {
	testingTB testing.TB     // owning test; consulted for Failed() and Logf during cleanup
	profile   matrix.Profile // profile that drives server flags and security config
	weedBinary string        // resolved path to the weed executable

	baseDir   string // temp root holding config/, logs/, master/, volume/
	configDir string // directory containing the generated security.toml
	logsDir   string // directory receiving master.log and volume.log
	keepLogs  bool   // when true, baseDir is preserved after Stop

	masterPort     int // master HTTP port
	masterGrpcPort int // master gRPC port (HTTP port + 10000 by convention)
	volumePort     int // volume server admin HTTP port
	volumeGrpcPort int // volume server gRPC port
	volumePubPort  int // volume server public HTTP port (== volumePort unless SplitPublicPort)

	masterCmd   *exec.Cmd // running master process, nil until started
	volumeCmd   *exec.Cmd // running volume server process, nil until started
	cleanupOnce sync.Once // makes Stop idempotent
}
// StartSingleVolumeCluster boots one master and one volume server.
//
// It resolves (or builds) the weed binary, prepares a temp work directory
// with a generated security.toml for the profile, allocates free loopback
// ports, then starts the master and volume server in order, waiting for each
// to become reachable. Any failure tears the partial cluster down and fails
// the test with a tail of the relevant process log. Cleanup is registered
// via t.Cleanup, so callers do not need to call Stop themselves.
func StartSingleVolumeCluster(t testing.TB, profile matrix.Profile) *Cluster {
	t.Helper()
	weedBinary, err := FindOrBuildWeedBinary()
	if err != nil {
		t.Fatalf("resolve weed binary: %v", err)
	}
	baseDir, keepLogs, err := newWorkDir()
	if err != nil {
		t.Fatalf("create temp test directory: %v", err)
	}
	configDir := filepath.Join(baseDir, "config")
	logsDir := filepath.Join(baseDir, "logs")
	masterDataDir := filepath.Join(baseDir, "master")
	volumeDataDir := filepath.Join(baseDir, "volume")
	for _, dir := range []string{configDir, logsDir, masterDataDir, volumeDataDir} {
		if mkErr := os.MkdirAll(dir, 0o755); mkErr != nil {
			t.Fatalf("create %s: %v", dir, mkErr)
		}
	}
	if err = writeSecurityConfig(configDir, profile); err != nil {
		t.Fatalf("write security config: %v", err)
	}
	// Master ports come as a (http, http+10000) pair; the volume server takes
	// three independently allocated ports (admin HTTP, gRPC, optional public).
	masterPort, masterGrpcPort, err := allocateMasterPortPair()
	if err != nil {
		t.Fatalf("allocate master port pair: %v", err)
	}
	ports, err := allocatePorts(3)
	if err != nil {
		t.Fatalf("allocate ports: %v", err)
	}
	c := &Cluster{
		testingTB:      t,
		profile:        profile,
		weedBinary:     weedBinary,
		baseDir:        baseDir,
		configDir:      configDir,
		logsDir:        logsDir,
		keepLogs:       keepLogs,
		masterPort:     masterPort,
		masterGrpcPort: masterGrpcPort,
		volumePort:     ports[0],
		volumeGrpcPort: ports[1],
		volumePubPort:  ports[0], // public port defaults to the admin port
	}
	if profile.SplitPublicPort {
		// Profiles exercising the public endpoint get a dedicated port.
		c.volumePubPort = ports[2]
	}
	if err = c.startMaster(masterDataDir); err != nil {
		c.Stop()
		t.Fatalf("start master: %v", err)
	}
	if err = c.waitForHTTP(c.MasterURL() + "/dir/status"); err != nil {
		masterLog := c.tailLog("master.log")
		c.Stop()
		t.Fatalf("wait for master readiness: %v\nmaster log tail:\n%s", err, masterLog)
	}
	// The volume server registers with the master, so start it only after
	// the master answers HTTP requests.
	if err = c.startVolume(volumeDataDir); err != nil {
		masterLog := c.tailLog("master.log")
		c.Stop()
		t.Fatalf("start volume: %v\nmaster log tail:\n%s", err, masterLog)
	}
	if err = c.waitForHTTP(c.VolumeAdminURL() + "/status"); err != nil {
		volumeLog := c.tailLog("volume.log")
		c.Stop()
		t.Fatalf("wait for volume readiness: %v\nvolume log tail:\n%s", err, volumeLog)
	}
	if err = c.waitForTCP(c.VolumeGRPCAddress()); err != nil {
		volumeLog := c.tailLog("volume.log")
		c.Stop()
		t.Fatalf("wait for volume grpc readiness: %v\nvolume log tail:\n%s", err, volumeLog)
	}
	t.Cleanup(func() {
		c.Stop()
	})
	return c
}
// Stop terminates all processes and cleans temporary files.
//
// It is nil-safe and idempotent, so it may be called from failure paths
// during startup as well as from the t.Cleanup hook.
func (c *Cluster) Stop() {
	if c == nil {
		return
	}
	c.cleanupOnce.Do(func() {
		// Stop the volume server first, then the master.
		stopProcess(c.volumeCmd)
		stopProcess(c.masterCmd)
		// Preserve the work directory when the test failed or the user
		// requested logs via VOLUME_SERVER_IT_KEEP_LOGS.
		if !c.keepLogs && !c.testingTB.Failed() {
			_ = os.RemoveAll(c.baseDir)
		} else if c.baseDir != "" {
			c.testingTB.Logf("volume server integration logs kept at %s", c.baseDir)
		}
	})
}
// startMaster launches the weed master process with dataDir as its metadata
// directory, writing combined stdout/stderr to logs/master.log. It returns
// once the process has started; readiness is checked separately by the caller.
func (c *Cluster) startMaster(dataDir string) error {
	logFile, err := os.Create(filepath.Join(c.logsDir, "master.log"))
	if err != nil {
		return err
	}
	args := []string{
		"-config_dir=" + c.configDir,
		"master",
		"-ip=127.0.0.1",
		"-port=" + strconv.Itoa(c.masterPort),
		"-port.grpc=" + strconv.Itoa(c.masterGrpcPort),
		"-mdir=" + dataDir,
		"-peers=none", // single-node master, no raft peers
		"-volumeSizeLimitMB=" + strconv.Itoa(testVolumeSizeLimitMB), // small volumes for tests
		"-defaultReplication=000", // no replication in the single-volume harness
	}
	c.masterCmd = exec.Command(c.weedBinary, args...)
	c.masterCmd.Dir = c.baseDir
	c.masterCmd.Stdout = logFile
	c.masterCmd.Stderr = logFile
	return c.masterCmd.Start()
}
// startVolume launches the weed volume server against the already-running
// master, storing data under dataDir and writing combined stdout/stderr to
// logs/volume.log. Flags are derived from the cluster's profile; the inflight
// timeout flags are appended only when the profile sets a positive duration.
func (c *Cluster) startVolume(dataDir string) error {
	logFile, err := os.Create(filepath.Join(c.logsDir, "volume.log"))
	if err != nil {
		return err
	}
	args := []string{
		"-config_dir=" + c.configDir,
		"volume",
		"-ip=127.0.0.1",
		"-port=" + strconv.Itoa(c.volumePort),
		"-port.grpc=" + strconv.Itoa(c.volumeGrpcPort),
		"-port.public=" + strconv.Itoa(c.volumePubPort),
		"-dir=" + dataDir,
		"-max=16", // cap volume count per server for tests
		"-master=127.0.0.1:" + strconv.Itoa(c.masterPort),
		"-readMode=" + c.profile.ReadMode,
		"-concurrentUploadLimitMB=" + strconv.Itoa(c.profile.ConcurrentUploadLimitMB),
		"-concurrentDownloadLimitMB=" + strconv.Itoa(c.profile.ConcurrentDownloadLimitMB),
	}
	if c.profile.InflightUploadTimeout > 0 {
		args = append(args, "-inflightUploadDataTimeout="+c.profile.InflightUploadTimeout.String())
	}
	if c.profile.InflightDownloadTimeout > 0 {
		args = append(args, "-inflightDownloadDataTimeout="+c.profile.InflightDownloadTimeout.String())
	}
	c.volumeCmd = exec.Command(c.weedBinary, args...)
	c.volumeCmd.Dir = c.baseDir
	c.volumeCmd.Stdout = logFile
	c.volumeCmd.Stderr = logFile
	return c.volumeCmd.Start()
}
// waitForHTTP polls url until it answers with a non-5xx status code, or
// returns an error once the default wait timeout elapses.
func (c *Cluster) waitForHTTP(url string) error {
	httpClient := &http.Client{Timeout: 1 * time.Second}
	stopAt := time.Now().Add(defaultWaitTimeout)
	for time.Now().Before(stopAt) {
		resp, getErr := httpClient.Get(url)
		if getErr != nil {
			time.Sleep(defaultWaitTick)
			continue
		}
		// Drain the body so the transport can reuse the connection.
		_, _ = io.Copy(io.Discard, resp.Body)
		resp.Body.Close()
		if resp.StatusCode < 500 {
			return nil
		}
		time.Sleep(defaultWaitTick)
	}
	return fmt.Errorf("timed out waiting for %s", url)
}
// waitForTCP dials addr repeatedly until a TCP connection succeeds, or
// returns an error once the default wait timeout elapses.
func (c *Cluster) waitForTCP(addr string) error {
	stopAt := time.Now().Add(defaultWaitTimeout)
	for {
		if !time.Now().Before(stopAt) {
			return fmt.Errorf("timed out waiting for tcp %s", addr)
		}
		if conn, dialErr := net.DialTimeout("tcp", addr, time.Second); dialErr == nil {
			_ = conn.Close()
			return nil
		}
		time.Sleep(defaultWaitTick)
	}
}
// stopProcess asks cmd's process to exit via SIGINT and escalates to a hard
// kill if it has not been reaped within ten seconds. Nil commands and
// never-started commands are ignored.
func stopProcess(cmd *exec.Cmd) {
	if cmd == nil || cmd.Process == nil {
		return
	}
	_ = cmd.Process.Signal(os.Interrupt)
	waited := make(chan error, 1)
	go func() { waited <- cmd.Wait() }()
	timer := time.NewTimer(10 * time.Second)
	defer timer.Stop()
	select {
	case <-waited:
		// Process exited on its own after the interrupt.
	case <-timer.C:
		_ = cmd.Process.Kill()
		<-waited // reap after the forced kill
	}
}
// allocatePorts reserves count distinct loopback TCP ports by briefly binding
// ephemeral listeners, then releases them all and returns the port numbers.
// The ports are free at return time but not held, so a small race window
// exists before the caller binds them.
func allocatePorts(count int) ([]int, error) {
	held := make([]net.Listener, 0, count)
	defer func() {
		for _, l := range held {
			_ = l.Close()
		}
	}()
	ports := make([]int, 0, count)
	for len(ports) < count {
		l, err := net.Listen("tcp", "127.0.0.1:0")
		if err != nil {
			return nil, err
		}
		held = append(held, l)
		ports = append(ports, l.Addr().(*net.TCPAddr).Port)
	}
	return ports, nil
}
// allocateMasterPortPair scans upward from port 10000 for an HTTP port whose
// matching gRPC port (HTTP port + 10000) is also free on loopback, mirroring
// the master's port convention. Both test listeners are closed before
// returning, so the ports are free but not held.
func allocateMasterPortPair() (int, int, error) {
	for httpPort := 10000; httpPort <= 55535; httpPort++ {
		grpcPort := httpPort + 10000
		httpListener, err := net.Listen("tcp", net.JoinHostPort("127.0.0.1", strconv.Itoa(httpPort)))
		if err != nil {
			continue // HTTP side taken; try the next candidate
		}
		grpcListener, err := net.Listen("tcp", net.JoinHostPort("127.0.0.1", strconv.Itoa(grpcPort)))
		if err != nil {
			_ = httpListener.Close()
			continue // gRPC side taken; release HTTP side and move on
		}
		_ = grpcListener.Close()
		_ = httpListener.Close()
		return httpPort, grpcPort, nil
	}
	return 0, 0, errors.New("unable to find available master port pair")
}
// newWorkDir creates a fresh temp directory for one test cluster and reports
// whether its contents should be kept after the run
// (controlled by VOLUME_SERVER_IT_KEEP_LOGS=1).
func newWorkDir() (dir string, keepLogs bool, err error) {
	dir, err = os.MkdirTemp("", "seaweedfs_volume_server_it_")
	keepLogs = os.Getenv("VOLUME_SERVER_IT_KEEP_LOGS") == "1"
	return dir, keepLogs, err
}
// writeSecurityConfig renders security.toml for the given profile into
// configDir. JWT signing sections are emitted only when the profile enables
// JWT, in which case both the write and read keys must be set; otherwise a
// placeholder comment is written so the file always exists.
func writeSecurityConfig(configDir string, profile matrix.Profile) error {
	content := "# optional security config generated for integration tests\n"
	if profile.EnableJWT {
		if profile.JWTSigningKey == "" || profile.JWTReadKey == "" {
			return errors.New("jwt profile requires both write and read keys")
		}
		content = "[jwt.signing]\n" +
			"key = \"" + profile.JWTSigningKey + "\"\n" +
			"expires_after_seconds = 60\n" +
			"\n" +
			"[jwt.signing.read]\n" +
			"key = \"" + profile.JWTReadKey + "\"\n" +
			"expires_after_seconds = 60\n"
	}
	return os.WriteFile(filepath.Join(configDir, "security.toml"), []byte(content), 0o644)
}
// FindOrBuildWeedBinary returns an executable weed binary, building one when needed.
//
// Resolution order:
//  1. $WEED_BINARY, which must point at an executable (an invalid value is an
//     error, never a fallback);
//  2. a prebuilt weed/weed binary inside the repository;
//  3. a cached binary under the OS temp directory from an earlier run;
//  4. a fresh `go build` of ./weed into that cache.
func FindOrBuildWeedBinary() (string, error) {
	if fromEnv := os.Getenv("WEED_BINARY"); fromEnv != "" {
		if isExecutableFile(fromEnv) {
			return fromEnv, nil
		}
		return "", fmt.Errorf("WEED_BINARY is set but not executable: %s", fromEnv)
	}
	repoRoot := ""
	if _, file, _, ok := runtime.Caller(0); ok {
		// This source file lives three directories below the repo root
		// (test/volume_server/framework).
		repoRoot = filepath.Clean(filepath.Join(filepath.Dir(file), "..", "..", ".."))
		candidate := filepath.Join(repoRoot, "weed", "weed")
		if isExecutableFile(candidate) {
			return candidate, nil
		}
	}
	if repoRoot == "" {
		// Without caller info we cannot locate the module to build.
		return "", errors.New("unable to detect repository root")
	}
	// Build (or reuse) a cached binary shared by all test runs on this host.
	binDir := filepath.Join(os.TempDir(), "seaweedfs_volume_server_it_bin")
	if err := os.MkdirAll(binDir, 0o755); err != nil {
		return "", fmt.Errorf("create binary directory %s: %w", binDir, err)
	}
	binPath := filepath.Join(binDir, "weed")
	if isExecutableFile(binPath) {
		return binPath, nil
	}
	cmd := exec.Command("go", "build", "-o", binPath, ".")
	cmd.Dir = filepath.Join(repoRoot, "weed")
	var out bytes.Buffer
	cmd.Stdout = &out
	cmd.Stderr = &out
	if err := cmd.Run(); err != nil {
		// Include the build output, which carries the compiler diagnostics.
		return "", fmt.Errorf("build weed binary: %w\n%s", err, out.String())
	}
	if !isExecutableFile(binPath) {
		return "", fmt.Errorf("built weed binary is not executable: %s", binPath)
	}
	return binPath, nil
}
// isExecutableFile reports whether path names an existing non-directory
// entry with at least one executable permission bit set.
func isExecutableFile(path string) bool {
	info, err := os.Stat(path)
	switch {
	case err != nil:
		return false
	case info.IsDir():
		return false
	default:
		return info.Mode().Perm()&0o111 != 0
	}
}
// tailLog returns up to the last 40 lines of the named file in the cluster's
// logs directory, joined by newlines. It returns "" when the file cannot be
// opened.
func (c *Cluster) tailLog(logName string) string {
	f, err := os.Open(filepath.Join(c.logsDir, logName))
	if err != nil {
		return ""
	}
	defer f.Close()
	var tail []string
	for sc := bufio.NewScanner(f); sc.Scan(); {
		tail = append(tail, sc.Text())
		if len(tail) > 40 {
			// Drop the oldest line to keep a sliding window of 40.
			tail = tail[1:]
		}
	}
	return strings.Join(tail, "\n")
}
// MasterAddress returns the master's HTTP host:port on loopback.
func (c *Cluster) MasterAddress() string {
	return net.JoinHostPort("127.0.0.1", strconv.Itoa(c.masterPort))
}

// VolumeAdminAddress returns the volume server's admin HTTP host:port.
func (c *Cluster) VolumeAdminAddress() string {
	return net.JoinHostPort("127.0.0.1", strconv.Itoa(c.volumePort))
}

// VolumePublicAddress returns the volume server's public HTTP host:port
// (same as the admin address unless the profile splits the public port).
func (c *Cluster) VolumePublicAddress() string {
	return net.JoinHostPort("127.0.0.1", strconv.Itoa(c.volumePubPort))
}

// VolumeGRPCAddress returns the volume server's gRPC host:port.
func (c *Cluster) VolumeGRPCAddress() string {
	return net.JoinHostPort("127.0.0.1", strconv.Itoa(c.volumeGrpcPort))
}

// VolumeServerAddress returns SeaweedFS server address format: ip:httpPort.grpcPort
func (c *Cluster) VolumeServerAddress() string {
	return fmt.Sprintf("%s.%d", c.VolumeAdminAddress(), c.volumeGrpcPort)
}

// MasterURL returns the master's base HTTP URL.
func (c *Cluster) MasterURL() string {
	return "http://" + c.MasterAddress()
}

// VolumeAdminURL returns the volume server's admin base HTTP URL.
func (c *Cluster) VolumeAdminURL() string {
	return "http://" + c.VolumeAdminAddress()
}

// VolumePublicURL returns the volume server's public base HTTP URL.
func (c *Cluster) VolumePublicURL() string {
	return "http://" + c.VolumePublicAddress()
}

// BaseDir returns the cluster's temporary work directory.
func (c *Cluster) BaseDir() string {
	return c.baseDir
}

293
test/volume_server/framework/cluster_dual.go

@ -0,0 +1,293 @@
package framework
import (
"fmt"
"net"
"os"
"os/exec"
"path/filepath"
"strconv"
"sync"
"testing"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
)
// DualVolumeCluster is a test harness running one master and two volume
// servers, for scenarios that need a peer (e.g. proxy/redirect reads).
type DualVolumeCluster struct {
	testingTB  testing.TB     // owning test; consulted during cleanup
	profile    matrix.Profile // profile that drives server flags and security config
	weedBinary string         // resolved path to the weed executable

	baseDir   string // temp root holding config/, logs/ and per-server data dirs
	configDir string // directory containing the generated security.toml
	logsDir   string // directory receiving the process log files
	keepLogs  bool   // when true, baseDir is preserved after Stop

	masterPort     int // master HTTP port
	masterGrpcPort int // master gRPC port

	volumePort0     int // first volume server admin HTTP port
	volumeGrpcPort0 int // first volume server gRPC port
	volumePubPort0  int // first volume server public port (== admin unless SplitPublicPort)
	volumePort1     int // second volume server admin HTTP port
	volumeGrpcPort1 int // second volume server gRPC port
	volumePubPort1  int // second volume server public port (== admin unless SplitPublicPort)

	masterCmd  *exec.Cmd // running master process
	volumeCmd0 *exec.Cmd // first volume server process
	volumeCmd1 *exec.Cmd // second volume server process

	cleanupOnce sync.Once // makes Stop idempotent
}
// StartDualVolumeCluster boots one weed master and two volume servers under
// a fresh work directory, waits for each component's HTTP endpoint (and each
// volume server's gRPC port) to become reachable, and registers Stop via
// t.Cleanup. Any startup failure fails the test, tailing the most relevant
// log file to aid debugging.
func StartDualVolumeCluster(t testing.TB, profile matrix.Profile) *DualVolumeCluster {
	t.Helper()
	weedBinary, err := FindOrBuildWeedBinary()
	if err != nil {
		t.Fatalf("resolve weed binary: %v", err)
	}
	baseDir, keepLogs, err := newWorkDir()
	if err != nil {
		t.Fatalf("create temp test directory: %v", err)
	}
	configDir := filepath.Join(baseDir, "config")
	logsDir := filepath.Join(baseDir, "logs")
	masterDataDir := filepath.Join(baseDir, "master")
	volumeDataDir0 := filepath.Join(baseDir, "volume0")
	volumeDataDir1 := filepath.Join(baseDir, "volume1")
	for _, dir := range []string{configDir, logsDir, masterDataDir, volumeDataDir0, volumeDataDir1} {
		if mkErr := os.MkdirAll(dir, 0o755); mkErr != nil {
			t.Fatalf("create %s: %v", dir, mkErr)
		}
	}
	if err = writeSecurityConfig(configDir, profile); err != nil {
		t.Fatalf("write security config: %v", err)
	}
	masterPort, masterGrpcPort, err := allocateMasterPortPair()
	if err != nil {
		t.Fatalf("allocate master port pair: %v", err)
	}
	// Six ports: admin + gRPC for each of the two volume servers, plus two
	// spare ports used only when the profile splits public traffic.
	ports, err := allocatePorts(6)
	if err != nil {
		t.Fatalf("allocate volume ports: %v", err)
	}
	c := &DualVolumeCluster{
		testingTB:       t,
		profile:         profile,
		weedBinary:      weedBinary,
		baseDir:         baseDir,
		configDir:       configDir,
		logsDir:         logsDir,
		keepLogs:        keepLogs,
		masterPort:      masterPort,
		masterGrpcPort:  masterGrpcPort,
		volumePort0:     ports[0],
		volumeGrpcPort0: ports[1],
		volumePubPort0:  ports[0], // public defaults to the admin port
		volumePort1:     ports[2],
		volumeGrpcPort1: ports[3],
		volumePubPort1:  ports[2],
	}
	if profile.SplitPublicPort {
		c.volumePubPort0 = ports[4]
		c.volumePubPort1 = ports[5]
	}
	// Bring components up in dependency order: master first, then each
	// volume server, gating on readiness before starting the next.
	if err = c.startMaster(masterDataDir); err != nil {
		c.Stop()
		t.Fatalf("start master: %v", err)
	}
	if err = c.waitForHTTP(c.MasterURL() + "/dir/status"); err != nil {
		masterLog := c.tailLog("master.log")
		c.Stop()
		t.Fatalf("wait for master readiness: %v\nmaster log tail:\n%s", err, masterLog)
	}
	if err = c.startVolume(0, volumeDataDir0); err != nil {
		masterLog := c.tailLog("master.log")
		c.Stop()
		t.Fatalf("start first volume server: %v\nmaster log tail:\n%s", err, masterLog)
	}
	if err = c.waitForHTTP(c.VolumeAdminURL(0) + "/status"); err != nil {
		volumeLog := c.tailLog("volume0.log")
		c.Stop()
		t.Fatalf("wait for first volume readiness: %v\nvolume log tail:\n%s", err, volumeLog)
	}
	if err = c.waitForTCP(c.VolumeGRPCAddress(0)); err != nil {
		volumeLog := c.tailLog("volume0.log")
		c.Stop()
		t.Fatalf("wait for first volume grpc readiness: %v\nvolume log tail:\n%s", err, volumeLog)
	}
	if err = c.startVolume(1, volumeDataDir1); err != nil {
		// volume1.log would be empty if the process never started, so tail
		// the first volume's log instead for context.
		volumeLog := c.tailLog("volume0.log")
		c.Stop()
		t.Fatalf("start second volume server: %v\nfirst volume log tail:\n%s", err, volumeLog)
	}
	if err = c.waitForHTTP(c.VolumeAdminURL(1) + "/status"); err != nil {
		volumeLog := c.tailLog("volume1.log")
		c.Stop()
		t.Fatalf("wait for second volume readiness: %v\nvolume log tail:\n%s", err, volumeLog)
	}
	if err = c.waitForTCP(c.VolumeGRPCAddress(1)); err != nil {
		volumeLog := c.tailLog("volume1.log")
		c.Stop()
		t.Fatalf("wait for second volume grpc readiness: %v\nvolume log tail:\n%s", err, volumeLog)
	}
	t.Cleanup(func() {
		c.Stop()
	})
	return c
}
// Stop terminates the cluster processes exactly once and removes the work
// directory, unless logs were requested to be kept or the test has failed,
// in which case the directory path is logged instead.
func (c *DualVolumeCluster) Stop() {
	if c == nil {
		return
	}
	c.cleanupOnce.Do(func() {
		// Shut down in reverse startup order: volumes first, master last.
		stopProcess(c.volumeCmd1)
		stopProcess(c.volumeCmd0)
		stopProcess(c.masterCmd)
		if c.keepLogs || c.testingTB.Failed() {
			if c.baseDir != "" {
				c.testingTB.Logf("volume server integration logs kept at %s", c.baseDir)
			}
			return
		}
		_ = os.RemoveAll(c.baseDir)
	})
}
// startMaster launches the weed master process with dataDir as its metadata
// directory, redirecting stdout/stderr to logs/master.log.
func (c *DualVolumeCluster) startMaster(dataDir string) error {
	logFile, err := os.Create(filepath.Join(c.logsDir, "master.log"))
	if err != nil {
		return err
	}
	cmd := exec.Command(c.weedBinary,
		"-config_dir="+c.configDir,
		"master",
		"-ip=127.0.0.1",
		"-port="+strconv.Itoa(c.masterPort),
		"-port.grpc="+strconv.Itoa(c.masterGrpcPort),
		"-mdir="+dataDir,
		"-peers=none",
		"-volumeSizeLimitMB="+strconv.Itoa(testVolumeSizeLimitMB),
		"-defaultReplication=000",
	)
	cmd.Dir = c.baseDir
	cmd.Stdout = logFile
	cmd.Stderr = logFile
	c.masterCmd = cmd
	return c.masterCmd.Start()
}
// startVolume launches volume server index (0 or 1) against dataDir,
// logging to logs/volume<index>.log and recording the process handle so
// Stop can terminate it later.
func (c *DualVolumeCluster) startVolume(index int, dataDir string) error {
	logFile, err := os.Create(filepath.Join(c.logsDir, fmt.Sprintf("volume%d.log", index)))
	if err != nil {
		return err
	}
	// Select the port triple for the requested server; anything other than
	// index 1 falls back to server 0, matching the rest of the harness.
	httpPort, grpcPort, pubPort := c.volumePort0, c.volumeGrpcPort0, c.volumePubPort0
	if index == 1 {
		httpPort, grpcPort, pubPort = c.volumePort1, c.volumeGrpcPort1, c.volumePubPort1
	}
	args := []string{
		"-config_dir=" + c.configDir,
		"volume",
		"-ip=127.0.0.1",
		"-port=" + strconv.Itoa(httpPort),
		"-port.grpc=" + strconv.Itoa(grpcPort),
		"-port.public=" + strconv.Itoa(pubPort),
		"-dir=" + dataDir,
		"-max=16",
		"-master=127.0.0.1:" + strconv.Itoa(c.masterPort),
		"-readMode=" + c.profile.ReadMode,
		"-concurrentUploadLimitMB=" + strconv.Itoa(c.profile.ConcurrentUploadLimitMB),
		"-concurrentDownloadLimitMB=" + strconv.Itoa(c.profile.ConcurrentDownloadLimitMB),
	}
	// Inflight timeouts are optional profile knobs; zero leaves the server defaults.
	if c.profile.InflightUploadTimeout > 0 {
		args = append(args, "-inflightUploadDataTimeout="+c.profile.InflightUploadTimeout.String())
	}
	if c.profile.InflightDownloadTimeout > 0 {
		args = append(args, "-inflightDownloadDataTimeout="+c.profile.InflightDownloadTimeout.String())
	}
	cmd := exec.Command(c.weedBinary, args...)
	cmd.Dir = c.baseDir
	cmd.Stdout = logFile
	cmd.Stderr = logFile
	if err = cmd.Start(); err != nil {
		return err
	}
	if index == 1 {
		c.volumeCmd1 = cmd
	} else {
		c.volumeCmd0 = cmd
	}
	return nil
}
// waitForHTTP delegates to the single-cluster HTTP readiness poller.
func (c *DualVolumeCluster) waitForHTTP(url string) error {
	return new(Cluster).waitForHTTP(url)
}
// waitForTCP delegates to the single-cluster TCP readiness poller.
func (c *DualVolumeCluster) waitForTCP(addr string) error {
	return new(Cluster).waitForTCP(addr)
}
// tailLog reuses Cluster's log-tail logic against this cluster's log directory.
func (c *DualVolumeCluster) tailLog(logName string) string {
	proxy := &Cluster{logsDir: c.logsDir}
	return proxy.tailLog(logName)
}
// MasterAddress returns the master's HTTP endpoint as host:port.
func (c *DualVolumeCluster) MasterAddress() string {
	port := strconv.Itoa(c.masterPort)
	return net.JoinHostPort("127.0.0.1", port)
}
// MasterURL returns the master's HTTP base URL.
func (c *DualVolumeCluster) MasterURL() string {
	return fmt.Sprintf("http://%s", c.MasterAddress())
}
// VolumeAdminAddress returns the admin HTTP host:port of volume server index
// (1 selects the second server; any other value selects the first).
func (c *DualVolumeCluster) VolumeAdminAddress(index int) string {
	port := c.volumePort0
	if index == 1 {
		port = c.volumePort1
	}
	return net.JoinHostPort("127.0.0.1", strconv.Itoa(port))
}
// VolumePublicAddress returns the public HTTP host:port of volume server index.
func (c *DualVolumeCluster) VolumePublicAddress(index int) string {
	port := c.volumePubPort0
	if index == 1 {
		port = c.volumePubPort1
	}
	return net.JoinHostPort("127.0.0.1", strconv.Itoa(port))
}
// VolumeGRPCAddress returns the gRPC host:port of volume server index.
func (c *DualVolumeCluster) VolumeGRPCAddress(index int) string {
	port := c.volumeGrpcPort0
	if index == 1 {
		port = c.volumeGrpcPort1
	}
	return net.JoinHostPort("127.0.0.1", strconv.Itoa(port))
}
// VolumeAdminURL returns the admin HTTP base URL of volume server index.
func (c *DualVolumeCluster) VolumeAdminURL(index int) string {
	return fmt.Sprintf("http://%s", c.VolumeAdminAddress(index))
}
// VolumePublicURL returns the public HTTP base URL of volume server index.
func (c *DualVolumeCluster) VolumePublicURL(index int) string {
	return fmt.Sprintf("http://%s", c.VolumePublicAddress(index))
}

91
test/volume_server/framework/cluster_with_filer.go

@ -0,0 +1,91 @@
package framework
import (
"fmt"
"net"
"os"
"os/exec"
"path/filepath"
"strconv"
"testing"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
)
// ClusterWithFiler extends the single-volume Cluster harness with a weed
// filer process attached to the same master.
type ClusterWithFiler struct {
	*Cluster
	filerCmd      *exec.Cmd // filer process handle, stopped via t.Cleanup
	filerPort     int
	filerGrpcPort int
}
// StartSingleVolumeClusterWithFiler starts a single-volume cluster, then
// launches a weed filer against its master and waits for the filer's gRPC
// port before returning. Filer shutdown is registered via t.Cleanup; the
// base cluster manages its own cleanup.
func StartSingleVolumeClusterWithFiler(t testing.TB, profile matrix.Profile) *ClusterWithFiler {
	t.Helper()
	baseCluster := StartSingleVolumeCluster(t, profile)
	ports, err := allocatePorts(2)
	if err != nil {
		t.Fatalf("allocate filer ports: %v", err)
	}
	filerDataDir := filepath.Join(baseCluster.baseDir, "filer")
	if mkErr := os.MkdirAll(filerDataDir, 0o755); mkErr != nil {
		t.Fatalf("create filer data dir: %v", mkErr)
	}
	logFile, err := os.Create(filepath.Join(baseCluster.logsDir, "filer.log"))
	if err != nil {
		t.Fatalf("create filer log file: %v", err)
	}
	filerPort := ports[0]
	filerGrpcPort := ports[1]
	args := []string{
		"-config_dir=" + baseCluster.configDir,
		"filer",
		"-master=127.0.0.1:" + strconv.Itoa(baseCluster.masterPort),
		"-ip=127.0.0.1",
		"-port=" + strconv.Itoa(filerPort),
		"-port.grpc=" + strconv.Itoa(filerGrpcPort),
		"-defaultStoreDir=" + filerDataDir,
	}
	filerCmd := exec.Command(baseCluster.weedBinary, args...)
	filerCmd.Dir = baseCluster.baseDir
	filerCmd.Stdout = logFile
	filerCmd.Stderr = logFile
	if err = filerCmd.Start(); err != nil {
		t.Fatalf("start filer: %v", err)
	}
	// Readiness is gated on the gRPC port only, which is what the tests use.
	if err = baseCluster.waitForTCP(net.JoinHostPort("127.0.0.1", strconv.Itoa(filerGrpcPort))); err != nil {
		filerLogTail := baseCluster.tailLog("filer.log")
		stopProcess(filerCmd)
		t.Fatalf("wait for filer grpc readiness: %v\nfiler log tail:\n%s", err, filerLogTail)
	}
	t.Cleanup(func() {
		stopProcess(filerCmd)
	})
	return &ClusterWithFiler{
		Cluster:       baseCluster,
		filerCmd:      filerCmd,
		filerPort:     filerPort,
		filerGrpcPort: filerGrpcPort,
	}
}
// FilerAddress returns the filer's HTTP endpoint as host:port.
func (c *ClusterWithFiler) FilerAddress() string {
	port := strconv.Itoa(c.filerPort)
	return net.JoinHostPort("127.0.0.1", port)
}
// FilerGRPCAddress returns the filer's gRPC endpoint as host:port.
func (c *ClusterWithFiler) FilerGRPCAddress() string {
	port := strconv.Itoa(c.filerGrpcPort)
	return net.JoinHostPort("127.0.0.1", port)
}
// FilerServerAddress returns the filer address in the SeaweedFS
// ip:httpPort.grpcPort server format.
func (c *ClusterWithFiler) FilerServerAddress() string {
	return c.FilerAddress() + "." + strconv.Itoa(c.filerGrpcPort)
}

8
test/volume_server/framework/fault_injection.go

@ -0,0 +1,8 @@
package framework
// Phase 0 placeholder for future fault injection utilities.
//
// Planned extensions:
// - restart/kill selected processes
// - temporary network isolation hooks
// - master or peer outage helpers for proxy/replication branch coverage

28
test/volume_server/framework/grpc_client.go

@ -0,0 +1,28 @@
package framework
import (
"context"
"testing"
"time"
"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
)
// DialVolumeServer opens a plaintext gRPC connection to the volume server at
// address, blocking up to 10 seconds for the connection to become ready.
// The caller owns the returned conn and must Close it.
//
// NOTE(review): grpc.DialContext and grpc.WithBlock are deprecated in newer
// grpc-go releases in favor of grpc.NewClient — confirm the pinned grpc-go
// version before migrating, since NewClient does not block on connect.
func DialVolumeServer(t testing.TB, address string) (*grpc.ClientConn, volume_server_pb.VolumeServerClient) {
	t.Helper()
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	conn, err := grpc.DialContext(ctx, address,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithBlock(),
	)
	if err != nil {
		t.Fatalf("dial volume grpc %s: %v", address, err)
	}
	return conn, volume_server_pb.NewVolumeServerClient(conn)
}

34
test/volume_server/framework/http_client.go

@ -0,0 +1,34 @@
package framework
import (
"io"
"net/http"
"testing"
"time"
)
// NewHTTPClient builds an HTTP client with a 10-second overall request timeout.
func NewHTTPClient() *http.Client {
	client := &http.Client{}
	client.Timeout = 10 * time.Second
	return client
}
func DoRequest(t testing.TB, client *http.Client, req *http.Request) *http.Response {
t.Helper()
resp, err := client.Do(req)
if err != nil {
t.Fatalf("http request %s %s: %v", req.Method, req.URL.String(), err)
}
return resp
}
func ReadAllAndClose(t testing.TB, resp *http.Response) []byte {
t.Helper()
if resp == nil {
return nil
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
t.Fatalf("read response body: %v", err)
}
return body
}

56
test/volume_server/framework/volume_fixture.go

@ -0,0 +1,56 @@
package framework
import (
"bytes"
"context"
"fmt"
"net/http"
"testing"
"time"
"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
"github.com/seaweedfs/seaweedfs/weed/storage/needle"
)
// AllocateVolume creates volume volumeID in the given collection (with
// replication 000 and the current needle version) on the server behind
// client, failing the test on error.
func AllocateVolume(t testing.TB, client volume_server_pb.VolumeServerClient, volumeID uint32, collection string) {
	t.Helper()
	req := &volume_server_pb.AllocateVolumeRequest{
		VolumeId:    volumeID,
		Collection:  collection,
		Replication: "000",
		Version:     uint32(needle.GetCurrentVersion()),
	}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	if _, err := client.AllocateVolume(ctx, req); err != nil {
		t.Fatalf("allocate volume %d: %v", volumeID, err)
	}
}
// NewFileID renders a SeaweedFS file id (volume, key, cookie) in string form.
func NewFileID(volumeID uint32, key uint64, cookie uint32) string {
	fid := needle.NewFileId(needle.VolumeId(volumeID), key, cookie)
	return fid.String()
}
// UploadBytes POSTs data to volumeURL/fid as application/octet-stream and
// returns the response; the caller is responsible for closing the body.
//
// http.NewRequest with a *bytes.Reader already populates
// Request.ContentLength, and a manually set Content-Length header is
// ignored by net/http when sending, so no such header is set here.
func UploadBytes(t testing.TB, client *http.Client, volumeURL, fid string, data []byte) *http.Response {
	t.Helper()
	req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/%s", volumeURL, fid), bytes.NewReader(data))
	if err != nil {
		t.Fatalf("build upload request: %v", err)
	}
	req.Header.Set("Content-Type", "application/octet-stream")
	return DoRequest(t, client, req)
}
func ReadBytes(t testing.TB, client *http.Client, volumeURL, fid string) *http.Response {
t.Helper()
req, err := http.NewRequest(http.MethodGet, fmt.Sprintf("%s/%s", volumeURL, fid), nil)
if err != nil {
t.Fatalf("build read request: %v", err)
}
return DoRequest(t, client, req)
}

445
test/volume_server/grpc/admin_extra_test.go

@ -0,0 +1,445 @@
package volume_server_grpc_test
import (
"context"
"net/http"
"strings"
"testing"
"time"
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
"github.com/seaweedfs/seaweedfs/weed/cluster"
"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
)
// TestVolumeNeedleStatusForUploadedFile verifies that VolumeNeedleStatus
// reports the needle id, cookie, and a non-zero size for a needle that was
// previously uploaded over HTTP.
func TestVolumeNeedleStatusForUploadedFile(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(21)
	const needleID = uint64(778899)
	const cookie = uint32(0xA1B2C3D4)
	framework.AllocateVolume(t, grpcClient, volumeID, "")
	fid := framework.NewFileID(volumeID, needleID, cookie)
	client := framework.NewHTTPClient()
	payload := []byte("needle-status-payload")
	uploadResp := framework.UploadBytes(t, client, clusterHarness.VolumeAdminURL(), fid, payload)
	// Drain and close the body so the HTTP connection can be reused.
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("upload status: expected 201, got %d", uploadResp.StatusCode)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	statusResp, err := grpcClient.VolumeNeedleStatus(ctx, &volume_server_pb.VolumeNeedleStatusRequest{
		VolumeId: volumeID,
		NeedleId: needleID,
	})
	if err != nil {
		t.Fatalf("VolumeNeedleStatus failed: %v", err)
	}
	if statusResp.GetNeedleId() != needleID {
		t.Fatalf("needle id mismatch: got %d want %d", statusResp.GetNeedleId(), needleID)
	}
	if statusResp.GetCookie() != cookie {
		t.Fatalf("cookie mismatch: got %d want %d", statusResp.GetCookie(), cookie)
	}
	if statusResp.GetSize() == 0 {
		t.Fatalf("expected non-zero needle size")
	}
}
// TestVolumeNeedleStatusViaEcShardsWhenNormalVolumeUnmounted checks that
// VolumeNeedleStatus is served from mounted EC shards once the normal
// volume is unmounted, and that a missing needle still yields a
// "not found" error on that path.
func TestVolumeNeedleStatusViaEcShardsWhenNormalVolumeUnmounted(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(26)
	const needleID = uint64(778900)
	const cookie = uint32(0xA1B2C3D5)
	framework.AllocateVolume(t, grpcClient, volumeID, "")
	client := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, needleID, cookie)
	payload := []byte("needle-status-ec-path-payload")
	uploadResp := framework.UploadBytes(t, client, clusterHarness.VolumeAdminURL(), fid, payload)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("upload status: expected 201, got %d", uploadResp.StatusCode)
	}
	// EC shard generation is comparatively slow; use a generous timeout.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	_, err := grpcClient.VolumeEcShardsGenerate(ctx, &volume_server_pb.VolumeEcShardsGenerateRequest{
		VolumeId:   volumeID,
		Collection: "",
	})
	if err != nil {
		t.Fatalf("VolumeEcShardsGenerate failed: %v", err)
	}
	// Mount shards 0-9 only, then unmount the normal volume so that needle
	// status must be answered from the EC shards.
	_, err = grpcClient.VolumeEcShardsMount(ctx, &volume_server_pb.VolumeEcShardsMountRequest{
		VolumeId:   volumeID,
		Collection: "",
		ShardIds:   []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
	})
	if err != nil {
		t.Fatalf("VolumeEcShardsMount data shards failed: %v", err)
	}
	_, err = grpcClient.VolumeUnmount(ctx, &volume_server_pb.VolumeUnmountRequest{
		VolumeId: volumeID,
	})
	if err != nil {
		t.Fatalf("VolumeUnmount failed: %v", err)
	}
	statusResp, err := grpcClient.VolumeNeedleStatus(ctx, &volume_server_pb.VolumeNeedleStatusRequest{
		VolumeId: volumeID,
		NeedleId: needleID,
	})
	if err != nil {
		t.Fatalf("VolumeNeedleStatus via EC shards failed: %v", err)
	}
	if statusResp.GetNeedleId() != needleID {
		t.Fatalf("needle id mismatch: got %d want %d", statusResp.GetNeedleId(), needleID)
	}
	if statusResp.GetCookie() != cookie {
		t.Fatalf("cookie mismatch: got %d want %d", statusResp.GetCookie(), cookie)
	}
	if statusResp.GetSize() == 0 {
		t.Fatalf("expected non-zero needle size from EC-backed needle status")
	}
	// A needle id that was never written must still report "not found".
	_, err = grpcClient.VolumeNeedleStatus(ctx, &volume_server_pb.VolumeNeedleStatusRequest{
		VolumeId: volumeID,
		NeedleId: needleID + 999999,
	})
	if err == nil || !strings.Contains(strings.ToLower(err.Error()), "not found") {
		t.Fatalf("VolumeNeedleStatus via EC shards missing-needle error mismatch: %v", err)
	}
}
// TestVolumeNeedleStatusMissingVolumeAndNeedle asserts the error messages
// for VolumeNeedleStatus when the volume does not exist and when the volume
// exists but the needle does not.
func TestVolumeNeedleStatusMissingVolumeAndNeedle(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(25)
	framework.AllocateVolume(t, grpcClient, volumeID, "")
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// Volume id 99925 is never allocated in this test.
	_, err := grpcClient.VolumeNeedleStatus(ctx, &volume_server_pb.VolumeNeedleStatusRequest{
		VolumeId: 99925,
		NeedleId: 1,
	})
	if err == nil {
		t.Fatalf("VolumeNeedleStatus should fail for missing volume")
	}
	if !strings.Contains(strings.ToLower(err.Error()), "volume not found") {
		t.Fatalf("VolumeNeedleStatus missing-volume error mismatch: %v", err)
	}
	_, err = grpcClient.VolumeNeedleStatus(ctx, &volume_server_pb.VolumeNeedleStatusRequest{
		VolumeId: volumeID,
		NeedleId: 123456789,
	})
	if err == nil {
		t.Fatalf("VolumeNeedleStatus should fail for missing needle")
	}
	if !strings.Contains(strings.ToLower(err.Error()), "not found") {
		t.Fatalf("VolumeNeedleStatus missing-needle error mismatch: %v", err)
	}
}
func mustNewRequest(t testing.TB, method, url string) *http.Request {
t.Helper()
req, err := http.NewRequest(method, url, nil)
if err != nil {
t.Fatalf("create request %s %s: %v", method, url, err)
}
return req
}
// TestVolumeConfigureInvalidReplication verifies that an unparsable
// replication string is reported in the response's error field (not as a
// gRPC transport error) and that the message mentions replication.
func TestVolumeConfigureInvalidReplication(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(22)
	framework.AllocateVolume(t, grpcClient, volumeID, "")
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	resp, err := grpcClient.VolumeConfigure(ctx, &volume_server_pb.VolumeConfigureRequest{
		VolumeId:    volumeID,
		Replication: "bad-replication",
	})
	if err != nil {
		t.Fatalf("VolumeConfigure returned grpc error: %v", err)
	}
	if resp.GetError() == "" {
		t.Fatalf("VolumeConfigure expected response error for invalid replication")
	}
	if !strings.Contains(strings.ToLower(resp.GetError()), "replication") {
		t.Fatalf("VolumeConfigure error should mention replication, got: %q", resp.GetError())
	}
}
// TestVolumeConfigureSuccessAndMissingRollbackPath covers the happy path of
// VolumeConfigure (volume stays mounted and writable) and the failure path
// for a volume missing on disk, where the error is expected to include both
// the not-found condition and the failed remount rollback.
func TestVolumeConfigureSuccessAndMissingRollbackPath(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(24)
	framework.AllocateVolume(t, grpcClient, volumeID, "")
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	successResp, err := grpcClient.VolumeConfigure(ctx, &volume_server_pb.VolumeConfigureRequest{
		VolumeId:    volumeID,
		Replication: "000",
	})
	if err != nil {
		t.Fatalf("VolumeConfigure success path returned grpc error: %v", err)
	}
	if successResp.GetError() != "" {
		t.Fatalf("VolumeConfigure success path expected empty response error, got: %q", successResp.GetError())
	}
	statusResp, err := grpcClient.VolumeStatus(ctx, &volume_server_pb.VolumeStatusRequest{VolumeId: volumeID})
	if err != nil {
		t.Fatalf("VolumeStatus after successful configure failed: %v", err)
	}
	if statusResp.GetIsReadOnly() {
		t.Fatalf("VolumeStatus after configure expected writable volume")
	}
	// Volume id 99024 was never allocated, so configure must fail inside
	// the response (the RPC itself still succeeds).
	missingResp, err := grpcClient.VolumeConfigure(ctx, &volume_server_pb.VolumeConfigureRequest{
		VolumeId:    99024,
		Replication: "000",
	})
	if err != nil {
		t.Fatalf("VolumeConfigure missing-volume branch should return response error, got grpc error: %v", err)
	}
	if missingResp.GetError() == "" {
		t.Fatalf("VolumeConfigure missing-volume expected non-empty response error")
	}
	lower := strings.ToLower(missingResp.GetError())
	if !strings.Contains(lower, "not found on disk") {
		t.Fatalf("VolumeConfigure missing-volume error should mention not found on disk, got: %q", missingResp.GetError())
	}
	if !strings.Contains(lower, "failed to restore mount") {
		t.Fatalf("VolumeConfigure missing-volume error should include remount rollback failure, got: %q", missingResp.GetError())
	}
}
// TestPingVolumeTargetAndLeaveAffectsHealthz pings the volume server as its
// own ping target, then calls VolumeServerLeave and polls /healthz until it
// returns 503, confirming leave flips the health status.
func TestPingVolumeTargetAndLeaveAffectsHealthz(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	pingResp, err := grpcClient.Ping(ctx, &volume_server_pb.PingRequest{
		TargetType: cluster.VolumeServerType,
		Target:     clusterHarness.VolumeServerAddress(),
	})
	if err != nil {
		t.Fatalf("Ping target volume server failed: %v", err)
	}
	if pingResp.GetRemoteTimeNs() == 0 {
		t.Fatalf("expected remote timestamp from ping target volume server")
	}
	if _, err = grpcClient.VolumeServerLeave(ctx, &volume_server_pb.VolumeServerLeaveRequest{}); err != nil {
		t.Fatalf("VolumeServerLeave failed: %v", err)
	}
	// Poll healthz: leave takes effect asynchronously, so retry up to 5s.
	client := framework.NewHTTPClient()
	healthURL := clusterHarness.VolumeAdminURL() + "/healthz"
	deadline := time.Now().Add(5 * time.Second)
	for {
		resp := framework.DoRequest(t, client, mustNewRequest(t, http.MethodGet, healthURL))
		_ = framework.ReadAllAndClose(t, resp)
		if resp.StatusCode == http.StatusServiceUnavailable {
			return
		}
		if time.Now().After(deadline) {
			t.Fatalf("expected healthz to return 503 after leave, got %d", resp.StatusCode)
		}
		time.Sleep(100 * time.Millisecond)
	}
}
// TestVolumeServerLeaveIsIdempotent calls VolumeServerLeave twice and
// verifies both calls succeed, then polls /healthz until it reports 503.
func TestVolumeServerLeaveIsIdempotent(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	if _, err := grpcClient.VolumeServerLeave(ctx, &volume_server_pb.VolumeServerLeaveRequest{}); err != nil {
		t.Fatalf("first VolumeServerLeave failed: %v", err)
	}
	if _, err := grpcClient.VolumeServerLeave(ctx, &volume_server_pb.VolumeServerLeaveRequest{}); err != nil {
		t.Fatalf("second VolumeServerLeave should be idempotent success, got: %v", err)
	}
	// Leave takes effect asynchronously, so poll healthz up to 5s.
	client := framework.NewHTTPClient()
	healthURL := clusterHarness.VolumeAdminURL() + "/healthz"
	deadline := time.Now().Add(5 * time.Second)
	for {
		resp := framework.DoRequest(t, client, mustNewRequest(t, http.MethodGet, healthURL))
		_ = framework.ReadAllAndClose(t, resp)
		if resp.StatusCode == http.StatusServiceUnavailable {
			return
		}
		if time.Now().After(deadline) {
			t.Fatalf("expected healthz to stay 503 after repeated leave, got %d", resp.StatusCode)
		}
		time.Sleep(100 * time.Millisecond)
	}
}
// TestPingUnknownAndUnreachableTargetPaths exercises the Ping RPC's error
// branches: an unknown target type returns success without a remote
// timestamp, while unreachable master and filer targets fail with errors
// naming the target kind.
func TestPingUnknownAndUnreachableTargetPaths(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	unknownResp, err := grpcClient.Ping(ctx, &volume_server_pb.PingRequest{
		TargetType: "unknown-type",
		Target:     "127.0.0.1:12345",
	})
	if err != nil {
		t.Fatalf("Ping unknown target type should not return grpc error, got: %v", err)
	}
	if unknownResp.GetRemoteTimeNs() != 0 {
		t.Fatalf("Ping unknown target type expected remote_time_ns=0, got %d", unknownResp.GetRemoteTimeNs())
	}
	if unknownResp.GetStopTimeNs() < unknownResp.GetStartTimeNs() {
		t.Fatalf("Ping unknown target type expected stop_time_ns >= start_time_ns")
	}
	// Port 1 is effectively never listening, giving a reliable dial failure.
	_, err = grpcClient.Ping(ctx, &volume_server_pb.PingRequest{
		TargetType: cluster.MasterType,
		Target:     "127.0.0.1:1",
	})
	if err == nil {
		t.Fatalf("Ping master target should fail when target is unreachable")
	}
	if !strings.Contains(err.Error(), "ping master") {
		t.Fatalf("Ping master unreachable error mismatch: %v", err)
	}
	_, err = grpcClient.Ping(ctx, &volume_server_pb.PingRequest{
		TargetType: cluster.FilerType,
		Target:     "127.0.0.1:1",
	})
	if err == nil {
		t.Fatalf("Ping filer target should fail when target is unreachable")
	}
	if !strings.Contains(err.Error(), "ping filer") {
		t.Fatalf("Ping filer unreachable error mismatch: %v", err)
	}
}
// TestPingMasterTargetSuccess pings the running master through the volume
// server and checks the response carries a remote timestamp and a sane
// start/stop ordering.
func TestPingMasterTargetSuccess(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	resp, err := grpcClient.Ping(ctx, &volume_server_pb.PingRequest{
		TargetType: cluster.MasterType,
		Target:     clusterHarness.MasterAddress(),
	})
	if err != nil {
		t.Fatalf("Ping master target success path failed: %v", err)
	}
	if resp.GetRemoteTimeNs() == 0 {
		t.Fatalf("Ping master target expected non-zero remote time")
	}
	if resp.GetStopTimeNs() < resp.GetStartTimeNs() {
		t.Fatalf("Ping master target expected stop >= start, got start=%d stop=%d", resp.GetStartTimeNs(), resp.GetStopTimeNs())
	}
}
// TestPingFilerTargetSuccess starts a cluster with a filer and pings the
// filer through the volume server, checking the remote timestamp and the
// start/stop ordering of the response.
func TestPingFilerTargetSuccess(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeClusterWithFiler(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	resp, err := grpcClient.Ping(ctx, &volume_server_pb.PingRequest{
		TargetType: cluster.FilerType,
		Target:     clusterHarness.FilerServerAddress(),
	})
	if err != nil {
		t.Fatalf("Ping filer target success path failed: %v", err)
	}
	if resp.GetRemoteTimeNs() == 0 {
		t.Fatalf("Ping filer target expected non-zero remote time")
	}
	if resp.GetStopTimeNs() < resp.GetStartTimeNs() {
		t.Fatalf("Ping filer target expected stop >= start, got start=%d stop=%d", resp.GetStartTimeNs(), resp.GetStopTimeNs())
	}
}

215
test/volume_server/grpc/admin_lifecycle_test.go

@ -0,0 +1,215 @@
package volume_server_grpc_test
import (
"context"
"net/http"
"strings"
"testing"
"time"
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// TestVolumeAdminLifecycleRPCs walks a volume through allocate, status,
// unmount, mount, and delete, and confirms status fails with a non-OK gRPC
// status once the volume is gone.
func TestVolumeAdminLifecycleRPCs(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	cluster := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, client := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress())
	defer conn.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	const volumeID = uint32(11)
	framework.AllocateVolume(t, client, volumeID, "")
	statusResp, err := client.VolumeStatus(ctx, &volume_server_pb.VolumeStatusRequest{VolumeId: volumeID})
	if err != nil {
		t.Fatalf("VolumeStatus failed: %v", err)
	}
	if statusResp.GetFileCount() != 0 {
		t.Fatalf("new volume should be empty, got file_count=%d", statusResp.GetFileCount())
	}
	if _, err = client.VolumeUnmount(ctx, &volume_server_pb.VolumeUnmountRequest{VolumeId: volumeID}); err != nil {
		t.Fatalf("VolumeUnmount failed: %v", err)
	}
	if _, err = client.VolumeMount(ctx, &volume_server_pb.VolumeMountRequest{VolumeId: volumeID}); err != nil {
		t.Fatalf("VolumeMount failed: %v", err)
	}
	if _, err = client.VolumeDelete(ctx, &volume_server_pb.VolumeDeleteRequest{VolumeId: volumeID, OnlyEmpty: true}); err != nil {
		t.Fatalf("VolumeDelete failed: %v", err)
	}
	_, err = client.VolumeStatus(ctx, &volume_server_pb.VolumeStatusRequest{VolumeId: volumeID})
	if err == nil {
		t.Fatalf("VolumeStatus should fail after delete")
	}
	if st, ok := status.FromError(err); !ok || st.Code() == codes.OK {
		t.Fatalf("VolumeStatus error should be a non-OK grpc status, got: %v", err)
	}
}
// TestVolumeDeleteOnlyEmptyVariants uploads one needle, then verifies that
// VolumeDelete with only_empty=true refuses a non-empty volume while
// only_empty=false removes it.
func TestVolumeDeleteOnlyEmptyVariants(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(13)
	framework.AllocateVolume(t, grpcClient, volumeID, "")
	client := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, 66001, 0x11223344)
	uploadResp := framework.UploadBytes(t, client, clusterHarness.VolumeAdminURL(), fid, []byte("volume-delete-only-empty"))
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	_, err := grpcClient.VolumeDelete(ctx, &volume_server_pb.VolumeDeleteRequest{VolumeId: volumeID, OnlyEmpty: true})
	if err == nil || !strings.Contains(err.Error(), "volume not empty") {
		t.Fatalf("VolumeDelete only_empty=true expected volume-not-empty error, got: %v", err)
	}
	_, err = grpcClient.VolumeDelete(ctx, &volume_server_pb.VolumeDeleteRequest{VolumeId: volumeID, OnlyEmpty: false})
	if err != nil {
		t.Fatalf("VolumeDelete only_empty=false failed: %v", err)
	}
	_, err = grpcClient.VolumeStatus(ctx, &volume_server_pb.VolumeStatusRequest{VolumeId: volumeID})
	if err == nil {
		t.Fatalf("VolumeStatus should fail after non-empty delete with only_empty=false")
	}
}
// TestMaintenanceModeRejectsAllocateVolume flips the server into maintenance
// mode via SetState (using the version from GetState for optimistic
// concurrency) and verifies AllocateVolume is rejected with a maintenance
// mode error.
func TestMaintenanceModeRejectsAllocateVolume(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	cluster := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, client := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress())
	defer conn.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	stateResp, err := client.GetState(ctx, &volume_server_pb.GetStateRequest{})
	if err != nil {
		t.Fatalf("GetState failed: %v", err)
	}
	_, err = client.SetState(ctx, &volume_server_pb.SetStateRequest{
		State: &volume_server_pb.VolumeServerState{Maintenance: true, Version: stateResp.GetState().GetVersion()},
	})
	if err != nil {
		t.Fatalf("SetState maintenance=true failed: %v", err)
	}
	_, err = client.AllocateVolume(ctx, &volume_server_pb.AllocateVolumeRequest{VolumeId: 12, Replication: "000"})
	if err == nil {
		t.Fatalf("AllocateVolume should fail when maintenance mode is enabled")
	}
	if !strings.Contains(err.Error(), "maintenance mode") {
		t.Fatalf("expected maintenance mode error, got: %v", err)
	}
}
// TestAllocateDuplicateAndMountUnmountMissingVariants covers edge cases of
// the mount lifecycle: unmounting a missing volume succeeds (idempotent),
// mounting a missing volume fails with a not-found-on-disk error, duplicate
// allocation fails, and unmount/remount of an existing volume round-trips.
func TestAllocateDuplicateAndMountUnmountMissingVariants(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	cluster := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, client := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress())
	defer conn.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	const missingVolumeID = uint32(99331)
	const volumeID = uint32(14)
	if _, err := client.VolumeUnmount(ctx, &volume_server_pb.VolumeUnmountRequest{VolumeId: missingVolumeID}); err != nil {
		t.Fatalf("VolumeUnmount missing volume should be idempotent success, got: %v", err)
	}
	_, err := client.VolumeMount(ctx, &volume_server_pb.VolumeMountRequest{VolumeId: missingVolumeID})
	if err == nil {
		t.Fatalf("VolumeMount missing volume should fail")
	}
	if !strings.Contains(err.Error(), "not found on disk") {
		t.Fatalf("VolumeMount missing volume error mismatch: %v", err)
	}
	framework.AllocateVolume(t, client, volumeID, "")
	_, err = client.AllocateVolume(ctx, &volume_server_pb.AllocateVolumeRequest{
		VolumeId:    volumeID,
		Replication: "000",
	})
	if err == nil {
		t.Fatalf("AllocateVolume duplicate should fail")
	}
	if !strings.Contains(strings.ToLower(err.Error()), "already exists") {
		t.Fatalf("AllocateVolume duplicate error mismatch: %v", err)
	}
	if _, err = client.VolumeUnmount(ctx, &volume_server_pb.VolumeUnmountRequest{VolumeId: volumeID}); err != nil {
		t.Fatalf("VolumeUnmount existing volume failed: %v", err)
	}
	if _, err = client.VolumeUnmount(ctx, &volume_server_pb.VolumeUnmountRequest{VolumeId: volumeID}); err != nil {
		t.Fatalf("VolumeUnmount already-unmounted volume should be idempotent success, got: %v", err)
	}
	if _, err = client.VolumeMount(ctx, &volume_server_pb.VolumeMountRequest{VolumeId: volumeID}); err != nil {
		t.Fatalf("VolumeMount remount failed: %v", err)
	}
}
// TestMaintenanceModeRejectsVolumeDelete verifies that VolumeDelete is
// refused with a maintenance-mode error while the server is in maintenance.
func TestMaintenanceModeRejectsVolumeDelete(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	cluster := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, client := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(15)
	framework.AllocateVolume(t, client, volumeID, "")

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// SetState takes the version previously returned by GetState.
	currentState, err := client.GetState(ctx, &volume_server_pb.GetStateRequest{})
	if err != nil {
		t.Fatalf("GetState failed: %v", err)
	}
	if _, err = client.SetState(ctx, &volume_server_pb.SetStateRequest{
		State: &volume_server_pb.VolumeServerState{
			Maintenance: true,
			Version:     currentState.GetState().GetVersion(),
		},
	}); err != nil {
		t.Fatalf("SetState maintenance=true failed: %v", err)
	}

	_, err = client.VolumeDelete(ctx, &volume_server_pb.VolumeDeleteRequest{VolumeId: volumeID, OnlyEmpty: true})
	switch {
	case err == nil:
		t.Fatalf("VolumeDelete should fail when maintenance mode is enabled")
	case !strings.Contains(err.Error(), "maintenance mode"):
		t.Fatalf("expected maintenance mode error, got: %v", err)
	}
}

177
test/volume_server/grpc/admin_readonly_collection_test.go

@ -0,0 +1,177 @@
package volume_server_grpc_test
import (
"context"
"strings"
"testing"
"time"
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
)
// TestVolumeMarkReadonlyAndWritableLifecycle toggles a volume to read-only
// and back to writable, checking that VolumeStatus reflects each transition.
func TestVolumeMarkReadonlyAndWritableLifecycle(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(72)
	framework.AllocateVolume(t, grpcClient, volumeID, "")

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Mark read-only without persisting and confirm the status flips.
	markReadonlyReq := &volume_server_pb.VolumeMarkReadonlyRequest{
		VolumeId: volumeID,
		Persist:  false,
	}
	if _, err := grpcClient.VolumeMarkReadonly(ctx, markReadonlyReq); err != nil {
		t.Fatalf("VolumeMarkReadonly failed: %v", err)
	}
	readOnlyStatus, err := grpcClient.VolumeStatus(ctx, &volume_server_pb.VolumeStatusRequest{VolumeId: volumeID})
	if err != nil {
		t.Fatalf("VolumeStatus after readonly failed: %v", err)
	}
	if !readOnlyStatus.GetIsReadOnly() {
		t.Fatalf("VolumeStatus expected readonly=true after VolumeMarkReadonly")
	}

	// Mark writable again and confirm the status flips back.
	if _, err = grpcClient.VolumeMarkWritable(ctx, &volume_server_pb.VolumeMarkWritableRequest{VolumeId: volumeID}); err != nil {
		t.Fatalf("VolumeMarkWritable failed: %v", err)
	}
	writableStatus, err := grpcClient.VolumeStatus(ctx, &volume_server_pb.VolumeStatusRequest{VolumeId: volumeID})
	if err != nil {
		t.Fatalf("VolumeStatus after writable failed: %v", err)
	}
	if writableStatus.GetIsReadOnly() {
		t.Fatalf("VolumeStatus expected readonly=false after VolumeMarkWritable")
	}
}
// TestVolumeMarkReadonlyPersistTrue marks a volume read-only with
// persist=true and verifies VolumeStatus reports it as read-only.
func TestVolumeMarkReadonlyPersistTrue(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(74)
	framework.AllocateVolume(t, grpcClient, volumeID, "")

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	if _, err := grpcClient.VolumeMarkReadonly(ctx, &volume_server_pb.VolumeMarkReadonlyRequest{
		VolumeId: volumeID,
		Persist:  true,
	}); err != nil {
		t.Fatalf("VolumeMarkReadonly persist=true failed: %v", err)
	}
	statusResp, err := grpcClient.VolumeStatus(ctx, &volume_server_pb.VolumeStatusRequest{VolumeId: volumeID})
	if err != nil {
		t.Fatalf("VolumeStatus after persist readonly failed: %v", err)
	}
	if !statusResp.GetIsReadOnly() {
		t.Fatalf("VolumeStatus expected readonly=true after persist readonly")
	}
}
// TestVolumeMarkReadonlyWritableErrorPaths covers the failure branches of
// VolumeMarkReadonly/VolumeMarkWritable: a never-allocated volume id yields
// a "not found" error, and once maintenance mode is enabled both RPCs are
// rejected with a "maintenance mode" error.
func TestVolumeMarkReadonlyWritableErrorPaths(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// Volume ids 98771/98772 were never allocated in this cluster, so both
	// RPCs must fail with an error mentioning "not found".
	_, err := grpcClient.VolumeMarkReadonly(ctx, &volume_server_pb.VolumeMarkReadonlyRequest{VolumeId: 98771, Persist: true})
	if err == nil || !strings.Contains(err.Error(), "not found") {
		t.Fatalf("VolumeMarkReadonly missing-volume error mismatch: %v", err)
	}
	_, err = grpcClient.VolumeMarkWritable(ctx, &volume_server_pb.VolumeMarkWritableRequest{VolumeId: 98772})
	if err == nil || !strings.Contains(err.Error(), "not found") {
		t.Fatalf("VolumeMarkWritable missing-volume error mismatch: %v", err)
	}
	// Enable maintenance mode; SetState takes the version previously
	// returned by GetState.
	stateResp, err := grpcClient.GetState(ctx, &volume_server_pb.GetStateRequest{})
	if err != nil {
		t.Fatalf("GetState failed: %v", err)
	}
	_, err = grpcClient.SetState(ctx, &volume_server_pb.SetStateRequest{
		State: &volume_server_pb.VolumeServerState{
			Maintenance: true,
			Version:     stateResp.GetState().GetVersion(),
		},
	})
	if err != nil {
		t.Fatalf("SetState maintenance=true failed: %v", err)
	}
	// In maintenance mode even existing-volume requests are refused before
	// any volume lookup, with a "maintenance mode" error.
	_, err = grpcClient.VolumeMarkReadonly(ctx, &volume_server_pb.VolumeMarkReadonlyRequest{VolumeId: 1, Persist: true})
	if err == nil || !strings.Contains(err.Error(), "maintenance mode") {
		t.Fatalf("VolumeMarkReadonly maintenance error mismatch: %v", err)
	}
	_, err = grpcClient.VolumeMarkWritable(ctx, &volume_server_pb.VolumeMarkWritableRequest{VolumeId: 1})
	if err == nil || !strings.Contains(err.Error(), "maintenance mode") {
		t.Fatalf("VolumeMarkWritable maintenance error mismatch: %v", err)
	}
}
// TestDeleteCollectionRemovesVolumeAndIsIdempotent checks that deleting a
// collection removes its volume (VolumeStatus then fails with "not found
// volume") and that repeating the delete still succeeds.
func TestDeleteCollectionRemovesVolumeAndIsIdempotent(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(73)
	const collection = "it-delete-collection"
	framework.AllocateVolume(t, grpcClient, volumeID, collection)

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	statusReq := &volume_server_pb.VolumeStatusRequest{VolumeId: volumeID}
	deleteReq := &volume_server_pb.DeleteCollectionRequest{Collection: collection}

	// The volume must exist before the collection is deleted.
	if _, err := grpcClient.VolumeStatus(ctx, statusReq); err != nil {
		t.Fatalf("VolumeStatus before DeleteCollection failed: %v", err)
	}
	if _, err := grpcClient.DeleteCollection(ctx, deleteReq); err != nil {
		t.Fatalf("DeleteCollection existing collection failed: %v", err)
	}

	// After the delete the volume is gone.
	_, statusErr := grpcClient.VolumeStatus(ctx, statusReq)
	switch {
	case statusErr == nil:
		t.Fatalf("VolumeStatus should fail after collection delete")
	case !strings.Contains(statusErr.Error(), "not found volume"):
		t.Fatalf("VolumeStatus after DeleteCollection error mismatch: %v", statusErr)
	}

	// Deleting the same collection again is a success (idempotent).
	if _, err := grpcClient.DeleteCollection(ctx, deleteReq); err != nil {
		t.Fatalf("DeleteCollection idempotent retry failed: %v", err)
	}
}

264
test/volume_server/grpc/batch_delete_test.go

@ -0,0 +1,264 @@
package volume_server_grpc_test
import (
"bytes"
"context"
"net/http"
"strings"
"testing"
"time"
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
)
// TestBatchDeleteInvalidFidAndMaintenanceMode first confirms that a
// malformed file id yields a per-entry 400 result (not a gRPC error), then
// confirms that BatchDelete as a whole is rejected while maintenance mode
// is enabled.
func TestBatchDeleteInvalidFidAndMaintenanceMode(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	cluster := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, client := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress())
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	badFidResp, err := client.BatchDelete(ctx, &volume_server_pb.BatchDeleteRequest{FileIds: []string{"bad-fid"}})
	if err != nil {
		t.Fatalf("BatchDelete invalid fid should return response, got error: %v", err)
	}
	results := badFidResp.GetResults()
	if len(results) != 1 {
		t.Fatalf("expected one batch delete result, got %d", len(results))
	}
	if got := results[0].GetStatus(); got != 400 {
		t.Fatalf("invalid fid expected status 400, got %d", got)
	}

	// Flip the server into maintenance mode using the version from GetState.
	currentState, err := client.GetState(ctx, &volume_server_pb.GetStateRequest{})
	if err != nil {
		t.Fatalf("GetState failed: %v", err)
	}
	if _, err = client.SetState(ctx, &volume_server_pb.SetStateRequest{
		State: &volume_server_pb.VolumeServerState{Maintenance: true, Version: currentState.GetState().GetVersion()},
	}); err != nil {
		t.Fatalf("SetState maintenance=true failed: %v", err)
	}

	_, err = client.BatchDelete(ctx, &volume_server_pb.BatchDeleteRequest{FileIds: []string{"1,1234567890ab"}})
	switch {
	case err == nil:
		t.Fatalf("BatchDelete should fail when maintenance mode is enabled")
	case !strings.Contains(err.Error(), "maintenance mode"):
		t.Fatalf("expected maintenance mode error, got: %v", err)
	}
}
// TestBatchDeleteCookieMismatchAndSkipCheck uploads a needle, then issues
// BatchDelete with a wrong cookie: with cookie checking on, the entry is
// rejected with 400; with skip_cookie_check=true the delete is accepted
// (202) and a subsequent HTTP read of the needle returns 404.
func TestBatchDeleteCookieMismatchAndSkipCheck(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	cluster := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, client := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(31)
	const needleID = uint64(900001)
	const correctCookie = uint32(0x1122AABB)
	// wrongCookie differs from correctCookie only in the last byte.
	const wrongCookie = uint32(0x1122AABC)
	framework.AllocateVolume(t, client, volumeID, "")
	httpClient := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, needleID, correctCookie)
	// Seed the needle over HTTP with the correct cookie.
	uploadResp := framework.UploadBytes(t, httpClient, cluster.VolumeAdminURL(), fid, []byte("batch-delete-cookie-check"))
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
	}
	wrongCookieFid := framework.NewFileID(volumeID, needleID, wrongCookie)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// With cookie checking enabled, the mismatched fid is rejected per-entry.
	mismatchResp, err := client.BatchDelete(ctx, &volume_server_pb.BatchDeleteRequest{
		FileIds:         []string{wrongCookieFid},
		SkipCookieCheck: false,
	})
	if err != nil {
		t.Fatalf("BatchDelete with cookie check failed: %v", err)
	}
	if len(mismatchResp.GetResults()) != 1 {
		t.Fatalf("BatchDelete cookie mismatch expected 1 result, got %d", len(mismatchResp.GetResults()))
	}
	if mismatchResp.GetResults()[0].GetStatus() != http.StatusBadRequest {
		t.Fatalf("BatchDelete cookie mismatch expected status 400, got %d", mismatchResp.GetResults()[0].GetStatus())
	}
	// skip_cookie_check=true deletes the needle despite the wrong cookie.
	skipCheckResp, err := client.BatchDelete(ctx, &volume_server_pb.BatchDeleteRequest{
		FileIds:         []string{wrongCookieFid},
		SkipCookieCheck: true,
	})
	if err != nil {
		t.Fatalf("BatchDelete skip cookie check failed: %v", err)
	}
	if len(skipCheckResp.GetResults()) != 1 {
		t.Fatalf("BatchDelete skip check expected 1 result, got %d", len(skipCheckResp.GetResults()))
	}
	if skipCheckResp.GetResults()[0].GetStatus() != http.StatusAccepted {
		t.Fatalf("BatchDelete skip check expected status 202, got %d", skipCheckResp.GetResults()[0].GetStatus())
	}
	// Reading back with the original (correct) fid must now return 404.
	readAfterDelete := framework.ReadBytes(t, httpClient, cluster.VolumeAdminURL(), fid)
	_ = framework.ReadAllAndClose(t, readAfterDelete)
	if readAfterDelete.StatusCode != http.StatusNotFound {
		t.Fatalf("read after skip-check batch delete expected 404, got %d", readAfterDelete.StatusCode)
	}
}
// TestBatchDeleteMixedStatusesAndMismatchStopsProcessing uploads three
// needles and then checks two BatchDelete behaviors: (1) a mixed request
// (bad fid, valid fid, missing fid) returns a per-entry status for every
// input (400 / 202 / 404); (2) a cookie mismatch stops processing early —
// only one result is returned and the remaining fids are left undeleted.
func TestBatchDeleteMixedStatusesAndMismatchStopsProcessing(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	cluster := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, client := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(32)
	framework.AllocateVolume(t, client, volumeID, "")
	// Three needles with distinct ids and cookies.
	const needleA = uint64(910001)
	const needleB = uint64(910002)
	const needleC = uint64(910003)
	const cookieA = uint32(0x11111111)
	const cookieB = uint32(0x22222222)
	const cookieC = uint32(0x33333333)
	httpClient := framework.NewHTTPClient()
	fidA := framework.NewFileID(volumeID, needleA, cookieA)
	fidB := framework.NewFileID(volumeID, needleB, cookieB)
	fidC := framework.NewFileID(volumeID, needleC, cookieC)
	// Seed all three needles over HTTP.
	for _, tc := range []struct {
		fid  string
		body string
	}{
		{fid: fidA, body: "batch-delete-mixed-a"},
		{fid: fidB, body: "batch-delete-mixed-b"},
		{fid: fidC, body: "batch-delete-mixed-c"},
	} {
		uploadResp := framework.UploadBytes(t, httpClient, cluster.VolumeAdminURL(), tc.fid, []byte(tc.body))
		_ = framework.ReadAllAndClose(t, uploadResp)
		if uploadResp.StatusCode != http.StatusCreated {
			t.Fatalf("upload %s expected 201, got %d", tc.fid, uploadResp.StatusCode)
		}
	}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// A fid whose needle id was never written.
	missingFid := framework.NewFileID(volumeID, 919999, 0x44444444)
	// Mixed request: malformed fid, existing fid, missing fid — one result
	// per input, in order.
	mixedResp, err := client.BatchDelete(ctx, &volume_server_pb.BatchDeleteRequest{
		FileIds: []string{"bad-fid", fidA, missingFid},
	})
	if err != nil {
		t.Fatalf("BatchDelete mixed status request failed: %v", err)
	}
	if len(mixedResp.GetResults()) != 3 {
		t.Fatalf("BatchDelete mixed status expected 3 results, got %d", len(mixedResp.GetResults()))
	}
	if mixedResp.GetResults()[0].GetStatus() != http.StatusBadRequest {
		t.Fatalf("BatchDelete mixed result[0] expected 400, got %d", mixedResp.GetResults()[0].GetStatus())
	}
	if mixedResp.GetResults()[1].GetStatus() != http.StatusAccepted {
		t.Fatalf("BatchDelete mixed result[1] expected 202, got %d", mixedResp.GetResults()[1].GetStatus())
	}
	if mixedResp.GetResults()[2].GetStatus() != http.StatusNotFound {
		t.Fatalf("BatchDelete mixed result[2] expected 404, got %d", mixedResp.GetResults()[2].GetStatus())
	}
	// fidA (status 202 above) must actually be gone.
	readDeletedA := framework.ReadBytes(t, httpClient, cluster.VolumeAdminURL(), fidA)
	_ = framework.ReadAllAndClose(t, readDeletedA)
	if readDeletedA.StatusCode != http.StatusNotFound {
		t.Fatalf("fidA should be deleted after batch delete, got status %d", readDeletedA.StatusCode)
	}
	// A cookie mismatch on the first fid stops processing: only one result
	// comes back and fidC is never attempted.
	wrongCookieB := framework.NewFileID(volumeID, needleB, cookieB+1)
	stopResp, err := client.BatchDelete(ctx, &volume_server_pb.BatchDeleteRequest{
		FileIds: []string{wrongCookieB, fidC},
	})
	if err != nil {
		t.Fatalf("BatchDelete mismatch-stop request failed: %v", err)
	}
	if len(stopResp.GetResults()) != 1 {
		t.Fatalf("BatchDelete mismatch-stop expected 1 result due early break, got %d", len(stopResp.GetResults()))
	}
	if stopResp.GetResults()[0].GetStatus() != http.StatusBadRequest {
		t.Fatalf("BatchDelete mismatch-stop expected 400, got %d", stopResp.GetResults()[0].GetStatus())
	}
	// Both remaining needles must still be readable.
	readB := framework.ReadBytes(t, httpClient, cluster.VolumeAdminURL(), fidB)
	_ = framework.ReadAllAndClose(t, readB)
	if readB.StatusCode != http.StatusOK {
		t.Fatalf("fidB should remain after cookie mismatch path, got %d", readB.StatusCode)
	}
	readC := framework.ReadBytes(t, httpClient, cluster.VolumeAdminURL(), fidC)
	_ = framework.ReadAllAndClose(t, readC)
	if readC.StatusCode != http.StatusOK {
		t.Fatalf("fidC should remain when batch processing stops on mismatch, got %d", readC.StatusCode)
	}
}
// TestBatchDeleteRejectsChunkManifestNeedles uploads a needle with the
// ?cm=true query parameter and verifies BatchDelete refuses to delete it:
// the per-entry result is 406 with an error mentioning ChunkManifest, and
// the needle stays readable afterwards.
func TestBatchDeleteRejectsChunkManifestNeedles(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	cluster := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, client := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(33)
	framework.AllocateVolume(t, client, volumeID, "")
	httpClient := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, 920001, 0x5555AAAA)
	// Upload with cm=true so the needle is stored flagged as a chunk manifest.
	req, err := http.NewRequest(http.MethodPost, cluster.VolumeAdminURL()+"/"+fid+"?cm=true", bytes.NewReader([]byte("manifest-placeholder-payload")))
	if err != nil {
		t.Fatalf("create chunk manifest upload request: %v", err)
	}
	req.Header.Set("Content-Type", "application/octet-stream")
	uploadResp := framework.DoRequest(t, httpClient, req)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("chunk manifest upload expected 201, got %d", uploadResp.StatusCode)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// BatchDelete reports the rejection per-entry, not as a gRPC error.
	resp, err := client.BatchDelete(ctx, &volume_server_pb.BatchDeleteRequest{FileIds: []string{fid}})
	if err != nil {
		t.Fatalf("BatchDelete chunk manifest should return response, got grpc error: %v", err)
	}
	if len(resp.GetResults()) != 1 {
		t.Fatalf("BatchDelete chunk manifest expected one result, got %d", len(resp.GetResults()))
	}
	if resp.GetResults()[0].GetStatus() != http.StatusNotAcceptable {
		t.Fatalf("BatchDelete chunk manifest expected status 406, got %d", resp.GetResults()[0].GetStatus())
	}
	if !strings.Contains(resp.GetResults()[0].GetError(), "ChunkManifest") {
		t.Fatalf("BatchDelete chunk manifest expected error mentioning ChunkManifest, got %q", resp.GetResults()[0].GetError())
	}
	// The rejected needle must still be readable.
	readResp := framework.ReadBytes(t, httpClient, cluster.VolumeAdminURL(), fid)
	_ = framework.ReadAllAndClose(t, readResp)
	if readResp.StatusCode != http.StatusOK {
		t.Fatalf("chunk manifest should not be deleted by BatchDelete reject path, got %d", readResp.StatusCode)
	}
}

431
test/volume_server/grpc/copy_receive_variants_test.go

@ -0,0 +1,431 @@
package volume_server_grpc_test
import (
"context"
"io"
"math"
"strings"
"testing"
"time"
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
)
// TestVolumeIncrementalCopyDataAndNoDataPaths streams incremental-copy data
// from since_ns=0 (expecting bytes after an upload) and from the maximum
// since_ns (expecting an immediate EOF with no data).
func TestVolumeIncrementalCopyDataAndNoDataPaths(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(91)
	framework.AllocateVolume(t, grpcClient, volumeID, "")

	// Seed the volume with one needle over HTTP so there is data to copy.
	client := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, 770001, 0x1122AABB)
	uploadResp := framework.UploadBytes(t, client, clusterHarness.VolumeAdminURL(), fid, []byte("incremental-copy-content"))
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != 201 {
		t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// since_ns=0 covers the full volume history, so data must be streamed.
	dataStream, err := grpcClient.VolumeIncrementalCopy(ctx, &volume_server_pb.VolumeIncrementalCopyRequest{
		VolumeId: volumeID,
		SinceNs:  0,
	})
	if err != nil {
		t.Fatalf("VolumeIncrementalCopy start failed: %v", err)
	}
	var totalBytes int
	for {
		msg, recvErr := dataStream.Recv()
		if recvErr != nil {
			if recvErr == io.EOF {
				break
			}
			t.Fatalf("VolumeIncrementalCopy recv failed: %v", recvErr)
		}
		totalBytes += len(msg.GetFileContent())
	}
	if totalBytes == 0 {
		t.Fatalf("VolumeIncrementalCopy expected streamed bytes for since_ns=0")
	}

	// since_ns at the maximum: the stream ends immediately with EOF.
	noDataStream, err := grpcClient.VolumeIncrementalCopy(ctx, &volume_server_pb.VolumeIncrementalCopyRequest{
		VolumeId: volumeID,
		SinceNs:  math.MaxUint64,
	})
	if err != nil {
		t.Fatalf("VolumeIncrementalCopy no-data start failed: %v", err)
	}
	if _, err = noDataStream.Recv(); err != io.EOF {
		t.Fatalf("VolumeIncrementalCopy no-data expected EOF, got: %v", err)
	}
}
// TestCopyFileIgnoreNotFoundAndStopOffsetZeroPaths exercises CopyFile
// against a file extension that does not exist on disk: without the ignore
// flag the call must error; with ignore_source_file_not_found=true, or with
// stop_offset=0, the stream must end with a clean EOF.
func TestCopyFileIgnoreNotFoundAndStopOffsetZeroPaths(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(92)
	framework.AllocateVolume(t, grpcClient, volumeID, "")

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// startCopy opens a CopyFile stream for the missing ".definitely-missing"
	// extension with the given stop offset and ignore flag.
	startCopy := func(stopOffset uint64, ignoreMissing bool) (volume_server_pb.VolumeServer_CopyFileClient, error) {
		return grpcClient.CopyFile(ctx, &volume_server_pb.CopyFileRequest{
			VolumeId:                 volumeID,
			Ext:                      ".definitely-missing",
			CompactionRevision:       math.MaxUint32,
			StopOffset:               1,
			IgnoreSourceFileNotFound: ignoreMissing,
		})
	}
	_ = startCopy // keep name visible for the non-default stop-offset call below

	missingNoIgnore, err := grpcClient.CopyFile(ctx, &volume_server_pb.CopyFileRequest{
		VolumeId:                 volumeID,
		Ext:                      ".definitely-missing",
		CompactionRevision:       math.MaxUint32,
		StopOffset:               1,
		IgnoreSourceFileNotFound: false,
	})
	if err == nil {
		_, err = missingNoIgnore.Recv()
	}
	if err == nil {
		t.Fatalf("CopyFile should fail for missing source file when ignore_source_file_not_found=false")
	}

	missingIgnored, err := grpcClient.CopyFile(ctx, &volume_server_pb.CopyFileRequest{
		VolumeId:                 volumeID,
		Ext:                      ".definitely-missing",
		CompactionRevision:       math.MaxUint32,
		StopOffset:               1,
		IgnoreSourceFileNotFound: true,
	})
	if err != nil {
		t.Fatalf("CopyFile ignore-not-found start failed: %v", err)
	}
	if _, err = missingIgnored.Recv(); err != io.EOF {
		t.Fatalf("CopyFile ignore-not-found expected EOF, got: %v", err)
	}

	// stop_offset=0 requests nothing, so even a missing file ends in EOF.
	stopZeroStream, err := grpcClient.CopyFile(ctx, &volume_server_pb.CopyFileRequest{
		VolumeId:                 volumeID,
		Ext:                      ".definitely-missing",
		CompactionRevision:       math.MaxUint32,
		StopOffset:               0,
		IgnoreSourceFileNotFound: false,
	})
	if err != nil {
		t.Fatalf("CopyFile stop_offset=0 start failed: %v", err)
	}
	if _, err = stopZeroStream.Recv(); err != io.EOF {
		t.Fatalf("CopyFile stop_offset=0 expected EOF, got: %v", err)
	}
}
// TestCopyFileCompactionRevisionMismatch asks CopyFile for compaction
// revision 1 on a freshly allocated volume and expects the "is compacted"
// mismatch error.
func TestCopyFileCompactionRevisionMismatch(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(94)
	framework.AllocateVolume(t, grpcClient, volumeID, "")

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	mismatchReq := &volume_server_pb.CopyFileRequest{
		VolumeId:           volumeID,
		Ext:                ".idx",
		CompactionRevision: 1, // fresh volume starts at revision 0
		StopOffset:         1,
	}
	stream, err := grpcClient.CopyFile(ctx, mismatchReq)
	if err == nil {
		_, err = stream.Recv()
	}
	if err == nil || !strings.Contains(err.Error(), "is compacted") {
		t.Fatalf("CopyFile compaction mismatch error mismatch: %v", err)
	}
}
// TestReceiveFileProtocolViolationResponses sends malformed ReceiveFile
// streams and checks the server reports each violation in the response's
// Error field (the RPC itself still completes): content sent before the
// info message, and a request with no payload set at all.
func TestReceiveFileProtocolViolationResponses(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// Violation 1: a FileContent message arrives before any Info message.
	contentFirstStream, err := grpcClient.ReceiveFile(ctx)
	if err != nil {
		t.Fatalf("ReceiveFile stream create failed: %v", err)
	}
	if err = contentFirstStream.Send(&volume_server_pb.ReceiveFileRequest{
		Data: &volume_server_pb.ReceiveFileRequest_FileContent{
			FileContent: []byte("content-before-info"),
		},
	}); err != nil {
		t.Fatalf("ReceiveFile send content-first failed: %v", err)
	}
	contentFirstResp, err := contentFirstStream.CloseAndRecv()
	if err != nil {
		t.Fatalf("ReceiveFile content-first close failed: %v", err)
	}
	if !strings.Contains(contentFirstResp.GetError(), "file info must be sent first") {
		t.Fatalf("ReceiveFile content-first response mismatch: %+v", contentFirstResp)
	}
	// Violation 2: a request whose oneof Data field is unset.
	unknownTypeStream, err := grpcClient.ReceiveFile(ctx)
	if err != nil {
		t.Fatalf("ReceiveFile stream create for unknown-type failed: %v", err)
	}
	if err = unknownTypeStream.Send(&volume_server_pb.ReceiveFileRequest{}); err != nil {
		t.Fatalf("ReceiveFile send unknown-type request failed: %v", err)
	}
	unknownTypeResp, err := unknownTypeStream.CloseAndRecv()
	if err != nil {
		t.Fatalf("ReceiveFile unknown-type close failed: %v", err)
	}
	if !strings.Contains(unknownTypeResp.GetError(), "unknown message type") {
		t.Fatalf("ReceiveFile unknown-type response mismatch: %+v", unknownTypeResp)
	}
}
// TestReceiveFileSuccessForRegularVolume streams a two-chunk file into a
// regular volume via ReceiveFile (one Info message first, then FileContent
// chunks), verifies the reported bytes_written, and reads the file back
// with CopyFile to confirm the bytes round-trip intact.
func TestReceiveFileSuccessForRegularVolume(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(95)
	framework.AllocateVolume(t, grpcClient, volumeID, "")
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// The file is sent in two chunks; expected is their concatenation.
	payloadA := []byte("receive-file-chunk-a:")
	payloadB := []byte("receive-file-chunk-b")
	expected := append(append([]byte{}, payloadA...), payloadB...)
	receiveStream, err := grpcClient.ReceiveFile(ctx)
	if err != nil {
		t.Fatalf("ReceiveFile stream create failed: %v", err)
	}
	// Protocol: the Info message must precede any FileContent messages.
	if err = receiveStream.Send(&volume_server_pb.ReceiveFileRequest{
		Data: &volume_server_pb.ReceiveFileRequest_Info{
			Info: &volume_server_pb.ReceiveFileInfo{
				VolumeId:   volumeID,
				Ext:        ".tmprecv",
				Collection: "",
				IsEcVolume: false,
				FileSize:   uint64(len(expected)),
			},
		},
	}); err != nil {
		t.Fatalf("ReceiveFile send info failed: %v", err)
	}
	if err = receiveStream.Send(&volume_server_pb.ReceiveFileRequest{
		Data: &volume_server_pb.ReceiveFileRequest_FileContent{FileContent: payloadA},
	}); err != nil {
		t.Fatalf("ReceiveFile send payloadA failed: %v", err)
	}
	if err = receiveStream.Send(&volume_server_pb.ReceiveFileRequest{
		Data: &volume_server_pb.ReceiveFileRequest_FileContent{FileContent: payloadB},
	}); err != nil {
		t.Fatalf("ReceiveFile send payloadB failed: %v", err)
	}
	resp, err := receiveStream.CloseAndRecv()
	if err != nil {
		t.Fatalf("ReceiveFile close failed: %v", err)
	}
	// Application-level errors are reported in the response, not as RPC errors.
	if resp.GetError() != "" {
		t.Fatalf("ReceiveFile unexpected error response: %+v", resp)
	}
	if resp.GetBytesWritten() != uint64(len(expected)) {
		t.Fatalf("ReceiveFile bytes_written mismatch: got %d want %d", resp.GetBytesWritten(), len(expected))
	}
	// Read the stored file back through CopyFile and compare byte-for-byte.
	copyStream, err := grpcClient.CopyFile(ctx, &volume_server_pb.CopyFileRequest{
		VolumeId:           volumeID,
		Ext:                ".tmprecv",
		CompactionRevision: math.MaxUint32,
		StopOffset:         uint64(len(expected)),
	})
	if err != nil {
		t.Fatalf("CopyFile for received data start failed: %v", err)
	}
	var copied []byte
	for {
		msg, recvErr := copyStream.Recv()
		if recvErr == io.EOF {
			break
		}
		if recvErr != nil {
			t.Fatalf("CopyFile for received data recv failed: %v", recvErr)
		}
		copied = append(copied, msg.GetFileContent()...)
	}
	if string(copied) != string(expected) {
		t.Fatalf("received file data mismatch: got %q want %q", string(copied), string(expected))
	}
}
// TestReceiveFileSuccessForEcVolume streams a two-chunk EC shard file
// (is_ec_volume=true, shard id 0, a named collection) via ReceiveFile,
// verifies bytes_written, and reads the shard back with CopyFile using the
// matching EC parameters to confirm the bytes round-trip intact.
func TestReceiveFileSuccessForEcVolume(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	const volumeID = uint32(96)
	const collection = "ec-receive-success"
	const ext = ".ec00"
	// The shard is sent in two chunks; expected is their concatenation.
	payloadA := []byte("receive-ec-file-chunk-a:")
	payloadB := []byte("receive-ec-file-chunk-b")
	expected := append(append([]byte{}, payloadA...), payloadB...)
	receiveStream, err := grpcClient.ReceiveFile(ctx)
	if err != nil {
		t.Fatalf("ReceiveFile stream create failed: %v", err)
	}
	// Protocol: the Info message (with EC metadata) precedes the content.
	if err = receiveStream.Send(&volume_server_pb.ReceiveFileRequest{
		Data: &volume_server_pb.ReceiveFileRequest_Info{
			Info: &volume_server_pb.ReceiveFileInfo{
				VolumeId:   volumeID,
				Ext:        ext,
				Collection: collection,
				IsEcVolume: true,
				ShardId:    0,
				FileSize:   uint64(len(expected)),
			},
		},
	}); err != nil {
		t.Fatalf("ReceiveFile send EC info failed: %v", err)
	}
	if err = receiveStream.Send(&volume_server_pb.ReceiveFileRequest{
		Data: &volume_server_pb.ReceiveFileRequest_FileContent{FileContent: payloadA},
	}); err != nil {
		t.Fatalf("ReceiveFile send EC payloadA failed: %v", err)
	}
	if err = receiveStream.Send(&volume_server_pb.ReceiveFileRequest{
		Data: &volume_server_pb.ReceiveFileRequest_FileContent{FileContent: payloadB},
	}); err != nil {
		t.Fatalf("ReceiveFile send EC payloadB failed: %v", err)
	}
	resp, err := receiveStream.CloseAndRecv()
	if err != nil {
		t.Fatalf("ReceiveFile EC close failed: %v", err)
	}
	// Application-level errors are reported in the response, not as RPC errors.
	if resp.GetError() != "" {
		t.Fatalf("ReceiveFile EC unexpected error response: %+v", resp)
	}
	if resp.GetBytesWritten() != uint64(len(expected)) {
		t.Fatalf("ReceiveFile EC bytes_written mismatch: got %d want %d", resp.GetBytesWritten(), len(expected))
	}
	// Read the stored shard back with matching collection/EC flags.
	copyStream, err := grpcClient.CopyFile(ctx, &volume_server_pb.CopyFileRequest{
		VolumeId:           volumeID,
		Collection:         collection,
		IsEcVolume:         true,
		Ext:                ext,
		CompactionRevision: math.MaxUint32,
		StopOffset:         uint64(len(expected)),
	})
	if err != nil {
		t.Fatalf("CopyFile for received EC data start failed: %v", err)
	}
	var copied []byte
	for {
		msg, recvErr := copyStream.Recv()
		if recvErr == io.EOF {
			break
		}
		if recvErr != nil {
			t.Fatalf("CopyFile for received EC data recv failed: %v", recvErr)
		}
		copied = append(copied, msg.GetFileContent()...)
	}
	if string(copied) != string(expected) {
		t.Fatalf("received EC file data mismatch: got %q want %q", string(copied), string(expected))
	}
}
// TestCopyFileEcVolumeIgnoreMissingSourcePaths exercises CopyFile for EC
// shards of volumes that were never created: without the ignore flag the
// call fails with a not-found error, with the flag it ends in a clean EOF.
func TestCopyFileEcVolumeIgnoreMissingSourcePaths(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// ecCopyRequest builds a CopyFile request for a nonexistent EC shard.
	ecCopyRequest := func(volumeID uint32, ignoreMissing bool) *volume_server_pb.CopyFileRequest {
		return &volume_server_pb.CopyFileRequest{
			VolumeId:                 volumeID,
			Collection:               "ec-copy-missing",
			IsEcVolume:               true,
			Ext:                      ".ec00",
			CompactionRevision:       math.MaxUint32,
			StopOffset:               1,
			IgnoreSourceFileNotFound: ignoreMissing,
		}
	}

	streamNoIgnore, err := grpcClient.CopyFile(ctx, ecCopyRequest(99601, false))
	if err == nil {
		_, err = streamNoIgnore.Recv()
	}
	if err == nil || !strings.Contains(err.Error(), "not found ec volume id") {
		t.Fatalf("CopyFile EC missing source error mismatch: %v", err)
	}

	streamIgnore, err := grpcClient.CopyFile(ctx, ecCopyRequest(99602, true))
	if err != nil {
		t.Fatalf("CopyFile EC ignore-missing start failed: %v", err)
	}
	if _, err = streamIgnore.Recv(); err != io.EOF {
		t.Fatalf("CopyFile EC ignore-missing expected EOF, got: %v", err)
	}
}

284
test/volume_server/grpc/copy_sync_test.go

@ -0,0 +1,284 @@
package volume_server_grpc_test
import (
"context"
"io"
"net/http"
"strings"
"testing"
"time"
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
)
// TestVolumeSyncStatusAndReadVolumeFileStatus checks that both status RPCs
// echo back the requested volume id and that ReadVolumeFileStatus reports a
// non-zero version.
func TestVolumeSyncStatusAndReadVolumeFileStatus(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(41)
	framework.AllocateVolume(t, grpcClient, volumeID, "")

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	syncResp, err := grpcClient.VolumeSyncStatus(ctx, &volume_server_pb.VolumeSyncStatusRequest{VolumeId: volumeID})
	if err != nil {
		t.Fatalf("VolumeSyncStatus failed: %v", err)
	}
	if got := syncResp.GetVolumeId(); got != volumeID {
		t.Fatalf("VolumeSyncStatus volume id mismatch: got %d want %d", got, volumeID)
	}

	statusResp, err := grpcClient.ReadVolumeFileStatus(ctx, &volume_server_pb.ReadVolumeFileStatusRequest{VolumeId: volumeID})
	if err != nil {
		t.Fatalf("ReadVolumeFileStatus failed: %v", err)
	}
	if got := statusResp.GetVolumeId(); got != volumeID {
		t.Fatalf("ReadVolumeFileStatus volume id mismatch: got %d want %d", got, volumeID)
	}
	if statusResp.GetVersion() == 0 {
		t.Fatalf("ReadVolumeFileStatus expected non-zero version")
	}
}
// TestCopyAndStreamMethodsMissingVolumePaths verifies that the copy and
// streaming RPCs reject volume ids that were never allocated on this server.
func TestCopyAndStreamMethodsMissingVolumePaths(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	cluster := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, client := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress())
	defer conn.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Unary sync-status call fails outright.
	if _, unaryErr := client.VolumeSyncStatus(ctx, &volume_server_pb.VolumeSyncStatusRequest{VolumeId: 98761}); unaryErr == nil {
		t.Fatalf("VolumeSyncStatus should fail for missing volume")
	}

	// For server-streaming RPCs the failure may surface when the stream is
	// opened or only on the first Recv; fold both paths into one error value.
	incStream, incErr := client.VolumeIncrementalCopy(ctx, &volume_server_pb.VolumeIncrementalCopyRequest{VolumeId: 98762, SinceNs: 0})
	if incErr == nil {
		_, incErr = incStream.Recv()
	}
	if incErr == nil || !strings.Contains(incErr.Error(), "not found volume") {
		t.Fatalf("VolumeIncrementalCopy missing-volume error mismatch: %v", incErr)
	}

	readStream, readErr := client.ReadAllNeedles(ctx, &volume_server_pb.ReadAllNeedlesRequest{VolumeIds: []uint32{98763}})
	if readErr == nil {
		_, readErr = readStream.Recv()
	}
	if readErr == nil || !strings.Contains(readErr.Error(), "not found volume") {
		t.Fatalf("ReadAllNeedles missing-volume error mismatch: %v", readErr)
	}

	copyStream, copyErr := client.CopyFile(ctx, &volume_server_pb.CopyFileRequest{VolumeId: 98764, Ext: ".dat", StopOffset: 1})
	if copyErr == nil {
		_, copyErr = copyStream.Recv()
	}
	if copyErr == nil || !strings.Contains(copyErr.Error(), "not found volume") {
		t.Fatalf("CopyFile missing-volume error mismatch: %v", copyErr)
	}

	// File-status lookup reports the same not-found condition.
	if _, statusErr := client.ReadVolumeFileStatus(ctx, &volume_server_pb.ReadVolumeFileStatusRequest{VolumeId: 98765}); statusErr == nil || !strings.Contains(statusErr.Error(), "not found volume") {
		t.Fatalf("ReadVolumeFileStatus missing-volume error mismatch: %v", statusErr)
	}
}
// TestVolumeCopyAndReceiveFileMaintenanceRejection flips the server into
// maintenance mode and confirms that both VolumeCopy and ReceiveFile refuse
// to run while it is active.
func TestVolumeCopyAndReceiveFileMaintenanceRejection(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	cluster := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, client := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress())
	defer conn.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Enable maintenance mode, echoing back the current state version.
	current, err := client.GetState(ctx, &volume_server_pb.GetStateRequest{})
	if err != nil {
		t.Fatalf("GetState failed: %v", err)
	}
	if _, err = client.SetState(ctx, &volume_server_pb.SetStateRequest{
		State: &volume_server_pb.VolumeServerState{Maintenance: true, Version: current.GetState().GetVersion()},
	}); err != nil {
		t.Fatalf("SetState maintenance=true failed: %v", err)
	}

	// VolumeCopy may fail at stream creation or on the first Recv.
	copyStream, copyErr := client.VolumeCopy(ctx, &volume_server_pb.VolumeCopyRequest{VolumeId: 1, SourceDataNode: "127.0.0.1:1234"})
	if copyErr == nil {
		_, copyErr = copyStream.Recv()
	}
	if copyErr == nil || !strings.Contains(copyErr.Error(), "maintenance mode") {
		t.Fatalf("VolumeCopy maintenance error mismatch: %v", copyErr)
	}

	receiver, err := client.ReceiveFile(ctx)
	if err != nil {
		t.Fatalf("ReceiveFile client creation failed: %v", err)
	}
	// Send's error is deliberately ignored: the authoritative error for a
	// client stream is the one returned by CloseAndRecv below.
	_ = receiver.Send(&volume_server_pb.ReceiveFileRequest{
		Data: &volume_server_pb.ReceiveFileRequest_Info{
			Info: &volume_server_pb.ReceiveFileInfo{VolumeId: 1, Ext: ".dat"},
		},
	})
	if _, err = receiver.CloseAndRecv(); err == nil || !strings.Contains(err.Error(), "maintenance mode") {
		t.Fatalf("ReceiveFile maintenance error mismatch: %v", err)
	}
}
// TestVolumeCopySuccessFromPeerAndMountsDestination uploads a needle to the
// first volume server, copies the volume to the second server via VolumeCopy,
// and verifies the copied volume is mounted and serves the original payload.
func TestVolumeCopySuccessFromPeerAndMountsDestination(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	cluster := framework.StartDualVolumeCluster(t, matrix.P1())
	sourceConn, sourceClient := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress(0))
	defer sourceConn.Close()
	destConn, destClient := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress(1))
	defer destConn.Close()

	const volumeID = uint32(42)
	framework.AllocateVolume(t, sourceClient, volumeID, "")

	httpClient := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, 880001, 0x12345678)
	payload := []byte("volume-copy-success-payload")
	uploadResp := framework.UploadBytes(t, httpClient, cluster.VolumeAdminURL(0), fid, payload)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("upload to source expected 201, got %d", uploadResp.StatusCode)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	// The source node is addressed as the admin address with the gRPC port
	// appended after a dot.
	grpcPort := strings.Split(cluster.VolumeGRPCAddress(0), ":")[1]
	copyStream, err := destClient.VolumeCopy(ctx, &volume_server_pb.VolumeCopyRequest{
		VolumeId:       volumeID,
		Collection:     "",
		SourceDataNode: cluster.VolumeAdminAddress(0) + "." + grpcPort,
	})
	if err != nil {
		t.Fatalf("VolumeCopy start failed: %v", err)
	}

	// Drain the progress stream; a final message should report a positive
	// last_append_at_ns.
	sawFinal := false
	for done := false; !done; {
		msg, recvErr := copyStream.Recv()
		switch {
		case recvErr == io.EOF:
			done = true
		case recvErr != nil:
			t.Fatalf("VolumeCopy recv failed: %v", recvErr)
		default:
			if msg.GetLastAppendAtNs() > 0 {
				sawFinal = true
			}
		}
	}
	if !sawFinal {
		t.Fatalf("VolumeCopy expected final response with last_append_at_ns")
	}

	// The destination must now serve the payload over HTTP.
	readResp := framework.ReadBytes(t, httpClient, cluster.VolumeAdminURL(1), fid)
	readBody := framework.ReadAllAndClose(t, readResp)
	if readResp.StatusCode != http.StatusOK {
		t.Fatalf("read from copied destination expected 200, got %d", readResp.StatusCode)
	}
	if string(readBody) != string(payload) {
		t.Fatalf("destination copied payload mismatch: got %q want %q", string(readBody), string(payload))
	}
}
// TestVolumeCopyOverwritesExistingDestinationVolume seeds different content on
// both servers under the same volume id, runs VolumeCopy, and verifies the
// destination afterwards serves the source's payload instead of its own.
func TestVolumeCopyOverwritesExistingDestinationVolume(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	cluster := framework.StartDualVolumeCluster(t, matrix.P1())
	sourceConn, sourceClient := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress(0))
	defer sourceConn.Close()
	destConn, destClient := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress(1))
	defer destConn.Close()

	const volumeID = uint32(43)
	framework.AllocateVolume(t, sourceClient, volumeID, "")
	framework.AllocateVolume(t, destClient, volumeID, "")

	httpClient := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, 880002, 0x23456789)
	sourcePayload := []byte("volume-copy-overwrite-source")
	destPayload := []byte("volume-copy-overwrite-destination-old")

	// Seed both sides with distinct payloads under the same file id.
	sourceUpload := framework.UploadBytes(t, httpClient, cluster.VolumeAdminURL(0), fid, sourcePayload)
	_ = framework.ReadAllAndClose(t, sourceUpload)
	if sourceUpload.StatusCode != http.StatusCreated {
		t.Fatalf("upload to source expected 201, got %d", sourceUpload.StatusCode)
	}
	destUpload := framework.UploadBytes(t, httpClient, cluster.VolumeAdminURL(1), fid, destPayload)
	_ = framework.ReadAllAndClose(t, destUpload)
	if destUpload.StatusCode != http.StatusCreated {
		t.Fatalf("upload to destination expected 201, got %d", destUpload.StatusCode)
	}

	// Sanity check: before the copy, the destination serves its own payload.
	beforeResp := framework.ReadBytes(t, httpClient, cluster.VolumeAdminURL(1), fid)
	beforeBody := framework.ReadAllAndClose(t, beforeResp)
	if beforeResp.StatusCode != http.StatusOK {
		t.Fatalf("destination pre-copy read expected 200, got %d", beforeResp.StatusCode)
	}
	if string(beforeBody) != string(destPayload) {
		t.Fatalf("destination pre-copy payload mismatch: got %q want %q", string(beforeBody), string(destPayload))
	}

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	// The source node is addressed as the admin address with the gRPC port
	// appended after a dot.
	grpcPort := strings.Split(cluster.VolumeGRPCAddress(0), ":")[1]
	copyStream, err := destClient.VolumeCopy(ctx, &volume_server_pb.VolumeCopyRequest{
		VolumeId:       volumeID,
		Collection:     "",
		SourceDataNode: cluster.VolumeAdminAddress(0) + "." + grpcPort,
	})
	if err != nil {
		t.Fatalf("VolumeCopy overwrite start failed: %v", err)
	}
	sawFinal := false
	for done := false; !done; {
		msg, recvErr := copyStream.Recv()
		switch {
		case recvErr == io.EOF:
			done = true
		case recvErr != nil:
			t.Fatalf("VolumeCopy overwrite recv failed: %v", recvErr)
		default:
			if msg.GetLastAppendAtNs() > 0 {
				sawFinal = true
			}
		}
	}
	if !sawFinal {
		t.Fatalf("VolumeCopy overwrite expected final response with last_append_at_ns")
	}

	// After the copy, the destination must serve the source's payload.
	afterResp := framework.ReadBytes(t, httpClient, cluster.VolumeAdminURL(1), fid)
	afterBody := framework.ReadAllAndClose(t, afterResp)
	if afterResp.StatusCode != http.StatusOK {
		t.Fatalf("destination post-copy read expected 200, got %d", afterResp.StatusCode)
	}
	if string(afterBody) != string(sourcePayload) {
		t.Fatalf("destination post-copy payload mismatch: got %q want %q", string(afterBody), string(sourcePayload))
	}
}

146
test/volume_server/grpc/data_rw_test.go

@ -0,0 +1,146 @@
package volume_server_grpc_test
import (
	"context"
	"net/http"
	"strings"
	"testing"
	"time"

	"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
	"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
)
// TestReadNeedleBlobAndMetaMissingVolume confirms that both needle read RPCs
// fail with a not-found error when the volume id is unknown to the server.
func TestReadNeedleBlobAndMetaMissingVolume(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	cluster := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, client := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress())
	defer conn.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	_, blobErr := client.ReadNeedleBlob(ctx, &volume_server_pb.ReadNeedleBlobRequest{
		VolumeId: 99111,
		Offset:   0,
		Size:     16,
	})
	switch {
	case blobErr == nil:
		t.Fatalf("ReadNeedleBlob should fail for missing volume")
	case !strings.Contains(blobErr.Error(), "not found volume"):
		t.Fatalf("ReadNeedleBlob missing volume error mismatch: %v", blobErr)
	}

	_, metaErr := client.ReadNeedleMeta(ctx, &volume_server_pb.ReadNeedleMetaRequest{
		VolumeId: 99112,
		NeedleId: 1,
		Offset:   0,
		Size:     16,
	})
	switch {
	case metaErr == nil:
		t.Fatalf("ReadNeedleMeta should fail for missing volume")
	case !strings.Contains(metaErr.Error(), "not found volume"):
		t.Fatalf("ReadNeedleMeta missing volume error mismatch: %v", metaErr)
	}
}
// TestWriteNeedleBlobMaintenanceAndMissingVolume covers the two rejection
// paths of WriteNeedleBlob: an unknown volume id, and maintenance mode.
func TestWriteNeedleBlobMaintenanceAndMissingVolume(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	cluster := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, client := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress())
	defer conn.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// An unknown volume id is rejected with a not-found error.
	_, writeErr := client.WriteNeedleBlob(ctx, &volume_server_pb.WriteNeedleBlobRequest{
		VolumeId:   99113,
		NeedleId:   1,
		NeedleBlob: []byte("abc"),
		Size:       3,
	})
	switch {
	case writeErr == nil:
		t.Fatalf("WriteNeedleBlob should fail for missing volume")
	case !strings.Contains(writeErr.Error(), "not found volume"):
		t.Fatalf("WriteNeedleBlob missing volume error mismatch: %v", writeErr)
	}

	// Switch the server into maintenance mode, echoing the current version.
	current, err := client.GetState(ctx, &volume_server_pb.GetStateRequest{})
	if err != nil {
		t.Fatalf("GetState failed: %v", err)
	}
	if _, err = client.SetState(ctx, &volume_server_pb.SetStateRequest{
		State: &volume_server_pb.VolumeServerState{Maintenance: true, Version: current.GetState().GetVersion()},
	}); err != nil {
		t.Fatalf("SetState maintenance=true failed: %v", err)
	}

	// Writes are now refused with a maintenance-mode error.
	_, writeErr = client.WriteNeedleBlob(ctx, &volume_server_pb.WriteNeedleBlobRequest{
		VolumeId:   1,
		NeedleId:   2,
		NeedleBlob: []byte("def"),
		Size:       3,
	})
	switch {
	case writeErr == nil:
		t.Fatalf("WriteNeedleBlob should fail in maintenance mode")
	case !strings.Contains(writeErr.Error(), "maintenance mode"):
		t.Fatalf("WriteNeedleBlob maintenance mode error mismatch: %v", writeErr)
	}
}
// TestReadNeedleBlobAndMetaInvalidOffsets uploads one needle and then asks
// ReadNeedleBlob / ReadNeedleMeta for data far beyond the end of the volume
// file, expecting both RPCs to fail rather than return garbage.
func TestReadNeedleBlobAndMetaInvalidOffsets(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(92)
	framework.AllocateVolume(t, grpcClient, volumeID, "")
	httpClient := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, 880001, 0xCCDD1122)
	uploadResp := framework.UploadBytes(t, httpClient, clusterHarness.VolumeAdminURL(), fid, []byte("invalid-offset-check"))
	_ = framework.ReadAllAndClose(t, uploadResp)
	// Use the named status constant for consistency with the sibling test files.
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// 1<<40 is far past anything written above, so the read must fail.
	_, err := grpcClient.ReadNeedleBlob(ctx, &volume_server_pb.ReadNeedleBlobRequest{
		VolumeId: volumeID,
		Offset:   1 << 40,
		Size:     64,
	})
	if err == nil {
		t.Fatalf("ReadNeedleBlob should fail for invalid offset")
	}
	if !strings.Contains(strings.ToLower(err.Error()), "read needle blob") {
		t.Fatalf("ReadNeedleBlob invalid offset error mismatch: %v", err)
	}
	// ReadNeedleMeta must also fail; the exact message is not pinned here.
	_, err = grpcClient.ReadNeedleMeta(ctx, &volume_server_pb.ReadNeedleMetaRequest{
		VolumeId: volumeID,
		NeedleId: 880001,
		Offset:   1 << 40,
		Size:     64,
	})
	if err == nil {
		t.Fatalf("ReadNeedleMeta should fail for invalid offset")
	}
}

273
test/volume_server/grpc/data_stream_success_test.go

@ -0,0 +1,273 @@
package volume_server_grpc_test
import (
"context"
"io"
"strings"
"testing"
"time"
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
"github.com/seaweedfs/seaweedfs/weed/storage/idx"
"github.com/seaweedfs/seaweedfs/weed/storage/types"
)
// TestReadWriteNeedleBlobAndMetaRoundTrip reads a raw needle blob (located via
// the volume's .idx file), verifies its metadata, rewrites the same blob under
// a new needle id, and checks the clone is readable over HTTP.
func TestReadWriteNeedleBlobAndMetaRoundTrip(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	cluster := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress())
	defer conn.Close()

	const (
		volumeID       = uint32(83)
		sourceNeedleID = uint64(333333)
		sourceCookie   = uint32(0xABCD0102)
		clonedNeedleID = uint64(333334)
	)
	framework.AllocateVolume(t, grpcClient, volumeID, "")

	httpClient := framework.NewHTTPClient()
	payload := []byte("blob-roundtrip-content")
	fid := framework.NewFileID(volumeID, sourceNeedleID, sourceCookie)
	uploadResp := framework.UploadBytes(t, httpClient, cluster.VolumeAdminURL(), fid, payload)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != 201 {
		t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	fileStatus, err := grpcClient.ReadVolumeFileStatus(ctx, &volume_server_pb.ReadVolumeFileStatusRequest{VolumeId: volumeID})
	if err != nil {
		t.Fatalf("ReadVolumeFileStatus failed: %v", err)
	}
	if fileStatus.GetIdxFileSize() == 0 {
		t.Fatalf("expected non-zero idx file size after upload")
	}

	// Pull the .idx file and locate the uploaded needle's offset and size.
	idxBytes := copyFileBytes(t, grpcClient, &volume_server_pb.CopyFileRequest{
		VolumeId:           volumeID,
		Ext:                ".idx",
		CompactionRevision: fileStatus.GetCompactionRevision(),
		StopOffset:         fileStatus.GetIdxFileSize(),
	})
	offset, size := findNeedleOffsetAndSize(t, idxBytes, sourceNeedleID)

	blobResp, err := grpcClient.ReadNeedleBlob(ctx, &volume_server_pb.ReadNeedleBlobRequest{
		VolumeId: volumeID,
		Offset:   offset,
		Size:     size,
	})
	switch {
	case err != nil:
		t.Fatalf("ReadNeedleBlob failed: %v", err)
	case len(blobResp.GetNeedleBlob()) == 0:
		t.Fatalf("ReadNeedleBlob returned empty blob")
	}

	metaResp, err := grpcClient.ReadNeedleMeta(ctx, &volume_server_pb.ReadNeedleMetaRequest{
		VolumeId: volumeID,
		NeedleId: sourceNeedleID,
		Offset:   offset,
		Size:     size,
	})
	switch {
	case err != nil:
		t.Fatalf("ReadNeedleMeta failed: %v", err)
	case metaResp.GetCookie() != sourceCookie:
		t.Fatalf("ReadNeedleMeta cookie mismatch: got %d want %d", metaResp.GetCookie(), sourceCookie)
	}

	// Rewrite the captured raw blob under a different needle id.
	if _, err = grpcClient.WriteNeedleBlob(ctx, &volume_server_pb.WriteNeedleBlobRequest{
		VolumeId:   volumeID,
		NeedleId:   clonedNeedleID,
		Size:       size,
		NeedleBlob: blobResp.GetNeedleBlob(),
	}); err != nil {
		t.Fatalf("WriteNeedleBlob failed: %v", err)
	}

	// The raw blob carries the source needle's id and cookie, so the clone's
	// status is expected to report those original values.
	clonedStatus, err := grpcClient.VolumeNeedleStatus(ctx, &volume_server_pb.VolumeNeedleStatusRequest{
		VolumeId: volumeID,
		NeedleId: clonedNeedleID,
	})
	switch {
	case err != nil:
		t.Fatalf("VolumeNeedleStatus for cloned needle failed: %v", err)
	case clonedStatus.GetNeedleId() != sourceNeedleID:
		t.Fatalf("cloned needle status id mismatch: got %d want %d", clonedStatus.GetNeedleId(), sourceNeedleID)
	case clonedStatus.GetCookie() != sourceCookie:
		t.Fatalf("cloned needle cookie mismatch: got %d want %d", clonedStatus.GetCookie(), sourceCookie)
	}

	// The clone must be readable over HTTP with the source cookie.
	clonedResp := framework.ReadBytes(t, httpClient, cluster.VolumeAdminURL(), framework.NewFileID(volumeID, clonedNeedleID, sourceCookie))
	clonedBody := framework.ReadAllAndClose(t, clonedResp)
	if clonedResp.StatusCode != 200 {
		t.Fatalf("cloned needle GET expected 200, got %d", clonedResp.StatusCode)
	}
	if string(clonedBody) != string(payload) {
		t.Fatalf("cloned needle body mismatch: got %q want %q", string(clonedBody), string(payload))
	}
}
// TestReadAllNeedlesStreamsUploadedRecords uploads two needles and verifies
// that ReadAllNeedles streams both back with intact bodies.
func TestReadAllNeedlesStreamsUploadedRecords(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	cluster := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(84)
	framework.AllocateVolume(t, grpcClient, volumeID, "")
	httpClient := framework.NewHTTPClient()
	expected := map[uint64]string{
		444441: "read-all-needle-one",
		444442: "read-all-needle-two",
	}
	for key, body := range expected {
		resp := framework.UploadBytes(t, httpClient, cluster.VolumeAdminURL(), framework.NewFileID(volumeID, key, 0xA0B0C0D0), []byte(body))
		_ = framework.ReadAllAndClose(t, resp)
		if resp.StatusCode != 201 {
			t.Fatalf("upload for key %d expected 201, got %d", key, resp.StatusCode)
		}
	}

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	stream, err := grpcClient.ReadAllNeedles(ctx, &volume_server_pb.ReadAllNeedlesRequest{VolumeIds: []uint32{volumeID}})
	if err != nil {
		t.Fatalf("ReadAllNeedles start failed: %v", err)
	}

	// Collect only the needles this test uploaded.
	seen := make(map[uint64]string, len(expected))
	for {
		msg, recvErr := stream.Recv()
		if recvErr != nil {
			if recvErr == io.EOF {
				break
			}
			t.Fatalf("ReadAllNeedles recv failed: %v", recvErr)
		}
		if _, tracked := expected[msg.GetNeedleId()]; tracked {
			seen[msg.GetNeedleId()] = string(msg.GetNeedleBlob())
		}
	}

	for key, want := range expected {
		got, found := seen[key]
		if !found {
			t.Fatalf("ReadAllNeedles missing key %d in stream", key)
		}
		if got != want {
			t.Fatalf("ReadAllNeedles body mismatch for key %d: got %q want %q", key, got, want)
		}
	}
}
// TestReadAllNeedlesExistingThenMissingVolumeAbortsStream requests an existing
// volume followed by an unknown one: the stream should first deliver the
// existing volume's needles and then abort with a not-found error.
func TestReadAllNeedlesExistingThenMissingVolumeAbortsStream(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	cluster := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress())
	defer conn.Close()

	const (
		existingVolumeID = uint32(85)
		missingVolumeID  = uint32(98585)
		needleID         = uint64(445551)
	)
	framework.AllocateVolume(t, grpcClient, existingVolumeID, "")

	httpClient := framework.NewHTTPClient()
	fid := framework.NewFileID(existingVolumeID, needleID, 0xAA11BB22)
	payload := "read-all-existing-then-missing"
	uploadResp := framework.UploadBytes(t, httpClient, cluster.VolumeAdminURL(), fid, []byte(payload))
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != 201 {
		t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	stream, err := grpcClient.ReadAllNeedles(ctx, &volume_server_pb.ReadAllNeedlesRequest{
		VolumeIds: []uint32{existingVolumeID, missingVolumeID},
	})
	if err != nil {
		t.Fatalf("ReadAllNeedles start failed: %v", err)
	}

	sawUploaded := false
	for {
		msg, recvErr := stream.Recv()
		if recvErr == io.EOF {
			// A clean EOF means the missing volume was silently skipped.
			t.Fatalf("ReadAllNeedles expected stream error for missing volume, got EOF")
		}
		if recvErr != nil {
			// The stream must terminate with the missing-volume error.
			if !strings.Contains(recvErr.Error(), "not found volume id") {
				t.Fatalf("ReadAllNeedles missing-volume error mismatch: %v", recvErr)
			}
			break
		}
		if msg.GetNeedleId() == needleID && string(msg.GetNeedleBlob()) == payload {
			sawUploaded = true
		}
	}
	if !sawUploaded {
		t.Fatalf("ReadAllNeedles should stream entries from existing volume before missing-volume abort")
	}
}
// copyFileBytes runs a CopyFile request to completion and returns the
// concatenated file content from all streamed chunks, using a 10-second
// timeout. Any stream error fails the test.
func copyFileBytes(t testing.TB, grpcClient volume_server_pb.VolumeServerClient, req *volume_server_pb.CopyFileRequest) []byte {
	t.Helper()
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	stream, err := grpcClient.CopyFile(ctx, req)
	if err != nil {
		t.Fatalf("CopyFile start failed: %v", err)
	}
	var content []byte
	for {
		chunk, recvErr := stream.Recv()
		if recvErr != nil {
			if recvErr == io.EOF {
				return content
			}
			t.Fatalf("CopyFile recv failed: %v", recvErr)
		}
		content = append(content, chunk.GetFileContent()...)
	}
}
// findNeedleOffsetAndSize scans raw .idx entries for needleID and returns its
// recorded offset and size. Entries with a zero offset or a non-positive size
// are skipped. The test fails if the needle is not found.
func findNeedleOffsetAndSize(t testing.TB, idxBytes []byte, needleID uint64) (offset int64, size int32) {
	t.Helper()
	for start := 0; start+types.NeedleMapEntrySize <= len(idxBytes); start += types.NeedleMapEntrySize {
		key, entryOffset, entrySize := idx.IdxFileEntry(idxBytes[start : start+types.NeedleMapEntrySize])
		if uint64(key) != needleID || entryOffset.IsZero() || entrySize <= 0 {
			continue
		}
		return entryOffset.ToActualOffset(), int32(entrySize)
	}
	t.Fatalf("needle id %d not found in idx entries", needleID)
	return 0, 0 // unreachable; Fatalf stops the test
}

777
test/volume_server/grpc/erasure_coding_test.go

@ -0,0 +1,777 @@
package volume_server_grpc_test
import (
"context"
"io"
"math"
"net/http"
"strings"
"testing"
"time"
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
"github.com/seaweedfs/seaweedfs/weed/storage/needle"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// TestEcMaintenanceModeRejections puts the server into maintenance mode and
// verifies that each mutating EC RPC is rejected while it is active.
func TestEcMaintenanceModeRejections(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	cluster := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, client := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress())
	defer conn.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Enable maintenance mode, echoing back the current state version.
	current, err := client.GetState(ctx, &volume_server_pb.GetStateRequest{})
	if err != nil {
		t.Fatalf("GetState failed: %v", err)
	}
	if _, err = client.SetState(ctx, &volume_server_pb.SetStateRequest{
		State: &volume_server_pb.VolumeServerState{
			Maintenance: true,
			Version:     current.GetState().GetVersion(),
		},
	}); err != nil {
		t.Fatalf("SetState maintenance=true failed: %v", err)
	}

	// Every call below must fail with an error mentioning maintenance mode.
	isMaintenanceErr := func(callErr error) bool {
		return callErr != nil && strings.Contains(callErr.Error(), "maintenance mode")
	}

	if _, err = client.VolumeEcShardsGenerate(ctx, &volume_server_pb.VolumeEcShardsGenerateRequest{VolumeId: 1, Collection: ""}); !isMaintenanceErr(err) {
		t.Fatalf("VolumeEcShardsGenerate maintenance error mismatch: %v", err)
	}
	if _, err = client.VolumeEcShardsCopy(ctx, &volume_server_pb.VolumeEcShardsCopyRequest{
		VolumeId:       1,
		Collection:     "",
		SourceDataNode: "127.0.0.1:1",
		ShardIds:       []uint32{0},
	}); !isMaintenanceErr(err) {
		t.Fatalf("VolumeEcShardsCopy maintenance error mismatch: %v", err)
	}
	if _, err = client.VolumeEcShardsDelete(ctx, &volume_server_pb.VolumeEcShardsDeleteRequest{
		VolumeId:   1,
		Collection: "",
		ShardIds:   []uint32{0},
	}); !isMaintenanceErr(err) {
		t.Fatalf("VolumeEcShardsDelete maintenance error mismatch: %v", err)
	}
	if _, err = client.VolumeEcBlobDelete(ctx, &volume_server_pb.VolumeEcBlobDeleteRequest{
		VolumeId:   1,
		Collection: "",
		FileKey:    1,
		Version:    3,
	}); !isMaintenanceErr(err) {
		t.Fatalf("VolumeEcBlobDelete maintenance error mismatch: %v", err)
	}
	if _, err = client.VolumeEcShardsToVolume(ctx, &volume_server_pb.VolumeEcShardsToVolumeRequest{
		VolumeId:   1,
		Collection: "",
	}); !isMaintenanceErr(err) {
		t.Fatalf("VolumeEcShardsToVolume maintenance error mismatch: %v", err)
	}
}
// TestEcMissingInvalidAndNoopPaths probes EC RPC behavior for volumes that do
// not exist: some calls fail with explicit errors, while others succeed as
// deliberate no-ops.
func TestEcMissingInvalidAndNoopPaths(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	cluster := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, client := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress())
	defer conn.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Generating shards requires the source volume to exist.
	_, err := client.VolumeEcShardsGenerate(ctx, &volume_server_pb.VolumeEcShardsGenerateRequest{
		VolumeId:   98791,
		Collection: "",
	})
	if err == nil || !strings.Contains(err.Error(), "not found") {
		t.Fatalf("VolumeEcShardsGenerate missing-volume error mismatch: %v", err)
	}

	// Rebuild on a missing volume succeeds with an empty shard list.
	rebuildResp, err := client.VolumeEcShardsRebuild(ctx, &volume_server_pb.VolumeEcShardsRebuildRequest{
		VolumeId:   98792,
		Collection: "ec-rebuild",
	})
	if err != nil {
		t.Fatalf("VolumeEcShardsRebuild missing-volume should return empty success, got: %v", err)
	}
	if len(rebuildResp.GetRebuiltShardIds()) != 0 {
		t.Fatalf("VolumeEcShardsRebuild expected no rebuilt shards for missing volume, got %v", rebuildResp.GetRebuiltShardIds())
	}

	// An out-of-range disk id is rejected explicitly.
	if _, err = client.VolumeEcShardsCopy(ctx, &volume_server_pb.VolumeEcShardsCopyRequest{
		VolumeId:       98793,
		Collection:     "ec-copy",
		SourceDataNode: "127.0.0.1:1",
		ShardIds:       []uint32{0},
		DiskId:         99,
	}); err == nil || !strings.Contains(err.Error(), "invalid disk_id") {
		t.Fatalf("VolumeEcShardsCopy invalid-disk error mismatch: %v", err)
	}

	// Deleting shards that never existed is a silent no-op.
	if _, err = client.VolumeEcShardsDelete(ctx, &volume_server_pb.VolumeEcShardsDeleteRequest{
		VolumeId:   98794,
		Collection: "ec-delete",
		ShardIds:   []uint32{0, 1},
	}); err != nil {
		t.Fatalf("VolumeEcShardsDelete missing-volume should be no-op success, got: %v", err)
	}

	// Mounting fails when no shard files exist.
	if _, err = client.VolumeEcShardsMount(ctx, &volume_server_pb.VolumeEcShardsMountRequest{
		VolumeId:   98795,
		Collection: "ec-mount",
		ShardIds:   []uint32{0},
	}); err == nil {
		t.Fatalf("VolumeEcShardsMount should fail for missing EC shards")
	}

	// Unmounting shards that are not mounted is tolerated.
	if _, err = client.VolumeEcShardsUnmount(ctx, &volume_server_pb.VolumeEcShardsUnmountRequest{
		VolumeId: 98796,
		ShardIds: []uint32{0},
	}); err != nil {
		t.Fatalf("VolumeEcShardsUnmount missing shards should be no-op success, got: %v", err)
	}

	// Shard reads surface the error at stream start or on the first Recv.
	readStream, readErr := client.VolumeEcShardRead(ctx, &volume_server_pb.VolumeEcShardReadRequest{
		VolumeId: 98797,
		ShardId:  0,
		Offset:   0,
		Size:     1,
	})
	if readErr == nil {
		_, readErr = readStream.Recv()
	}
	if readErr == nil || readErr == io.EOF {
		t.Fatalf("VolumeEcShardRead should fail for missing EC volume")
	}

	// Blob deletion for a volume this server does not host is a no-op.
	if _, err = client.VolumeEcBlobDelete(ctx, &volume_server_pb.VolumeEcBlobDeleteRequest{
		VolumeId:   98798,
		Collection: "ec-blob",
		FileKey:    1,
		Version:    3,
	}); err != nil {
		t.Fatalf("VolumeEcBlobDelete missing local EC volume should be no-op success, got: %v", err)
	}

	// Converting shards back into a volume requires the shards to exist.
	if _, err = client.VolumeEcShardsToVolume(ctx, &volume_server_pb.VolumeEcShardsToVolumeRequest{
		VolumeId:   98799,
		Collection: "ec-to-volume",
	}); err == nil || !strings.Contains(err.Error(), "not found") {
		t.Fatalf("VolumeEcShardsToVolume missing-volume error mismatch: %v", err)
	}

	// Shard info lookup fails for an unknown volume.
	if _, err = client.VolumeEcShardsInfo(ctx, &volume_server_pb.VolumeEcShardsInfoRequest{
		VolumeId: 98800,
	}); err == nil || !strings.Contains(err.Error(), "not found") {
		t.Fatalf("VolumeEcShardsInfo missing-volume error mismatch: %v", err)
	}
}
// TestEcGenerateMountInfoUnmountLifecycle walks the EC happy path: generate
// shards from a regular volume, mount one shard, read its info, unmount it,
// and confirm the info lookup then reports not-found.
func TestEcGenerateMountInfoUnmountLifecycle(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	cluster := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, client := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(115)
	framework.AllocateVolume(t, client, volumeID, "")
	httpClient := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, 990001, 0x1234ABCD)
	uploadResp := framework.UploadBytes(t, httpClient, cluster.VolumeAdminURL(), fid, []byte("ec-generate-lifecycle-content"))
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	if _, err := client.VolumeEcShardsGenerate(ctx, &volume_server_pb.VolumeEcShardsGenerateRequest{
		VolumeId:   volumeID,
		Collection: "",
	}); err != nil {
		t.Fatalf("VolumeEcShardsGenerate success path failed: %v", err)
	}
	if _, err := client.VolumeEcShardsMount(ctx, &volume_server_pb.VolumeEcShardsMountRequest{
		VolumeId:   volumeID,
		Collection: "",
		ShardIds:   []uint32{0},
	}); err != nil {
		t.Fatalf("VolumeEcShardsMount success path failed: %v", err)
	}

	// With shard 0 mounted, info must list shards and a non-zero size.
	infoResp, err := client.VolumeEcShardsInfo(ctx, &volume_server_pb.VolumeEcShardsInfoRequest{
		VolumeId: volumeID,
	})
	switch {
	case err != nil:
		t.Fatalf("VolumeEcShardsInfo after mount failed: %v", err)
	case len(infoResp.GetEcShardInfos()) == 0:
		t.Fatalf("VolumeEcShardsInfo expected non-empty shard infos after mount")
	case infoResp.GetVolumeSize() == 0:
		t.Fatalf("VolumeEcShardsInfo expected non-zero volume size after mount")
	}

	if _, err := client.VolumeEcShardsUnmount(ctx, &volume_server_pb.VolumeEcShardsUnmountRequest{
		VolumeId: volumeID,
		ShardIds: []uint32{0},
	}); err != nil {
		t.Fatalf("VolumeEcShardsUnmount success path failed: %v", err)
	}

	// After unmounting the only mounted shard, the lookup reports not-found.
	_, err = client.VolumeEcShardsInfo(ctx, &volume_server_pb.VolumeEcShardsInfoRequest{
		VolumeId: volumeID,
	})
	if err == nil || !strings.Contains(err.Error(), "not found") {
		t.Fatalf("VolumeEcShardsInfo after unmount expected not-found error, got: %v", err)
	}
}
// TestEcShardReadAndBlobDeleteLifecycle uploads one needle, converts its
// volume into EC shards, and exercises shard reads around blob deletion:
// a raw shard read returns bytes, VolumeEcBlobDelete succeeds twice
// (idempotent), and a keyed read afterwards reports IsDeleted then EOF.
func TestEcShardReadAndBlobDeleteLifecycle(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(116)
	const fileKey = uint64(990002)
	framework.AllocateVolume(t, grpcClient, volumeID, "")
	httpClient := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, fileKey, 0x2233CCDD)
	// Seed the volume with a single needle so EC generation has content.
	uploadResp := framework.UploadBytes(t, httpClient, clusterHarness.VolumeAdminURL(), fid, []byte("ec-shard-read-delete-content"))
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	_, err := grpcClient.VolumeEcShardsGenerate(ctx, &volume_server_pb.VolumeEcShardsGenerateRequest{
		VolumeId:   volumeID,
		Collection: "",
	})
	if err != nil {
		t.Fatalf("VolumeEcShardsGenerate failed: %v", err)
	}
	_, err = grpcClient.VolumeEcShardsMount(ctx, &volume_server_pb.VolumeEcShardsMountRequest{
		VolumeId:   volumeID,
		Collection: "",
		ShardIds:   []uint32{0},
	})
	if err != nil {
		t.Fatalf("VolumeEcShardsMount failed: %v", err)
	}
	// Positional read (no FileKey set): streams raw bytes from shard 0.
	readStream, err := grpcClient.VolumeEcShardRead(ctx, &volume_server_pb.VolumeEcShardReadRequest{
		VolumeId: volumeID,
		ShardId:  0,
		Offset:   0,
		Size:     1,
	})
	if err != nil {
		t.Fatalf("VolumeEcShardRead start failed: %v", err)
	}
	firstChunk, err := readStream.Recv()
	if err != nil {
		t.Fatalf("VolumeEcShardRead recv failed: %v", err)
	}
	if len(firstChunk.GetData()) == 0 {
		t.Fatalf("VolumeEcShardRead expected non-empty data chunk before deletion")
	}
	_, err = grpcClient.VolumeEcBlobDelete(ctx, &volume_server_pb.VolumeEcBlobDeleteRequest{
		VolumeId:   volumeID,
		Collection: "",
		FileKey:    fileKey,
		Version:    uint32(needle.GetCurrentVersion()),
	})
	if err != nil {
		t.Fatalf("VolumeEcBlobDelete first delete failed: %v", err)
	}
	// Deleting the same blob again must also succeed (idempotent API).
	_, err = grpcClient.VolumeEcBlobDelete(ctx, &volume_server_pb.VolumeEcBlobDeleteRequest{
		VolumeId:   volumeID,
		Collection: "",
		FileKey:    fileKey,
		Version:    uint32(needle.GetCurrentVersion()),
	})
	if err != nil {
		t.Fatalf("VolumeEcBlobDelete second delete should be idempotent success, got: %v", err)
	}
	// Keyed read (FileKey set): the server should now report the deletion
	// marker for this needle instead of data.
	deletedStream, err := grpcClient.VolumeEcShardRead(ctx, &volume_server_pb.VolumeEcShardReadRequest{
		VolumeId: volumeID,
		ShardId:  0,
		FileKey:  fileKey,
		Offset:   0,
		Size:     1,
	})
	if err != nil {
		t.Fatalf("VolumeEcShardRead deleted-check start failed: %v", err)
	}
	deletedMsg, err := deletedStream.Recv()
	if err != nil {
		t.Fatalf("VolumeEcShardRead deleted-check recv failed: %v", err)
	}
	if !deletedMsg.GetIsDeleted() {
		t.Fatalf("VolumeEcShardRead expected IsDeleted=true after blob delete")
	}
	// Nothing else should follow the deleted marker on the stream.
	_, err = deletedStream.Recv()
	if err != io.EOF {
		t.Fatalf("VolumeEcShardRead deleted-check expected EOF after deleted marker, got: %v", err)
	}
}
// TestEcRebuildMissingShardLifecycle deletes one EC shard, confirms it can no
// longer be mounted, rebuilds it from the surviving shards, and mounts the
// rebuilt shard again.
func TestEcRebuildMissingShardLifecycle(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, client := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(117)
	framework.AllocateVolume(t, client, volumeID, "")
	fid := framework.NewFileID(volumeID, 990003, 0x3344DDEE)
	uploadResp := framework.UploadBytes(t, framework.NewHTTPClient(), harness.VolumeAdminURL(), fid, []byte("ec-rebuild-shard-content"))
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	if _, err := client.VolumeEcShardsGenerate(ctx, &volume_server_pb.VolumeEcShardsGenerateRequest{
		VolumeId:   volumeID,
		Collection: "",
	}); err != nil {
		t.Fatalf("VolumeEcShardsGenerate failed: %v", err)
	}
	if _, err := client.VolumeEcShardsDelete(ctx, &volume_server_pb.VolumeEcShardsDeleteRequest{
		VolumeId:   volumeID,
		Collection: "",
		ShardIds:   []uint32{0},
	}); err != nil {
		t.Fatalf("VolumeEcShardsDelete shard 0 failed: %v", err)
	}
	// A deleted shard must not be mountable until it is rebuilt.
	if _, err := client.VolumeEcShardsMount(ctx, &volume_server_pb.VolumeEcShardsMountRequest{
		VolumeId:   volumeID,
		Collection: "",
		ShardIds:   []uint32{0},
	}); err == nil {
		t.Fatalf("VolumeEcShardsMount should fail when shard 0 has been deleted")
	}
	rebuildResp, err := client.VolumeEcShardsRebuild(ctx, &volume_server_pb.VolumeEcShardsRebuildRequest{
		VolumeId:   volumeID,
		Collection: "",
	})
	if err != nil {
		t.Fatalf("VolumeEcShardsRebuild failed: %v", err)
	}
	rebuilt := rebuildResp.GetRebuiltShardIds()
	if len(rebuilt) == 0 {
		t.Fatalf("VolumeEcShardsRebuild expected rebuilt shard ids")
	}
	foundShard0 := false
	for i := range rebuilt {
		if rebuilt[i] == 0 {
			foundShard0 = true
			break
		}
	}
	if !foundShard0 {
		t.Fatalf("VolumeEcShardsRebuild expected shard 0 to be rebuilt, got %v", rebuilt)
	}
	if _, err := client.VolumeEcShardsMount(ctx, &volume_server_pb.VolumeEcShardsMountRequest{
		VolumeId:   volumeID,
		Collection: "",
		ShardIds:   []uint32{0},
	}); err != nil {
		t.Fatalf("VolumeEcShardsMount shard 0 after rebuild failed: %v", err)
	}
}
// TestEcShardsToVolumeMissingShardAndNoLiveEntries covers two failure paths of
// VolumeEcShardsToVolume: conversion with data shard 0 deleted, and conversion
// when the only uploaded needle was deleted before EC generation.
func TestEcShardsToVolumeMissingShardAndNoLiveEntries(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	httpClient := framework.NewHTTPClient()
	// One 30s deadline is shared by both subtests below.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	t.Run("missing shard returns error", func(t *testing.T) {
		const volumeID = uint32(118)
		framework.AllocateVolume(t, grpcClient, volumeID, "")
		fid := framework.NewFileID(volumeID, 990004, 0x4455EEFF)
		uploadResp := framework.UploadBytes(t, httpClient, clusterHarness.VolumeAdminURL(), fid, []byte("ec-to-volume-missing-shard-content"))
		_ = framework.ReadAllAndClose(t, uploadResp)
		if uploadResp.StatusCode != http.StatusCreated {
			t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
		}
		_, err := grpcClient.VolumeEcShardsGenerate(ctx, &volume_server_pb.VolumeEcShardsGenerateRequest{
			VolumeId:   volumeID,
			Collection: "",
		})
		if err != nil {
			t.Fatalf("VolumeEcShardsGenerate failed: %v", err)
		}
		// Remove shard 0 so the conversion below is missing required data.
		_, err = grpcClient.VolumeEcShardsDelete(ctx, &volume_server_pb.VolumeEcShardsDeleteRequest{
			VolumeId:   volumeID,
			Collection: "",
			ShardIds:   []uint32{0},
		})
		if err != nil {
			t.Fatalf("VolumeEcShardsDelete shard 0 failed: %v", err)
		}
		// Mount a surviving shard so the EC volume is registered on the server.
		_, err = grpcClient.VolumeEcShardsMount(ctx, &volume_server_pb.VolumeEcShardsMountRequest{
			VolumeId:   volumeID,
			Collection: "",
			ShardIds:   []uint32{1},
		})
		if err != nil {
			t.Fatalf("VolumeEcShardsMount shard 1 failed: %v", err)
		}
		_, err = grpcClient.VolumeEcShardsToVolume(ctx, &volume_server_pb.VolumeEcShardsToVolumeRequest{
			VolumeId:   volumeID,
			Collection: "",
		})
		if err == nil || !strings.Contains(err.Error(), "missing shard 0") {
			t.Fatalf("VolumeEcShardsToVolume missing-shard error mismatch: %v", err)
		}
	})
	t.Run("no live entries returns failed precondition", func(t *testing.T) {
		const volumeID = uint32(119)
		const needleID = uint64(990005)
		const cookie = uint32(0x5566FF11)
		framework.AllocateVolume(t, grpcClient, volumeID, "")
		fid := framework.NewFileID(volumeID, needleID, cookie)
		uploadResp := framework.UploadBytes(t, httpClient, clusterHarness.VolumeAdminURL(), fid, []byte("ec-no-live-entries-content"))
		_ = framework.ReadAllAndClose(t, uploadResp)
		if uploadResp.StatusCode != http.StatusCreated {
			t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
		}
		// Delete the only needle over HTTP so the EC index carries no live
		// entries once shards are generated.
		deleteResp := framework.DoRequest(t, httpClient, mustNewRequest(t, http.MethodDelete, clusterHarness.VolumeAdminURL()+"/"+fid))
		_ = framework.ReadAllAndClose(t, deleteResp)
		if deleteResp.StatusCode != http.StatusAccepted {
			t.Fatalf("delete expected 202, got %d", deleteResp.StatusCode)
		}
		_, err := grpcClient.VolumeEcShardsGenerate(ctx, &volume_server_pb.VolumeEcShardsGenerateRequest{
			VolumeId:   volumeID,
			Collection: "",
		})
		if err != nil {
			t.Fatalf("VolumeEcShardsGenerate failed: %v", err)
		}
		// Mount shards 0-9 (presumably the data shards — confirm against the
		// EC layout constants) so the conversion can be attempted.
		_, err = grpcClient.VolumeEcShardsMount(ctx, &volume_server_pb.VolumeEcShardsMountRequest{
			VolumeId:   volumeID,
			Collection: "",
			ShardIds:   []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
		})
		if err != nil {
			t.Fatalf("VolumeEcShardsMount data shards failed: %v", err)
		}
		_, err = grpcClient.VolumeEcShardsToVolume(ctx, &volume_server_pb.VolumeEcShardsToVolumeRequest{
			VolumeId:   volumeID,
			Collection: "",
		})
		if err == nil {
			t.Fatalf("VolumeEcShardsToVolume expected failed-precondition error when no live entries")
		}
		if status.Code(err) != codes.FailedPrecondition {
			t.Fatalf("VolumeEcShardsToVolume no-live-entries expected FailedPrecondition, got %v (%v)", status.Code(err), err)
		}
		if !strings.Contains(err.Error(), erasure_coding.EcNoLiveEntriesSubstring) {
			t.Fatalf("VolumeEcShardsToVolume no-live-entries error should mention %q, got %v", erasure_coding.EcNoLiveEntriesSubstring, err)
		}
	})
}
// TestEcShardsToVolumeSuccessRoundTrip verifies that a volume converted to EC
// shards can be converted back into a normal volume and still serve the
// original payload over HTTP.
func TestEcShardsToVolumeSuccessRoundTrip(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, client := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer conn.Close()
	const (
		volumeID = uint32(120)
		needleID = uint64(990006)
		cookie   = uint32(0x66771122)
	)
	framework.AllocateVolume(t, client, volumeID, "")
	httpClient := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, needleID, cookie)
	payload := []byte("ec-shards-to-volume-success-roundtrip-content")
	uploadResp := framework.UploadBytes(t, httpClient, harness.VolumeAdminURL(), fid, payload)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	if _, err := client.VolumeEcShardsGenerate(ctx, &volume_server_pb.VolumeEcShardsGenerateRequest{
		VolumeId:   volumeID,
		Collection: "",
	}); err != nil {
		t.Fatalf("VolumeEcShardsGenerate failed: %v", err)
	}
	if _, err := client.VolumeEcShardsMount(ctx, &volume_server_pb.VolumeEcShardsMountRequest{
		VolumeId:   volumeID,
		Collection: "",
		ShardIds:   []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
	}); err != nil {
		t.Fatalf("VolumeEcShardsMount data shards failed: %v", err)
	}
	if _, err := client.VolumeEcShardsToVolume(ctx, &volume_server_pb.VolumeEcShardsToVolumeRequest{
		VolumeId:   volumeID,
		Collection: "",
	}); err != nil {
		t.Fatalf("VolumeEcShardsToVolume success path failed: %v", err)
	}
	// The reconstructed volume must serve the exact original bytes.
	readResp := framework.ReadBytes(t, httpClient, harness.VolumeAdminURL(), fid)
	readBody := framework.ReadAllAndClose(t, readResp)
	if readResp.StatusCode != http.StatusOK {
		t.Fatalf("post-conversion read expected 200, got %d", readResp.StatusCode)
	}
	if got, want := string(readBody), string(payload); got != want {
		t.Fatalf("post-conversion payload mismatch: got %q want %q", got, want)
	}
}
// TestEcShardsDeleteLastShardRemovesEcx verifies that deleting every EC shard
// of a volume also removes its .ecx index file: CopyFile can stream the .ecx
// before the deletion and fails with a not-found error afterwards.
func TestEcShardsDeleteLastShardRemovesEcx(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(121)
	framework.AllocateVolume(t, grpcClient, volumeID, "")
	httpClient := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, 990007, 0x77882233)
	uploadResp := framework.UploadBytes(t, httpClient, clusterHarness.VolumeAdminURL(), fid, []byte("ec-delete-all-shards-content"))
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	_, err := grpcClient.VolumeEcShardsGenerate(ctx, &volume_server_pb.VolumeEcShardsGenerateRequest{
		VolumeId:   volumeID,
		Collection: "",
	})
	if err != nil {
		t.Fatalf("VolumeEcShardsGenerate failed: %v", err)
	}
	// Verify .ecx is present before deleting all shards.
	// StopOffset=1 keeps the stream to a single byte; CompactionRevision of
	// MaxUint32 is presumably the sentinel that skips the revision check for
	// EC files — confirm against the CopyFile handler.
	ecxBeforeDelete, err := grpcClient.CopyFile(ctx, &volume_server_pb.CopyFileRequest{
		VolumeId:           volumeID,
		Collection:         "",
		IsEcVolume:         true,
		Ext:                ".ecx",
		CompactionRevision: math.MaxUint32,
		StopOffset:         1,
	})
	if err != nil {
		t.Fatalf("CopyFile .ecx before shard deletion start failed: %v", err)
	}
	if _, err = ecxBeforeDelete.Recv(); err != nil {
		t.Fatalf("CopyFile .ecx before shard deletion recv failed: %v", err)
	}
	// Delete all 14 shard ids; afterwards the server should no longer know
	// this EC volume at all.
	_, err = grpcClient.VolumeEcShardsDelete(ctx, &volume_server_pb.VolumeEcShardsDeleteRequest{
		VolumeId:   volumeID,
		Collection: "",
		ShardIds:   []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13},
	})
	if err != nil {
		t.Fatalf("VolumeEcShardsDelete all shards failed: %v", err)
	}
	ecxAfterDelete, err := grpcClient.CopyFile(ctx, &volume_server_pb.CopyFileRequest{
		VolumeId:           volumeID,
		Collection:         "",
		IsEcVolume:         true,
		Ext:                ".ecx",
		CompactionRevision: math.MaxUint32,
		StopOffset:         1,
	})
	// The not-found error may surface when the stream opens or on first Recv.
	if err == nil {
		_, err = ecxAfterDelete.Recv()
	}
	if err == nil || !strings.Contains(err.Error(), "not found ec volume id") {
		t.Fatalf("CopyFile .ecx after deleting all shards should fail not-found, got: %v", err)
	}
}
// TestEcShardsCopyFromPeerSuccess generates EC shards for a volume on a source
// volume server, has a second server pull shard 0 plus the .ecx and .vif files
// via VolumeEcShardsCopy, and confirms each copied file is streamable from the
// destination through CopyFile.
func TestEcShardsCopyFromPeerSuccess(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartDualVolumeCluster(t, matrix.P1())
	sourceConn, sourceClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress(0))
	defer sourceConn.Close()
	destConn, destClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress(1))
	defer destConn.Close()
	const volumeID = uint32(122)
	framework.AllocateVolume(t, sourceClient, volumeID, "")
	httpClient := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, 990008, 0x88993344)
	uploadResp := framework.UploadBytes(t, httpClient, clusterHarness.VolumeAdminURL(0), fid, []byte("ec-copy-from-peer-content"))
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("source upload expected 201, got %d", uploadResp.StatusCode)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	_, err := sourceClient.VolumeEcShardsGenerate(ctx, &volume_server_pb.VolumeEcShardsGenerateRequest{
		VolumeId:   volumeID,
		Collection: "",
	})
	if err != nil {
		t.Fatalf("source VolumeEcShardsGenerate failed: %v", err)
	}
	// Builds "<adminHost:adminPort>.<grpcPort>" — presumably the server
	// address form that encodes the gRPC port after a dot; confirm against
	// the framework's address helpers.
	sourceDataNode := clusterHarness.VolumeAdminAddress(0) + "." + strings.Split(clusterHarness.VolumeGRPCAddress(0), ":")[1]
	_, err = destClient.VolumeEcShardsCopy(ctx, &volume_server_pb.VolumeEcShardsCopyRequest{
		VolumeId:       volumeID,
		Collection:     "",
		SourceDataNode: sourceDataNode,
		ShardIds:       []uint32{0},
		CopyEcxFile:    true,
		CopyVifFile:    true,
	})
	if err != nil {
		t.Fatalf("destination VolumeEcShardsCopy success path failed: %v", err)
	}
	// Every copied file must now exist on the destination server.
	for _, ext := range []string{".ec00", ".ecx", ".vif"} {
		copyStream, copyErr := destClient.CopyFile(ctx, &volume_server_pb.CopyFileRequest{
			VolumeId:           volumeID,
			Collection:         "",
			IsEcVolume:         true,
			Ext:                ext,
			CompactionRevision: math.MaxUint32,
			StopOffset:         1,
		})
		if copyErr != nil {
			t.Fatalf("destination CopyFile %s start failed: %v", ext, copyErr)
		}
		if _, copyErr = copyStream.Recv(); copyErr != nil {
			t.Fatalf("destination CopyFile %s recv failed: %v", ext, copyErr)
		}
	}
}
// TestEcShardsCopyFailsWhenSourceUnavailable checks that copying EC shards
// from an unreachable source data node surfaces an error to the caller.
func TestEcShardsCopyFailsWhenSourceUnavailable(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, client := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer conn.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// The source address is deliberately bogus and the volume id unknown.
	req := &volume_server_pb.VolumeEcShardsCopyRequest{
		VolumeId:       12345,
		Collection:     "",
		SourceDataNode: "127.0.0.1:1.1",
		ShardIds:       []uint32{0},
		CopyEcxFile:    true,
	}
	_, err := client.VolumeEcShardsCopy(ctx, req)
	if err == nil || !strings.Contains(err.Error(), "VolumeEcShardsCopy volume") {
		t.Fatalf("VolumeEcShardsCopy source-unavailable error mismatch: %v", err)
	}
}

139
test/volume_server/grpc/health_state_test.go

@ -0,0 +1,139 @@
package volume_server_grpc_test
import (
"context"
"strings"
"testing"
"time"
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
)
// TestStateAndStatusRPCs covers the volume server's basic health surface:
// GetState, a SetState round trip that toggles the maintenance flag using
// the returned version for each update, the populated fields of
// VolumeServerStatus, and the timestamps returned by Ping.
func TestStateAndStatusRPCs(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	cluster := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, client := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress())
	defer conn.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	initialState, err := client.GetState(ctx, &volume_server_pb.GetStateRequest{})
	if err != nil {
		t.Fatalf("GetState failed: %v", err)
	}
	if initialState.GetState() == nil {
		t.Fatalf("GetState returned nil state")
	}
	// Enable maintenance, supplying the current version so the server
	// accepts the update.
	setResp, err := client.SetState(ctx, &volume_server_pb.SetStateRequest{
		State: &volume_server_pb.VolumeServerState{
			Maintenance: true,
			Version:     initialState.GetState().GetVersion(),
		},
	})
	if err != nil {
		t.Fatalf("SetState(maintenance=true) failed: %v", err)
	}
	if !setResp.GetState().GetMaintenance() {
		t.Fatalf("expected maintenance=true after SetState")
	}
	// Toggle maintenance back off, chaining the version from the previous
	// response.
	setResp, err = client.SetState(ctx, &volume_server_pb.SetStateRequest{
		State: &volume_server_pb.VolumeServerState{
			Maintenance: false,
			Version:     setResp.GetState().GetVersion(),
		},
	})
	if err != nil {
		t.Fatalf("SetState(maintenance=false) failed: %v", err)
	}
	if setResp.GetState().GetMaintenance() {
		t.Fatalf("expected maintenance=false after SetState")
	}
	statusResp, err := client.VolumeServerStatus(ctx, &volume_server_pb.VolumeServerStatusRequest{})
	if err != nil {
		t.Fatalf("VolumeServerStatus failed: %v", err)
	}
	if statusResp.GetVersion() == "" {
		t.Fatalf("VolumeServerStatus returned empty version")
	}
	if len(statusResp.GetDiskStatuses()) == 0 {
		t.Fatalf("VolumeServerStatus returned no disk statuses")
	}
	if statusResp.GetState() == nil {
		t.Fatalf("VolumeServerStatus returned nil state")
	}
	if statusResp.GetMemoryStatus() == nil {
		t.Fatalf("VolumeServerStatus returned nil memory status")
	}
	// Any live Go process has at least one goroutine, so zero means the
	// field was not populated.
	if statusResp.GetMemoryStatus().GetGoroutines() <= 0 {
		t.Fatalf("VolumeServerStatus memory status should report goroutines, got %d", statusResp.GetMemoryStatus().GetGoroutines())
	}
	pingResp, err := client.Ping(ctx, &volume_server_pb.PingRequest{})
	if err != nil {
		t.Fatalf("Ping failed: %v", err)
	}
	if pingResp.GetStartTimeNs() == 0 || pingResp.GetStopTimeNs() == 0 {
		t.Fatalf("Ping timestamps should be non-zero: %+v", pingResp)
	}
	if pingResp.GetStopTimeNs() < pingResp.GetStartTimeNs() {
		t.Fatalf("Ping stop time should be >= start time: %+v", pingResp)
	}
}
// TestSetStateVersionMismatchAndNilStateNoop verifies that SetState rejects an
// update carrying a version the server has not issued, that the rejected
// update does not mutate server state, and that a SetState with no state
// payload is a successful no-op.
//
// Fix over the previous revision: a unary gRPC call returns a nil response
// whenever the error is non-nil, so inspecting the SetState response after a
// rejected update only read proto zero values and proved nothing. The no-op
// guarantee is now verified by re-fetching the state via GetState.
func TestSetStateVersionMismatchAndNilStateNoop(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	cluster := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, client := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress())
	defer conn.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	initialState, err := client.GetState(ctx, &volume_server_pb.GetStateRequest{})
	if err != nil {
		t.Fatalf("GetState failed: %v", err)
	}
	initialVersion := initialState.GetState().GetVersion()
	initialMaintenance := initialState.GetState().GetMaintenance()
	// A version the server never issued must be rejected.
	_, err = client.SetState(ctx, &volume_server_pb.SetStateRequest{
		State: &volume_server_pb.VolumeServerState{
			Maintenance: true,
			Version:     initialVersion + 1,
		},
	})
	if err == nil {
		t.Fatalf("SetState with stale version should fail")
	}
	if !strings.Contains(err.Error(), "version mismatch") {
		t.Fatalf("SetState stale version error mismatch: %v", err)
	}
	// Re-fetch the state: the rejected update must not have changed anything.
	afterStale, err := client.GetState(ctx, &volume_server_pb.GetStateRequest{})
	if err != nil {
		t.Fatalf("GetState after stale SetState failed: %v", err)
	}
	if afterStale.GetState().GetVersion() != initialVersion {
		t.Fatalf("SetState stale version should not mutate server version: got %d want %d", afterStale.GetState().GetVersion(), initialVersion)
	}
	if afterStale.GetState().GetMaintenance() != initialMaintenance {
		t.Fatalf("SetState stale version should not mutate maintenance flag")
	}
	// A request with no State payload must succeed and leave state untouched.
	nilResp, err := client.SetState(ctx, &volume_server_pb.SetStateRequest{})
	if err != nil {
		t.Fatalf("SetState nil-state request should be no-op success: %v", err)
	}
	if nilResp.GetState().GetVersion() != initialVersion {
		t.Fatalf("SetState nil-state should keep version unchanged: got %d want %d", nilResp.GetState().GetVersion(), initialVersion)
	}
	if nilResp.GetState().GetMaintenance() != initialMaintenance {
		t.Fatalf("SetState nil-state should keep maintenance unchanged")
	}
}

385
test/volume_server/grpc/scrub_query_test.go

@ -0,0 +1,385 @@
package volume_server_grpc_test
import (
"context"
"io"
"strings"
"testing"
"time"
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
)
// TestScrubVolumeIndexAndUnsupportedMode runs an INDEX-mode scrub over one
// healthy volume and then confirms an out-of-range mode value is rejected.
func TestScrubVolumeIndexAndUnsupportedMode(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, client := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(61)
	framework.AllocateVolume(t, client, volumeID, "")
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	indexResp, err := client.ScrubVolume(ctx, &volume_server_pb.ScrubVolumeRequest{
		VolumeIds: []uint32{volumeID},
		Mode:      volume_server_pb.VolumeScrubMode_INDEX,
	})
	if err != nil {
		t.Fatalf("ScrubVolume index mode failed: %v", err)
	}
	if got := indexResp.GetTotalVolumes(); got != 1 {
		t.Fatalf("ScrubVolume expected total_volumes=1, got %d", got)
	}
	// Mode 99 does not correspond to any defined scrub mode.
	_, err = client.ScrubVolume(ctx, &volume_server_pb.ScrubVolumeRequest{
		VolumeIds: []uint32{volumeID},
		Mode:      volume_server_pb.VolumeScrubMode(99),
	})
	switch {
	case err == nil:
		t.Fatalf("ScrubVolume should fail for unsupported mode")
	case !strings.Contains(err.Error(), "unsupported volume scrub mode"):
		t.Fatalf("ScrubVolume unsupported mode error mismatch: %v", err)
	}
}
// TestScrubEcVolumeMissingVolume checks that scrubbing a non-existent EC
// volume id returns an explicit error mentioning the EC volume.
func TestScrubEcVolumeMissingVolume(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, client := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer conn.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	_, err := client.ScrubEcVolume(ctx, &volume_server_pb.ScrubEcVolumeRequest{
		VolumeIds: []uint32{98765},
		Mode:      volume_server_pb.VolumeScrubMode_INDEX,
	})
	switch {
	case err == nil:
		t.Fatalf("ScrubEcVolume should fail for missing EC volume")
	case !strings.Contains(err.Error(), "EC volume id"):
		t.Fatalf("ScrubEcVolume missing-volume error mismatch: %v", err)
	}
}
// TestScrubEcVolumeAutoSelectNoEcVolumes confirms that an auto-select EC
// scrub (no VolumeIds) on a server hosting no EC data reports zero volumes
// and no breakage.
func TestScrubEcVolumeAutoSelectNoEcVolumes(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, client := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer conn.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	resp, err := client.ScrubEcVolume(ctx, &volume_server_pb.ScrubEcVolumeRequest{
		Mode: volume_server_pb.VolumeScrubMode_INDEX,
	})
	if err != nil {
		t.Fatalf("ScrubEcVolume auto-select failed: %v", err)
	}
	if total := resp.GetTotalVolumes(); total != 0 {
		t.Fatalf("ScrubEcVolume auto-select expected total_volumes=0 without EC data, got %d", total)
	}
	if broken := resp.GetBrokenVolumeIds(); len(broken) != 0 {
		t.Fatalf("ScrubEcVolume auto-select expected no broken volumes, got %v", broken)
	}
}
// TestQueryInvalidAndMissingFileIDPaths verifies Query fails both for a
// malformed file id and for a file id on a volume this server does not host.
func TestQueryInvalidAndMissingFileIDPaths(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, client := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer conn.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// queryErr issues a Query for one file id and returns the first error
	// observed, whether it surfaces at stream start or on the first Recv.
	queryErr := func(fid string) error {
		stream, err := client.Query(ctx, &volume_server_pb.QueryRequest{
			FromFileIds: []string{fid},
			Selections:  []string{"name"},
			Filter:      &volume_server_pb.QueryRequest_Filter{},
			InputSerialization: &volume_server_pb.QueryRequest_InputSerialization{
				JsonInput: &volume_server_pb.QueryRequest_InputSerialization_JSONInput{},
			},
		})
		if err != nil {
			return err
		}
		_, err = stream.Recv()
		return err
	}
	if queryErr("bad-fid") == nil {
		t.Fatalf("Query should fail for invalid file id")
	}
	if queryErr(framework.NewFileID(98766, 1, 1)) == nil {
		t.Fatalf("Query should fail for missing file id volume")
	}
}
// TestScrubVolumeAutoSelectAndNotImplementedModes verifies that ScrubVolume
// with an empty VolumeIds list scans every hosted volume, and that the LOCAL
// and FULL modes currently flag the volume as broken with "not implemented"
// detail messages instead of failing the RPC.
func TestScrubVolumeAutoSelectAndNotImplementedModes(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeIDA = uint32(62)
	const volumeIDB = uint32(63)
	framework.AllocateVolume(t, grpcClient, volumeIDA, "")
	framework.AllocateVolume(t, grpcClient, volumeIDB, "")
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// No VolumeIds: the scrub should cover both allocated volumes (and any
	// others the harness created).
	autoResp, err := grpcClient.ScrubVolume(ctx, &volume_server_pb.ScrubVolumeRequest{
		Mode: volume_server_pb.VolumeScrubMode_INDEX,
	})
	if err != nil {
		t.Fatalf("ScrubVolume auto-select failed: %v", err)
	}
	if autoResp.GetTotalVolumes() < 2 {
		t.Fatalf("ScrubVolume auto-select expected at least 2 volumes, got %d", autoResp.GetTotalVolumes())
	}
	localResp, err := grpcClient.ScrubVolume(ctx, &volume_server_pb.ScrubVolumeRequest{
		VolumeIds: []uint32{volumeIDA},
		Mode:      volume_server_pb.VolumeScrubMode_LOCAL,
	})
	if err != nil {
		t.Fatalf("ScrubVolume local mode failed: %v", err)
	}
	if localResp.GetTotalVolumes() != 1 {
		t.Fatalf("ScrubVolume local mode expected total_volumes=1, got %d", localResp.GetTotalVolumes())
	}
	// LOCAL mode is not implemented yet: the volume is reported broken and
	// the details say why.
	if len(localResp.GetBrokenVolumeIds()) != 1 || localResp.GetBrokenVolumeIds()[0] != volumeIDA {
		t.Fatalf("ScrubVolume local mode expected broken volume %d, got %v", volumeIDA, localResp.GetBrokenVolumeIds())
	}
	if len(localResp.GetDetails()) == 0 || !strings.Contains(strings.Join(localResp.GetDetails(), " "), "not implemented") {
		t.Fatalf("ScrubVolume local mode expected not-implemented details, got %v", localResp.GetDetails())
	}
	fullResp, err := grpcClient.ScrubVolume(ctx, &volume_server_pb.ScrubVolumeRequest{
		VolumeIds: []uint32{volumeIDA},
		Mode:      volume_server_pb.VolumeScrubMode_FULL,
	})
	if err != nil {
		t.Fatalf("ScrubVolume full mode failed: %v", err)
	}
	if fullResp.GetTotalVolumes() != 1 {
		t.Fatalf("ScrubVolume full mode expected total_volumes=1, got %d", fullResp.GetTotalVolumes())
	}
	// FULL mode behaves the same: accepted but reported as not implemented.
	if len(fullResp.GetDetails()) == 0 || !strings.Contains(strings.Join(fullResp.GetDetails(), " "), "not implemented") {
		t.Fatalf("ScrubVolume full mode expected not-implemented details, got %v", fullResp.GetDetails())
	}
}
// TestQueryJsonSuccessAndCsvNoOutput uploads JSON-lines data and verifies two
// Query variations: a JSON-input query with filter score > 10 streams only the
// matching records, and a CSV-input query over the same (non-CSV) needle
// streams no rows at all.
func TestQueryJsonSuccessAndCsvNoOutput(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(64)
	const needleID = uint64(777001)
	const cookie = uint32(0xAABBCCDD)
	framework.AllocateVolume(t, grpcClient, volumeID, "")
	// Three rows; only the last two satisfy score > 10.
	jsonLines := []byte("{\"score\":3}\n{\"score\":12}\n{\"score\":18}\n")
	httpClient := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, needleID, cookie)
	uploadResp := framework.UploadBytes(t, httpClient, clusterHarness.VolumeAdminURL(), fid, jsonLines)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != 201 {
		t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	queryStream, err := grpcClient.Query(ctx, &volume_server_pb.QueryRequest{
		FromFileIds: []string{fid},
		Selections:  []string{"score"},
		Filter: &volume_server_pb.QueryRequest_Filter{
			Field:   "score",
			Operand: ">",
			Value:   "10",
		},
		InputSerialization: &volume_server_pb.QueryRequest_InputSerialization{
			JsonInput: &volume_server_pb.QueryRequest_InputSerialization_JSONInput{Type: "LINES"},
		},
	})
	if err != nil {
		t.Fatalf("Query json start failed: %v", err)
	}
	firstStripe, err := queryStream.Recv()
	if err != nil {
		t.Fatalf("Query json recv failed: %v", err)
	}
	// Matching rows appear in the stripe as "score:NN" text; the row that
	// failed the filter must be absent.
	records := string(firstStripe.GetRecords())
	if !strings.Contains(records, "score:12") || !strings.Contains(records, "score:18") {
		t.Fatalf("Query json records missing expected filtered scores: %q", records)
	}
	if strings.Contains(records, "score:3") {
		t.Fatalf("Query json records should not include filtered-out score: %q", records)
	}
	_, err = queryStream.Recv()
	if err != io.EOF {
		t.Fatalf("Query json expected EOF after first stripe, got: %v", err)
	}
	// CSV input serialization over JSON content yields no parsed rows.
	csvStream, err := grpcClient.Query(ctx, &volume_server_pb.QueryRequest{
		FromFileIds: []string{fid},
		Selections:  []string{"score"},
		Filter:      &volume_server_pb.QueryRequest_Filter{},
		InputSerialization: &volume_server_pb.QueryRequest_InputSerialization{
			CsvInput: &volume_server_pb.QueryRequest_InputSerialization_CSVInput{},
		},
	})
	if err != nil {
		t.Fatalf("Query csv start failed: %v", err)
	}
	_, err = csvStream.Recv()
	if err != io.EOF {
		t.Fatalf("Query csv expected EOF with no rows, got: %v", err)
	}
}
// TestQueryJsonNoMatchReturnsEmptyStripe confirms that a JSON-lines Query
// whose filter matches nothing still streams one empty record stripe before
// ending with EOF.
func TestQueryJsonNoMatchReturnsEmptyStripe(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, client := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer conn.Close()
	const (
		volumeID = uint32(65)
		needleID = uint64(777002)
		cookie   = uint32(0xABABCDCD)
	)
	framework.AllocateVolume(t, client, volumeID, "")
	fid := framework.NewFileID(volumeID, needleID, cookie)
	uploadResp := framework.UploadBytes(t, framework.NewHTTPClient(), harness.VolumeAdminURL(), fid, []byte("{\"score\":1}\n{\"score\":2}\n"))
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != 201 {
		t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// score > 100 excludes both uploaded rows.
	stream, err := client.Query(ctx, &volume_server_pb.QueryRequest{
		FromFileIds: []string{fid},
		Selections:  []string{"score"},
		Filter: &volume_server_pb.QueryRequest_Filter{
			Field:   "score",
			Operand: ">",
			Value:   "100",
		},
		InputSerialization: &volume_server_pb.QueryRequest_InputSerialization{
			JsonInput: &volume_server_pb.QueryRequest_InputSerialization_JSONInput{Type: "LINES"},
		},
	})
	if err != nil {
		t.Fatalf("Query json no-match start failed: %v", err)
	}
	stripe, err := stream.Recv()
	if err != nil {
		t.Fatalf("Query json no-match recv failed: %v", err)
	}
	if records := stripe.GetRecords(); len(records) != 0 {
		t.Fatalf("Query json no-match expected empty records stripe, got: %q", string(records))
	}
	if _, err := stream.Recv(); err != io.EOF {
		t.Fatalf("Query json no-match expected EOF after first empty stripe, got: %v", err)
	}
}
// TestQueryCookieMismatchReturnsEOFNoResults confirms that Query silently
// skips a file id whose cookie does not match the stored needle: the stream
// ends with EOF and no records.
func TestQueryCookieMismatchReturnsEOFNoResults(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, client := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer conn.Close()
	const (
		volumeID = uint32(66)
		needleID = uint64(777003)
		cookie   = uint32(0xCDCDABAB)
	)
	framework.AllocateVolume(t, client, volumeID, "")
	fid := framework.NewFileID(volumeID, needleID, cookie)
	uploadResp := framework.UploadBytes(t, framework.NewHTTPClient(), harness.VolumeAdminURL(), fid, []byte("{\"score\":7}\n{\"score\":8}\n"))
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != 201 {
		t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// Same volume and needle id, but an off-by-one cookie.
	stream, err := client.Query(ctx, &volume_server_pb.QueryRequest{
		FromFileIds: []string{framework.NewFileID(volumeID, needleID, cookie+1)},
		Selections:  []string{"score"},
		Filter: &volume_server_pb.QueryRequest_Filter{
			Field:   "score",
			Operand: ">",
			Value:   "0",
		},
		InputSerialization: &volume_server_pb.QueryRequest_InputSerialization{
			JsonInput: &volume_server_pb.QueryRequest_InputSerialization_JSONInput{Type: "LINES"},
		},
	})
	if err != nil {
		t.Fatalf("Query start for cookie mismatch should not fail immediately, got: %v", err)
	}
	if _, err := stream.Recv(); err != io.EOF {
		t.Fatalf("Query cookie mismatch expected EOF with no streamed records, got: %v", err)
	}
}

206
test/volume_server/grpc/tail_test.go

@ -0,0 +1,206 @@
package volume_server_grpc_test
import (
"bytes"
"context"
"io"
"net/http"
"strings"
"testing"
"time"
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
)
// TestVolumeTailSenderMissingVolume checks that tailing a volume id that was
// never allocated surfaces a "not found volume" error on the stream.
func TestVolumeTailSenderMissingVolume(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, client := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer conn.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	request := &volume_server_pb.VolumeTailSenderRequest{VolumeId: 77777, SinceNs: 0, IdleTimeoutSeconds: 1}
	stream, err := client.VolumeTailSender(ctx, request)
	if err == nil {
		// Stream creation may succeed; the error then arrives on first Recv.
		_, err = stream.Recv()
	}
	if err == nil || !strings.Contains(err.Error(), "not found volume") {
		t.Fatalf("VolumeTailSender missing-volume error mismatch: %v", err)
	}
}
// TestVolumeTailSenderHeartbeatThenEOF tails an empty volume and expects a
// single heartbeat message (IsLastChunk=true) followed by a clean EOF once
// the one-second idle timeout elapses.
func TestVolumeTailSenderHeartbeatThenEOF(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(71)
	framework.AllocateVolume(t, grpcClient, volumeID, "")
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	stream, err := grpcClient.VolumeTailSender(ctx, &volume_server_pb.VolumeTailSenderRequest{
		VolumeId:           volumeID,
		SinceNs:            0,
		IdleTimeoutSeconds: 1,
	})
	if err != nil {
		t.Fatalf("VolumeTailSender start failed: %v", err)
	}
	msg, err := stream.Recv()
	if err != nil {
		t.Fatalf("VolumeTailSender first recv failed: %v", err)
	}
	// No needles exist yet, so the first message must be a heartbeat marker.
	if !msg.GetIsLastChunk() {
		t.Fatalf("expected first tail message to be heartbeat IsLastChunk=true")
	}
	// After the idle timeout the server is expected to close the stream.
	_, err = stream.Recv()
	if err != io.EOF {
		t.Fatalf("expected EOF after idle timeout drain, got: %v", err)
	}
}
// TestVolumeTailReceiverMissingVolume checks that the receiver side rejects a
// volume id it does not host with a "receiver not found volume" error.
func TestVolumeTailReceiverMissingVolume(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, client := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer conn.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	request := &volume_server_pb.VolumeTailReceiverRequest{
		VolumeId:           88888,
		SourceVolumeServer: harness.VolumeServerAddress(),
		SinceNs:            0,
		IdleTimeoutSeconds: 1,
	}
	_, err := client.VolumeTailReceiver(ctx, request)
	if err == nil || !strings.Contains(err.Error(), "receiver not found volume") {
		t.Fatalf("VolumeTailReceiver missing-volume error mismatch: %v", err)
	}
}
// TestVolumeTailReceiverReplicatesSourceUpdates uploads a needle to the source
// volume server, asks the destination server to tail-receive from the source,
// and then verifies the payload is readable from the destination.
func TestVolumeTailReceiverReplicatesSourceUpdates(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartDualVolumeCluster(t, matrix.P1())
	sourceConn, sourceClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress(0))
	defer sourceConn.Close()
	destConn, destClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress(1))
	defer destConn.Close()
	// The same volume id must exist on both servers for tail replication.
	const volumeID = uint32(72)
	framework.AllocateVolume(t, sourceClient, volumeID, "")
	framework.AllocateVolume(t, destClient, volumeID, "")
	httpClient := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, 880003, 0x3456789A)
	payload := []byte("tail-receiver-replicates-source-updates")
	sourceUploadResp := framework.UploadBytes(t, httpClient, clusterHarness.VolumeAdminURL(0), fid, payload)
	_ = framework.ReadAllAndClose(t, sourceUploadResp)
	if sourceUploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("source upload expected 201, got %d", sourceUploadResp.StatusCode)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	_, err := destClient.VolumeTailReceiver(ctx, &volume_server_pb.VolumeTailReceiverRequest{
		VolumeId: volumeID,
		// NOTE(review): builds "host:port.grpcPort" by joining the admin
		// address with the source's gRPC port via "." — presumably the server
		// address notation the receiver expects; confirm against the
		// framework's address helpers.
		SourceVolumeServer: clusterHarness.VolumeAdminAddress(0) + "." + strings.Split(clusterHarness.VolumeGRPCAddress(0), ":")[1],
		SinceNs:            0,
		IdleTimeoutSeconds: 1,
	})
	if err != nil {
		t.Fatalf("VolumeTailReceiver success path failed: %v", err)
	}
	// Once the receiver call returns, the needle should be readable directly
	// from the destination server with the same bytes.
	destReadResp := framework.ReadBytes(t, httpClient, clusterHarness.VolumeAdminURL(1), fid)
	destReadBody := framework.ReadAllAndClose(t, destReadResp)
	if destReadResp.StatusCode != http.StatusOK {
		t.Fatalf("destination read after tail receive expected 200, got %d", destReadResp.StatusCode)
	}
	if string(destReadBody) != string(payload) {
		t.Fatalf("destination tail-received payload mismatch: got %q want %q", string(destReadBody), string(payload))
	}
}
// TestVolumeTailSenderLargeNeedleChunking uploads a needle large enough
// (2 MiB + 128 KiB) that the tail sender must split it across multiple stream
// messages, then verifies the chunking flags: at least one non-last chunk and
// exactly a final chunk marked IsLastChunk=true.
func TestVolumeTailSenderLargeNeedleChunking(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(73)
	framework.AllocateVolume(t, grpcClient, volumeID, "")
	httpClient := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, 880004, 0x456789AB)
	// Payload intentionally exceeds 2 MiB to force multi-chunk streaming.
	largePayload := bytes.Repeat([]byte("L"), 2*1024*1024+128*1024)
	uploadResp := framework.UploadBytes(t, httpClient, clusterHarness.VolumeAdminURL(), fid, largePayload)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("large upload expected 201, got %d", uploadResp.StatusCode)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	stream, err := grpcClient.VolumeTailSender(ctx, &volume_server_pb.VolumeTailSenderRequest{
		VolumeId:           volumeID,
		SinceNs:            0,
		IdleTimeoutSeconds: 1,
	})
	if err != nil {
		t.Fatalf("VolumeTailSender start failed: %v", err)
	}
	dataChunkCount := 0
	sawNonLastDataChunk := false
	sawLastDataChunk := false
	// Drain the stream, counting only messages that carry needle bytes
	// (heartbeats have an empty body and are skipped).
	for {
		msg, recvErr := stream.Recv()
		if recvErr == io.EOF {
			break
		}
		if recvErr != nil {
			t.Fatalf("VolumeTailSender recv failed: %v", recvErr)
		}
		if len(msg.GetNeedleBody()) == 0 {
			continue
		}
		dataChunkCount++
		if msg.GetIsLastChunk() {
			sawLastDataChunk = true
		} else {
			sawNonLastDataChunk = true
		}
	}
	if dataChunkCount < 2 {
		t.Fatalf("VolumeTailSender expected multiple chunks for large needle, got %d", dataChunkCount)
	}
	if !sawNonLastDataChunk {
		t.Fatalf("VolumeTailSender expected at least one non-last data chunk")
	}
	if !sawLastDataChunk {
		t.Fatalf("VolumeTailSender expected a final data chunk marked IsLastChunk=true")
	}
}

236
test/volume_server/grpc/tiering_remote_test.go

@ -0,0 +1,236 @@
package volume_server_grpc_test
import (
"context"
"strings"
"testing"
"time"
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
"github.com/seaweedfs/seaweedfs/weed/pb/remote_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
)
// TestFetchAndWriteNeedleMaintenanceAndMissingVolume covers two error paths of
// FetchAndWriteNeedle: a volume id the server does not host, and any request
// while the server is in maintenance mode.
func TestFetchAndWriteNeedleMaintenanceAndMissingVolume(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// No volume 98781 exists, so the call must fail with a not-found error.
	_, err := grpcClient.FetchAndWriteNeedle(ctx, &volume_server_pb.FetchAndWriteNeedleRequest{
		VolumeId: 98781,
		NeedleId: 1,
	})
	if err == nil || !strings.Contains(err.Error(), "not found volume id") {
		t.Fatalf("FetchAndWriteNeedle missing-volume error mismatch: %v", err)
	}
	// Fetch the current state version first — SetState presumably requires a
	// matching version (optimistic concurrency); confirm against the
	// GetState/SetState contract.
	stateResp, err := grpcClient.GetState(ctx, &volume_server_pb.GetStateRequest{})
	if err != nil {
		t.Fatalf("GetState failed: %v", err)
	}
	_, err = grpcClient.SetState(ctx, &volume_server_pb.SetStateRequest{
		State: &volume_server_pb.VolumeServerState{
			Maintenance: true,
			Version:     stateResp.GetState().GetVersion(),
		},
	})
	if err != nil {
		t.Fatalf("SetState maintenance=true failed: %v", err)
	}
	// In maintenance mode even a benign request must be rejected up front.
	_, err = grpcClient.FetchAndWriteNeedle(ctx, &volume_server_pb.FetchAndWriteNeedleRequest{
		VolumeId: 1,
		NeedleId: 1,
	})
	if err == nil || !strings.Contains(err.Error(), "maintenance mode") {
		t.Fatalf("FetchAndWriteNeedle maintenance error mismatch: %v", err)
	}
}
// TestFetchAndWriteNeedleInvalidRemoteConfig verifies that FetchAndWriteNeedle
// rejects a remote configuration whose storage type has no registered client,
// surfacing a "get remote client" error.
func TestFetchAndWriteNeedleInvalidRemoteConfig(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, client := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(88)
	framework.AllocateVolume(t, client, volumeID, "")
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// Remote type "does-not-exist" is not a registered storage backend.
	request := &volume_server_pb.FetchAndWriteNeedleRequest{
		VolumeId: volumeID,
		NeedleId: 1,
		Cookie:   1,
		Size:     1,
		RemoteConf: &remote_pb.RemoteConf{
			Name: "it-invalid-remote",
			Type: "does-not-exist",
		},
		RemoteLocation: &remote_pb.RemoteStorageLocation{
			Name: "it-invalid-remote",
			Path: "/test",
		},
	}
	_, err := client.FetchAndWriteNeedle(ctx, request)
	if err == nil || !strings.Contains(err.Error(), "get remote client") {
		t.Fatalf("FetchAndWriteNeedle invalid-remote error mismatch: %v", err)
	}
}
// TestVolumeTierMoveDatToRemoteErrorPaths covers three failure modes of
// VolumeTierMoveDatToRemote: a missing volume, a collection mismatch, and a
// request issued while the server is in maintenance mode.
func TestVolumeTierMoveDatToRemoteErrorPaths(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(85)
	const collection = "tier-collection"
	framework.AllocateVolume(t, grpcClient, volumeID, collection)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// Case 1: volume id 98782 was never allocated.
	missingStream, err := grpcClient.VolumeTierMoveDatToRemote(ctx, &volume_server_pb.VolumeTierMoveDatToRemoteRequest{
		VolumeId:               98782,
		Collection:             collection,
		DestinationBackendName: "dummy",
	})
	if err == nil {
		// Streaming RPCs may defer the error to the first Recv.
		_, err = missingStream.Recv()
	}
	if err == nil || !strings.Contains(err.Error(), "not found") {
		t.Fatalf("VolumeTierMoveDatToRemote missing-volume error mismatch: %v", err)
	}
	// Case 2: the volume exists but under a different collection.
	mismatchStream, err := grpcClient.VolumeTierMoveDatToRemote(ctx, &volume_server_pb.VolumeTierMoveDatToRemoteRequest{
		VolumeId:               volumeID,
		Collection:             "wrong-collection",
		DestinationBackendName: "dummy",
	})
	if err == nil {
		_, err = mismatchStream.Recv()
	}
	if err == nil || !strings.Contains(err.Error(), "unexpected input") {
		t.Fatalf("VolumeTierMoveDatToRemote collection mismatch error mismatch: %v", err)
	}
	// Case 3: flip into maintenance mode (using the current state version)
	// and expect any tier move to be rejected.
	stateResp, err := grpcClient.GetState(ctx, &volume_server_pb.GetStateRequest{})
	if err != nil {
		t.Fatalf("GetState failed: %v", err)
	}
	_, err = grpcClient.SetState(ctx, &volume_server_pb.SetStateRequest{
		State: &volume_server_pb.VolumeServerState{
			Maintenance: true,
			Version:     stateResp.GetState().GetVersion(),
		},
	})
	if err != nil {
		t.Fatalf("SetState maintenance=true failed: %v", err)
	}
	maintenanceStream, err := grpcClient.VolumeTierMoveDatToRemote(ctx, &volume_server_pb.VolumeTierMoveDatToRemoteRequest{
		VolumeId:               volumeID,
		Collection:             collection,
		DestinationBackendName: "dummy",
	})
	if err == nil {
		_, err = maintenanceStream.Recv()
	}
	if err == nil || !strings.Contains(err.Error(), "maintenance mode") {
		t.Fatalf("VolumeTierMoveDatToRemote maintenance error mismatch: %v", err)
	}
}
// TestVolumeTierMoveDatToRemoteMissingBackend verifies that moving a volume's
// .dat file to a backend that is not configured fails with a
// destination-not-found error.
func TestVolumeTierMoveDatToRemoteMissingBackend(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, client := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(89)
	const collection = "tier-missing-backend"
	framework.AllocateVolume(t, client, volumeID, collection)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	request := &volume_server_pb.VolumeTierMoveDatToRemoteRequest{
		VolumeId:               volumeID,
		Collection:             collection,
		DestinationBackendName: "definitely-missing-backend",
	}
	stream, err := client.VolumeTierMoveDatToRemote(ctx, request)
	if err == nil {
		// The error may only surface on the first Recv of the stream.
		_, err = stream.Recv()
	}
	if err == nil || !strings.Contains(err.Error(), "destination definitely-missing-backend not found") {
		t.Fatalf("VolumeTierMoveDatToRemote missing-backend error mismatch: %v", err)
	}
}
// TestVolumeTierMoveDatFromRemoteErrorPaths covers three failure modes of
// VolumeTierMoveDatFromRemote: a missing volume, a collection mismatch, and a
// volume whose .dat file is still on local disk (nothing to download).
func TestVolumeTierMoveDatFromRemoteErrorPaths(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(86)
	const collection = "tier-download-collection"
	framework.AllocateVolume(t, grpcClient, volumeID, collection)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// Case 1: volume id 98783 was never allocated.
	missingStream, err := grpcClient.VolumeTierMoveDatFromRemote(ctx, &volume_server_pb.VolumeTierMoveDatFromRemoteRequest{
		VolumeId:   98783,
		Collection: collection,
	})
	if err == nil {
		// Streaming RPCs may defer the error to the first Recv.
		_, err = missingStream.Recv()
	}
	if err == nil || !strings.Contains(err.Error(), "not found") {
		t.Fatalf("VolumeTierMoveDatFromRemote missing-volume error mismatch: %v", err)
	}
	// Case 2: the volume exists but under a different collection.
	mismatchStream, err := grpcClient.VolumeTierMoveDatFromRemote(ctx, &volume_server_pb.VolumeTierMoveDatFromRemoteRequest{
		VolumeId:   volumeID,
		Collection: "wrong-collection",
	})
	if err == nil {
		_, err = mismatchStream.Recv()
	}
	if err == nil || !strings.Contains(err.Error(), "unexpected input") {
		t.Fatalf("VolumeTierMoveDatFromRemote collection mismatch error mismatch: %v", err)
	}
	// Case 3: the freshly allocated volume has never been tiered away, so its
	// data is still local and the download must be refused.
	localDiskStream, err := grpcClient.VolumeTierMoveDatFromRemote(ctx, &volume_server_pb.VolumeTierMoveDatFromRemoteRequest{
		VolumeId:   volumeID,
		Collection: collection,
	})
	if err == nil {
		_, err = localDiskStream.Recv()
	}
	if err == nil || !strings.Contains(err.Error(), "already on local disk") {
		t.Fatalf("VolumeTierMoveDatFromRemote local-disk error mismatch: %v", err)
	}
}

87
test/volume_server/grpc/vacuum_test.go

@ -0,0 +1,87 @@
package volume_server_grpc_test
import (
"context"
"strings"
"testing"
"time"
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
)
// TestVacuumVolumeCheckSuccessAndMissingVolume exercises VacuumVolumeCheck on
// an existing volume (garbage ratio must lie in [0, 1]) and confirms the call
// fails for a volume id that does not exist.
func TestVacuumVolumeCheckSuccessAndMissingVolume(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, client := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(31)
	framework.AllocateVolume(t, client, volumeID, "")
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	checkResp, checkErr := client.VacuumVolumeCheck(ctx, &volume_server_pb.VacuumVolumeCheckRequest{VolumeId: volumeID})
	if checkErr != nil {
		t.Fatalf("VacuumVolumeCheck existing volume failed: %v", checkErr)
	}
	ratio := checkResp.GetGarbageRatio()
	if ratio < 0 || ratio > 1 {
		t.Fatalf("unexpected garbage ratio: %f", ratio)
	}
	// Volume 99999 was never allocated, so the check must error out.
	if _, err := client.VacuumVolumeCheck(ctx, &volume_server_pb.VacuumVolumeCheckRequest{VolumeId: 99999}); err == nil {
		t.Fatalf("VacuumVolumeCheck should fail for missing volume")
	}
}
// TestVacuumMaintenanceModeRejections puts the server into maintenance mode
// and verifies that all three vacuum mutation RPCs — Compact, Commit, and
// Cleanup — are rejected with a maintenance-mode error.
func TestVacuumMaintenanceModeRejections(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// Fetch the current state version so SetState is applied against it.
	stateResp, err := grpcClient.GetState(ctx, &volume_server_pb.GetStateRequest{})
	if err != nil {
		t.Fatalf("GetState failed: %v", err)
	}
	_, err = grpcClient.SetState(ctx, &volume_server_pb.SetStateRequest{
		State: &volume_server_pb.VolumeServerState{Maintenance: true, Version: stateResp.GetState().GetVersion()},
	})
	if err != nil {
		t.Fatalf("SetState maintenance=true failed: %v", err)
	}
	// Shared assertion: each vacuum RPC must fail with a maintenance error.
	assertMaintenanceErr := func(name string, err error) {
		t.Helper()
		if err == nil {
			t.Fatalf("%s should fail in maintenance mode", name)
		}
		if !strings.Contains(err.Error(), "maintenance mode") {
			t.Fatalf("%s expected maintenance mode error, got: %v", name, err)
		}
	}
	// Compact is a streaming RPC; its error may surface on the first Recv.
	compactStream, err := grpcClient.VacuumVolumeCompact(ctx, &volume_server_pb.VacuumVolumeCompactRequest{VolumeId: 31})
	if err == nil {
		_, err = compactStream.Recv()
	}
	assertMaintenanceErr("VacuumVolumeCompact", err)
	_, err = grpcClient.VacuumVolumeCommit(ctx, &volume_server_pb.VacuumVolumeCommitRequest{VolumeId: 31})
	assertMaintenanceErr("VacuumVolumeCommit", err)
	_, err = grpcClient.VacuumVolumeCleanup(ctx, &volume_server_pb.VacuumVolumeCleanupRequest{VolumeId: 31})
	assertMaintenanceErr("VacuumVolumeCleanup", err)
}

174
test/volume_server/http/admin_test.go

@ -0,0 +1,174 @@
package volume_server_http_test
import (
"encoding/json"
"net/http"
"strings"
"testing"
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
"github.com/seaweedfs/seaweedfs/weed/util/request_id"
)
// TestAdminStatusAndHealthz verifies the admin-port endpoints /status,
// /healthz and /ui/index.html: HTTP status codes, the "SeaweedFS Volume"
// Server header, request-id echoing, and the required fields in the /status
// JSON payload.
func TestAdminStatusAndHealthz(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	cluster := framework.StartSingleVolumeCluster(t, matrix.P1())
	client := framework.NewHTTPClient()
	// Use the shared mustNewRequest helper for consistency with the rest of
	// this file (previously this one request was built by hand with its own
	// error handling).
	statusReq := mustNewRequest(t, http.MethodGet, cluster.VolumeAdminURL()+"/status")
	statusReq.Header.Set(request_id.AmzRequestIDHeader, "test-request-id-1")
	statusResp := framework.DoRequest(t, client, statusReq)
	statusBody := framework.ReadAllAndClose(t, statusResp)
	if statusResp.StatusCode != http.StatusOK {
		t.Fatalf("expected /status code 200, got %d, body: %s", statusResp.StatusCode, string(statusBody))
	}
	if got := statusResp.Header.Get("Server"); !strings.Contains(got, "SeaweedFS Volume") {
		t.Fatalf("expected /status Server header to contain SeaweedFS Volume, got %q", got)
	}
	// The server must echo the caller-supplied request id header back.
	if got := statusResp.Header.Get(request_id.AmzRequestIDHeader); got != "test-request-id-1" {
		t.Fatalf("expected echoed request id, got %q", got)
	}
	var payload map[string]interface{}
	if err := json.Unmarshal(statusBody, &payload); err != nil {
		t.Fatalf("decode status response: %v", err)
	}
	// Core fields every /status payload is expected to carry.
	for _, field := range []string{"Version", "DiskStatuses", "Volumes"} {
		if _, found := payload[field]; !found {
			t.Fatalf("status payload missing field %q", field)
		}
	}
	healthReq := mustNewRequest(t, http.MethodGet, cluster.VolumeAdminURL()+"/healthz")
	healthReq.Header.Set(request_id.AmzRequestIDHeader, "test-request-id-2")
	healthResp := framework.DoRequest(t, client, healthReq)
	_ = framework.ReadAllAndClose(t, healthResp)
	if healthResp.StatusCode != http.StatusOK {
		t.Fatalf("expected /healthz code 200, got %d", healthResp.StatusCode)
	}
	if got := healthResp.Header.Get("Server"); !strings.Contains(got, "SeaweedFS Volume") {
		t.Fatalf("expected /healthz Server header to contain SeaweedFS Volume, got %q", got)
	}
	if got := healthResp.Header.Get(request_id.AmzRequestIDHeader); got != "test-request-id-2" {
		t.Fatalf("expected /healthz echoed request id, got %q", got)
	}
	// The status UI should render and mention "volume" somewhere in the page.
	uiResp := framework.DoRequest(t, client, mustNewRequest(t, http.MethodGet, cluster.VolumeAdminURL()+"/ui/index.html"))
	uiBody := framework.ReadAllAndClose(t, uiResp)
	if uiResp.StatusCode != http.StatusOK {
		t.Fatalf("expected /ui/index.html code 200, got %d, body: %s", uiResp.StatusCode, string(uiBody))
	}
	if !strings.Contains(strings.ToLower(string(uiBody)), "volume") {
		t.Fatalf("ui page does not look like volume status page")
	}
}
// TestOptionsMethodsByPort checks the CORS preflight method lists per port:
// the admin port advertises the full read-write method set, while the public
// port advertises only read methods.
func TestOptionsMethodsByPort(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	cluster := framework.StartSingleVolumeCluster(t, matrix.P2())
	client := framework.NewHTTPClient()
	adminResp := framework.DoRequest(t, client, mustNewRequest(t, http.MethodOptions, cluster.VolumeAdminURL()+"/"))
	_ = framework.ReadAllAndClose(t, adminResp)
	if adminResp.StatusCode != http.StatusOK {
		t.Fatalf("admin OPTIONS expected 200, got %d", adminResp.StatusCode)
	}
	// Admin port: every read-write method must be advertised.
	adminAllowed := adminResp.Header.Get("Access-Control-Allow-Methods")
	for _, expected := range []string{"PUT", "POST", "GET", "DELETE", "OPTIONS"} {
		if !strings.Contains(adminAllowed, expected) {
			t.Fatalf("admin allow methods missing %q, got %q", expected, adminAllowed)
		}
	}
	if adminResp.Header.Get("Access-Control-Allow-Headers") != "*" {
		t.Fatalf("admin allow headers expected '*', got %q", adminResp.Header.Get("Access-Control-Allow-Headers"))
	}
	publicResp := framework.DoRequest(t, client, mustNewRequest(t, http.MethodOptions, cluster.VolumePublicURL()+"/"))
	_ = framework.ReadAllAndClose(t, publicResp)
	if publicResp.StatusCode != http.StatusOK {
		t.Fatalf("public OPTIONS expected 200, got %d", publicResp.StatusCode)
	}
	// Public port: read-only — GET/OPTIONS must be present, POST must not.
	publicAllowed := publicResp.Header.Get("Access-Control-Allow-Methods")
	if !strings.Contains(publicAllowed, "GET") || !strings.Contains(publicAllowed, "OPTIONS") {
		t.Fatalf("public allow methods expected GET and OPTIONS, got %q", publicAllowed)
	}
	if strings.Contains(publicAllowed, "POST") {
		t.Fatalf("public allow methods should not include POST, got %q", publicAllowed)
	}
	if publicResp.Header.Get("Access-Control-Allow-Headers") != "*" {
		t.Fatalf("public allow headers expected '*', got %q", publicResp.Header.Get("Access-Control-Allow-Headers"))
	}
}
// TestOptionsWithOriginIncludesCorsHeaders verifies that an OPTIONS request
// carrying an Origin header receives wildcard-origin and credentials CORS
// headers on both the admin and the public port.
func TestOptionsWithOriginIncludesCorsHeaders(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	cluster := framework.StartSingleVolumeCluster(t, matrix.P2())
	client := framework.NewHTTPClient()
	// assertCorsHeaders factors out the identical admin/public check that was
	// previously duplicated verbatim: send OPTIONS with an Origin header and
	// verify status plus both CORS response headers.
	assertCorsHeaders := func(name, url string) {
		t.Helper()
		req := mustNewRequest(t, http.MethodOptions, url)
		req.Header.Set("Origin", "https://example.com")
		resp := framework.DoRequest(t, client, req)
		_ = framework.ReadAllAndClose(t, resp)
		if resp.StatusCode != http.StatusOK {
			t.Fatalf("%s OPTIONS expected 200, got %d", name, resp.StatusCode)
		}
		if got := resp.Header.Get("Access-Control-Allow-Origin"); got != "*" {
			t.Fatalf("%s OPTIONS expected Access-Control-Allow-Origin=*, got %q", name, got)
		}
		if got := resp.Header.Get("Access-Control-Allow-Credentials"); got != "true" {
			t.Fatalf("%s OPTIONS expected Access-Control-Allow-Credentials=true, got %q", name, got)
		}
	}
	assertCorsHeaders("admin", cluster.VolumeAdminURL()+"/")
	assertCorsHeaders("public", cluster.VolumePublicURL()+"/")
}
// TestUiIndexNotExposedWhenJwtSigningEnabled ensures the status UI page is
// gated by authentication (401) when the JWT profile is active.
func TestUiIndexNotExposedWhenJwtSigningEnabled(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	cluster := framework.StartSingleVolumeCluster(t, matrix.P3())
	httpClient := framework.NewHTTPClient()
	uiReq := mustNewRequest(t, http.MethodGet, cluster.VolumeAdminURL()+"/ui/index.html")
	uiResp := framework.DoRequest(t, httpClient, uiReq)
	uiBody := framework.ReadAllAndClose(t, uiResp)
	if uiResp.StatusCode != http.StatusUnauthorized {
		t.Fatalf("expected /ui/index.html to be gated by auth under JWT profile (401), got %d body=%s", uiResp.StatusCode, string(uiBody))
	}
}
// mustNewRequest constructs a body-less HTTP request for the given method and
// URL, aborting the test immediately if construction fails.
func mustNewRequest(t testing.TB, method, url string) *http.Request {
	t.Helper()
	request, err := http.NewRequest(method, url, nil)
	if err != nil {
		t.Fatalf("create request %s %s: %v", method, url, err)
	}
	return request
}

419
test/volume_server/http/auth_test.go

@ -0,0 +1,419 @@
package volume_server_http_test
import (
"bytes"
"net/http"
"testing"
"time"
jwt "github.com/golang-jwt/jwt/v5"
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
"github.com/seaweedfs/seaweedfs/weed/security"
)
// TestJWTAuthForWriteAndRead exercises the volume server's JWT enforcement:
// writes without a token or with a garbage token get 401, a token signed with
// the write key succeeds (201), reads without a token get 401, and a token
// signed with the read key returns the stored content (200).
func TestJWTAuthForWriteAndRead(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	profile := matrix.P3()
	clusterHarness := framework.StartSingleVolumeCluster(t, profile)
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(51)
	const needleID = uint64(123456)
	const cookie = uint32(0xABCDEF12)
	framework.AllocateVolume(t, grpcClient, volumeID, "")
	fid := framework.NewFileID(volumeID, needleID, cookie)
	payload := []byte("jwt-protected-content")
	client := framework.NewHTTPClient()
	// Write with no Authorization header at all.
	unauthWrite := newUploadRequest(t, clusterHarness.VolumeAdminURL()+"/"+fid, payload)
	unauthWriteResp := framework.DoRequest(t, client, unauthWrite)
	_ = framework.ReadAllAndClose(t, unauthWriteResp)
	if unauthWriteResp.StatusCode != http.StatusUnauthorized {
		t.Fatalf("unauthorized write expected 401, got %d", unauthWriteResp.StatusCode)
	}
	// Write with a syntactically invalid bearer token.
	invalidWrite := newUploadRequest(t, clusterHarness.VolumeAdminURL()+"/"+fid, payload)
	invalidWrite.Header.Set("Authorization", "Bearer invalid")
	invalidWriteResp := framework.DoRequest(t, client, invalidWrite)
	_ = framework.ReadAllAndClose(t, invalidWriteResp)
	if invalidWriteResp.StatusCode != http.StatusUnauthorized {
		t.Fatalf("invalid write token expected 401, got %d", invalidWriteResp.StatusCode)
	}
	// Write with a token signed for this exact fid using the write key.
	writeToken := security.GenJwtForVolumeServer(security.SigningKey([]byte(profile.JWTSigningKey)), 60, fid)
	authWrite := newUploadRequest(t, clusterHarness.VolumeAdminURL()+"/"+fid, payload)
	authWrite.Header.Set("Authorization", "Bearer "+string(writeToken))
	authWriteResp := framework.DoRequest(t, client, authWrite)
	_ = framework.ReadAllAndClose(t, authWriteResp)
	if authWriteResp.StatusCode != http.StatusCreated {
		t.Fatalf("authorized write expected 201, got %d", authWriteResp.StatusCode)
	}
	// Reads are also gated under this profile: no token means 401.
	unauthReadReq := mustNewRequest(t, http.MethodGet, clusterHarness.VolumeAdminURL()+"/"+fid)
	unauthReadResp := framework.DoRequest(t, client, unauthReadReq)
	_ = framework.ReadAllAndClose(t, unauthReadResp)
	if unauthReadResp.StatusCode != http.StatusUnauthorized {
		t.Fatalf("unauthorized read expected 401, got %d", unauthReadResp.StatusCode)
	}
	// Read with a token signed using the separate read key.
	readToken := security.GenJwtForVolumeServer(security.SigningKey([]byte(profile.JWTReadKey)), 60, fid)
	authReadReq := mustNewRequest(t, http.MethodGet, clusterHarness.VolumeAdminURL()+"/"+fid)
	authReadReq.Header.Set("Authorization", "Bearer "+string(readToken))
	authReadResp := framework.DoRequest(t, client, authReadReq)
	authReadBody := framework.ReadAllAndClose(t, authReadResp)
	if authReadResp.StatusCode != http.StatusOK {
		t.Fatalf("authorized read expected 200, got %d", authReadResp.StatusCode)
	}
	if string(authReadBody) != string(payload) {
		t.Fatalf("authorized read content mismatch: got %q want %q", string(authReadBody), string(payload))
	}
}
// TestJWTAuthRejectsFidMismatch verifies that a JWT is bound to a specific
// file id: tokens minted for a different needle, or for the right needle with
// the wrong cookie, are rejected (401) for both writes and reads, while a
// token for the exact fid is accepted.
func TestJWTAuthRejectsFidMismatch(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	profile := matrix.P3()
	clusterHarness := framework.StartSingleVolumeCluster(t, profile)
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(52)
	const needleID = uint64(223344)
	const cookie = uint32(0x10203040)
	const otherNeedleID = uint64(223345)
	const otherCookie = uint32(0x50607080)
	const wrongCookie = uint32(0x10203041)
	framework.AllocateVolume(t, grpcClient, volumeID, "")
	fid := framework.NewFileID(volumeID, needleID, cookie)
	otherFid := framework.NewFileID(volumeID, otherNeedleID, otherCookie)
	payload := []byte("jwt-fid-mismatch-content")
	client := framework.NewHTTPClient()
	// Write to fid using a token minted for a completely different fid.
	writeTokenForOtherFid := security.GenJwtForVolumeServer(security.SigningKey([]byte(profile.JWTSigningKey)), 60, otherFid)
	mismatchedWrite := newUploadRequest(t, clusterHarness.VolumeAdminURL()+"/"+fid, payload)
	mismatchedWrite.Header.Set("Authorization", "Bearer "+string(writeTokenForOtherFid))
	mismatchedWriteResp := framework.DoRequest(t, client, mismatchedWrite)
	_ = framework.ReadAllAndClose(t, mismatchedWriteResp)
	if mismatchedWriteResp.StatusCode != http.StatusUnauthorized {
		t.Fatalf("write with mismatched fid token expected 401, got %d", mismatchedWriteResp.StatusCode)
	}
	// Same needle id but the cookie differs by one — must also be rejected.
	wrongCookieFid := framework.NewFileID(volumeID, needleID, wrongCookie)
	writeTokenWrongCookie := security.GenJwtForVolumeServer(security.SigningKey([]byte(profile.JWTSigningKey)), 60, wrongCookieFid)
	wrongCookieWrite := newUploadRequest(t, clusterHarness.VolumeAdminURL()+"/"+fid, payload)
	wrongCookieWrite.Header.Set("Authorization", "Bearer "+string(writeTokenWrongCookie))
	wrongCookieWriteResp := framework.DoRequest(t, client, wrongCookieWrite)
	_ = framework.ReadAllAndClose(t, wrongCookieWriteResp)
	if wrongCookieWriteResp.StatusCode != http.StatusUnauthorized {
		t.Fatalf("write with wrong-cookie fid token expected 401, got %d", wrongCookieWriteResp.StatusCode)
	}
	// Seed the needle with a token minted for the correct fid so the read
	// paths below run against existing content.
	writeTokenForFid := security.GenJwtForVolumeServer(security.SigningKey([]byte(profile.JWTSigningKey)), 60, fid)
	validWrite := newUploadRequest(t, clusterHarness.VolumeAdminURL()+"/"+fid, payload)
	validWrite.Header.Set("Authorization", "Bearer "+string(writeTokenForFid))
	validWriteResp := framework.DoRequest(t, client, validWrite)
	_ = framework.ReadAllAndClose(t, validWriteResp)
	if validWriteResp.StatusCode != http.StatusCreated {
		t.Fatalf("authorized write expected 201, got %d", validWriteResp.StatusCode)
	}
	// Read-side checks mirror the write-side ones, using the read key.
	readTokenForOtherFid := security.GenJwtForVolumeServer(security.SigningKey([]byte(profile.JWTReadKey)), 60, otherFid)
	mismatchedReadReq := mustNewRequest(t, http.MethodGet, clusterHarness.VolumeAdminURL()+"/"+fid)
	mismatchedReadReq.Header.Set("Authorization", "Bearer "+string(readTokenForOtherFid))
	mismatchedReadResp := framework.DoRequest(t, client, mismatchedReadReq)
	_ = framework.ReadAllAndClose(t, mismatchedReadResp)
	if mismatchedReadResp.StatusCode != http.StatusUnauthorized {
		t.Fatalf("read with mismatched fid token expected 401, got %d", mismatchedReadResp.StatusCode)
	}
	readTokenWrongCookie := security.GenJwtForVolumeServer(security.SigningKey([]byte(profile.JWTReadKey)), 60, wrongCookieFid)
	wrongCookieReadReq := mustNewRequest(t, http.MethodGet, clusterHarness.VolumeAdminURL()+"/"+fid)
	wrongCookieReadReq.Header.Set("Authorization", "Bearer "+string(readTokenWrongCookie))
	wrongCookieReadResp := framework.DoRequest(t, client, wrongCookieReadReq)
	_ = framework.ReadAllAndClose(t, wrongCookieReadResp)
	if wrongCookieReadResp.StatusCode != http.StatusUnauthorized {
		t.Fatalf("read with wrong-cookie fid token expected 401, got %d", wrongCookieReadResp.StatusCode)
	}
}
func newUploadRequest(t testing.TB, url string, payload []byte) *http.Request {
t.Helper()
req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(payload))
if err != nil {
t.Fatalf("create upload request %s: %v", url, err)
}
req.Header.Set("Content-Type", "application/octet-stream")
return req
}
// TestJWTAuthRejectsExpiredTokens verifies that both the write path and the
// read path answer 401 Unauthorized when the presented JWT is already expired,
// even if the same key would otherwise authorize the fid.
func TestJWTAuthRejectsExpiredTokens(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	profile := matrix.P3()
	harness := framework.StartSingleVolumeCluster(t, profile)
	conn, volumeClient := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer conn.Close()
	const (
		volumeID = uint32(53)
		needleID = uint64(334455)
		cookie   = uint32(0x22334455)
	)
	framework.AllocateVolume(t, volumeClient, volumeID, "")
	fid := framework.NewFileID(volumeID, needleID, cookie)
	payload := []byte("expired-token-content")
	httpClient := framework.NewHTTPClient()
	// A write carrying an already-expired token must be rejected outright.
	staleWriteToken := mustGenExpiredToken(t, []byte(profile.JWTSigningKey), fid)
	writeReq := newUploadRequest(t, harness.VolumeAdminURL()+"/"+fid, payload)
	writeReq.Header.Set("Authorization", "Bearer "+staleWriteToken)
	writeResp := framework.DoRequest(t, httpClient, writeReq)
	_ = framework.ReadAllAndClose(t, writeResp)
	if writeResp.StatusCode != http.StatusUnauthorized {
		t.Fatalf("expired write token expected 401, got %d", writeResp.StatusCode)
	}
	// Seed data with a valid token so read auth path can be exercised against existing content.
	freshWriteToken := security.GenJwtForVolumeServer(security.SigningKey([]byte(profile.JWTSigningKey)), 60, fid)
	seedReq := newUploadRequest(t, harness.VolumeAdminURL()+"/"+fid, payload)
	seedReq.Header.Set("Authorization", "Bearer "+string(freshWriteToken))
	seedResp := framework.DoRequest(t, httpClient, seedReq)
	_ = framework.ReadAllAndClose(t, seedResp)
	if seedResp.StatusCode != http.StatusCreated {
		t.Fatalf("valid write expected 201, got %d", seedResp.StatusCode)
	}
	// An expired read token must also fail even though the content exists.
	staleReadToken := mustGenExpiredToken(t, []byte(profile.JWTReadKey), fid)
	readReq := mustNewRequest(t, http.MethodGet, harness.VolumeAdminURL()+"/"+fid)
	readReq.Header.Set("Authorization", "Bearer "+staleReadToken)
	readResp := framework.DoRequest(t, httpClient, readReq)
	_ = framework.ReadAllAndClose(t, readResp)
	if readResp.StatusCode != http.StatusUnauthorized {
		t.Fatalf("expired read token expected 401, got %d", readResp.StatusCode)
	}
}
// TestJWTAuthViaQueryParamAndCookie verifies that a write JWT supplied via the
// ?jwt= query parameter and a read JWT supplied via the "AT" cookie are both
// accepted by the volume server.
func TestJWTAuthViaQueryParamAndCookie(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	profile := matrix.P3()
	harness := framework.StartSingleVolumeCluster(t, profile)
	conn, volumeClient := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer conn.Close()
	const (
		volumeID = uint32(54)
		needleID = uint64(445566)
		cookie   = uint32(0x31415926)
	)
	framework.AllocateVolume(t, volumeClient, volumeID, "")
	fid := framework.NewFileID(volumeID, needleID, cookie)
	payload := []byte("jwt-query-cookie-content")
	httpClient := framework.NewHTTPClient()
	// Upload with the token carried in the query string instead of a header.
	uploadToken := security.GenJwtForVolumeServer(security.SigningKey([]byte(profile.JWTSigningKey)), 60, fid)
	uploadReq := newUploadRequest(t, harness.VolumeAdminURL()+"/"+fid+"?jwt="+string(uploadToken), payload)
	uploadResp := framework.DoRequest(t, httpClient, uploadReq)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("query-jwt write expected 201, got %d", uploadResp.StatusCode)
	}
	// Read back with the token carried in the "AT" cookie.
	downloadToken := security.GenJwtForVolumeServer(security.SigningKey([]byte(profile.JWTReadKey)), 60, fid)
	downloadReq := mustNewRequest(t, http.MethodGet, harness.VolumeAdminURL()+"/"+fid)
	downloadReq.AddCookie(&http.Cookie{Name: "AT", Value: string(downloadToken)})
	downloadResp := framework.DoRequest(t, httpClient, downloadReq)
	downloadBody := framework.ReadAllAndClose(t, downloadResp)
	if downloadResp.StatusCode != http.StatusOK {
		t.Fatalf("cookie-jwt read expected 200, got %d", downloadResp.StatusCode)
	}
	if string(downloadBody) != string(payload) {
		t.Fatalf("cookie-jwt read body mismatch: got %q want %q", string(downloadBody), string(payload))
	}
}
// TestJWTTokenSourcePrecedenceQueryOverHeader checks that when a request
// carries a JWT both in the ?jwt= query parameter and in the Authorization
// header, the query token is the one evaluated: a query token minted for a
// different fid yields 401 even though the header token is valid.
func TestJWTTokenSourcePrecedenceQueryOverHeader(t *testing.T) {
if testing.Short() {
t.Skip("skipping integration test in short mode")
}
profile := matrix.P3()
clusterHarness := framework.StartSingleVolumeCluster(t, profile)
conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
defer conn.Close()
const volumeID = uint32(55)
const needleID = uint64(556677)
const cookie = uint32(0x99887766)
framework.AllocateVolume(t, grpcClient, volumeID, "")
fid := framework.NewFileID(volumeID, needleID, cookie)
// otherFID uses a different needle/cookie, so tokens minted for it do not authorize fid.
otherFID := framework.NewFileID(volumeID, needleID+1, cookie+1)
payload := []byte("jwt-precedence-content")
client := framework.NewHTTPClient()
validWriteToken := security.GenJwtForVolumeServer(security.SigningKey([]byte(profile.JWTSigningKey)), 60, fid)
invalidWriteQueryToken := security.GenJwtForVolumeServer(security.SigningKey([]byte(profile.JWTSigningKey)), 60, otherFID)
// Mismatched token in the query string, valid token in the header: the query token should win and be rejected.
writeReq := newUploadRequest(t, clusterHarness.VolumeAdminURL()+"/"+fid+"?jwt="+string(invalidWriteQueryToken), payload)
writeReq.Header.Set("Authorization", "Bearer "+string(validWriteToken))
writeResp := framework.DoRequest(t, client, writeReq)
_ = framework.ReadAllAndClose(t, writeResp)
if writeResp.StatusCode != http.StatusUnauthorized {
t.Fatalf("query token should take precedence over header token for write, expected 401 got %d", writeResp.StatusCode)
}
// Seed data with valid write token, then exercise read precedence.
seedWriteReq := newUploadRequest(t, clusterHarness.VolumeAdminURL()+"/"+fid, payload)
seedWriteReq.Header.Set("Authorization", "Bearer "+string(validWriteToken))
seedWriteResp := framework.DoRequest(t, client, seedWriteReq)
_ = framework.ReadAllAndClose(t, seedWriteResp)
if seedWriteResp.StatusCode != http.StatusCreated {
t.Fatalf("seed write expected 201, got %d", seedWriteResp.StatusCode)
}
validReadToken := security.GenJwtForVolumeServer(security.SigningKey([]byte(profile.JWTReadKey)), 60, fid)
invalidReadQueryToken := security.GenJwtForVolumeServer(security.SigningKey([]byte(profile.JWTReadKey)), 60, otherFID)
// Same arrangement on the read path: the mismatched query token should dominate the valid header token.
readReq := mustNewRequest(t, http.MethodGet, clusterHarness.VolumeAdminURL()+"/"+fid+"?jwt="+string(invalidReadQueryToken))
readReq.Header.Set("Authorization", "Bearer "+string(validReadToken))
readResp := framework.DoRequest(t, client, readReq)
_ = framework.ReadAllAndClose(t, readResp)
if readResp.StatusCode != http.StatusUnauthorized {
t.Fatalf("query token should take precedence over header token for read, expected 401 got %d", readResp.StatusCode)
}
}
// TestJWTTokenSourcePrecedenceHeaderOverCookie verifies that when both an
// Authorization header token and an "AT" cookie token are present, the header
// token is the one evaluated: a valid header token lets writes and reads
// succeed despite a mismatched cookie token.
func TestJWTTokenSourcePrecedenceHeaderOverCookie(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	profile := matrix.P3()
	harness := framework.StartSingleVolumeCluster(t, profile)
	conn, volumeClient := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer conn.Close()
	const (
		volumeID = uint32(56)
		needleID = uint64(667788)
		cookie   = uint32(0x11229988)
	)
	framework.AllocateVolume(t, volumeClient, volumeID, "")
	fid := framework.NewFileID(volumeID, needleID, cookie)
	// otherFID uses a different needle/cookie, so tokens for it do not authorize fid.
	otherFID := framework.NewFileID(volumeID, needleID+1, cookie+1)
	payload := []byte("jwt-precedence-header-cookie")
	httpClient := framework.NewHTTPClient()
	// Valid header token plus mismatched cookie token: the header must win, so the write succeeds.
	goodWriteToken := security.GenJwtForVolumeServer(security.SigningKey([]byte(profile.JWTSigningKey)), 60, fid)
	badCookieWriteToken := security.GenJwtForVolumeServer(security.SigningKey([]byte(profile.JWTSigningKey)), 60, otherFID)
	uploadReq := newUploadRequest(t, harness.VolumeAdminURL()+"/"+fid, payload)
	uploadReq.Header.Set("Authorization", "Bearer "+string(goodWriteToken))
	uploadReq.AddCookie(&http.Cookie{Name: "AT", Value: string(badCookieWriteToken)})
	uploadResp := framework.DoRequest(t, httpClient, uploadReq)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("header token should take precedence over cookie token for write, expected 201 got %d", uploadResp.StatusCode)
	}
	// Same arrangement on the read path: valid header token, mismatched cookie token.
	goodReadToken := security.GenJwtForVolumeServer(security.SigningKey([]byte(profile.JWTReadKey)), 60, fid)
	badCookieReadToken := security.GenJwtForVolumeServer(security.SigningKey([]byte(profile.JWTReadKey)), 60, otherFID)
	downloadReq := mustNewRequest(t, http.MethodGet, harness.VolumeAdminURL()+"/"+fid)
	downloadReq.Header.Set("Authorization", "Bearer "+string(goodReadToken))
	downloadReq.AddCookie(&http.Cookie{Name: "AT", Value: string(badCookieReadToken)})
	downloadResp := framework.DoRequest(t, httpClient, downloadReq)
	downloadBody := framework.ReadAllAndClose(t, downloadResp)
	if downloadResp.StatusCode != http.StatusOK {
		t.Fatalf("header token should take precedence over cookie token for read, expected 200 got %d", downloadResp.StatusCode)
	}
	if string(downloadBody) != string(payload) {
		t.Fatalf("header-over-cookie read body mismatch: got %q want %q", string(downloadBody), string(payload))
	}
}
// TestJWTTokenSourcePrecedenceQueryOverCookie checks that a ?jwt= query token
// outranks an "AT" cookie token in both directions: a mismatched query token
// causes 401 despite a valid cookie token, and a valid query token succeeds
// despite a mismatched cookie token.
func TestJWTTokenSourcePrecedenceQueryOverCookie(t *testing.T) {
if testing.Short() {
t.Skip("skipping integration test in short mode")
}
profile := matrix.P3()
clusterHarness := framework.StartSingleVolumeCluster(t, profile)
conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
defer conn.Close()
const volumeID = uint32(57)
const needleID = uint64(778899)
const cookie = uint32(0x88776655)
framework.AllocateVolume(t, grpcClient, volumeID, "")
fid := framework.NewFileID(volumeID, needleID, cookie)
// otherFID uses a different needle/cookie, so tokens minted for it do not authorize fid.
otherFID := framework.NewFileID(volumeID, needleID+1, cookie+1)
payload := []byte("jwt-precedence-query-cookie")
client := framework.NewHTTPClient()
validWriteToken := security.GenJwtForVolumeServer(security.SigningKey([]byte(profile.JWTSigningKey)), 60, fid)
invalidQueryWriteToken := security.GenJwtForVolumeServer(security.SigningKey([]byte(profile.JWTSigningKey)), 60, otherFID)
// Mismatched query token plus valid cookie token: the query token should be the one rejected.
writeReq := newUploadRequest(t, clusterHarness.VolumeAdminURL()+"/"+fid+"?jwt="+string(invalidQueryWriteToken), payload)
writeReq.AddCookie(&http.Cookie{Name: "AT", Value: string(validWriteToken)})
writeResp := framework.DoRequest(t, client, writeReq)
_ = framework.ReadAllAndClose(t, writeResp)
if writeResp.StatusCode != http.StatusUnauthorized {
t.Fatalf("query token should take precedence over cookie token for write, expected 401 got %d", writeResp.StatusCode)
}
// Seed data with valid write token so read precedence can be exercised.
seedWriteReq := newUploadRequest(t, clusterHarness.VolumeAdminURL()+"/"+fid, payload)
seedWriteReq.Header.Set("Authorization", "Bearer "+string(validWriteToken))
seedWriteResp := framework.DoRequest(t, client, seedWriteReq)
_ = framework.ReadAllAndClose(t, seedWriteResp)
if seedWriteResp.StatusCode != http.StatusCreated {
t.Fatalf("seed write expected 201, got %d", seedWriteResp.StatusCode)
}
validReadToken := security.GenJwtForVolumeServer(security.SigningKey([]byte(profile.JWTReadKey)), 60, fid)
invalidQueryReadToken := security.GenJwtForVolumeServer(security.SigningKey([]byte(profile.JWTReadKey)), 60, otherFID)
// Mismatched query token plus valid cookie token on the read path: expect rejection.
readReq := mustNewRequest(t, http.MethodGet, clusterHarness.VolumeAdminURL()+"/"+fid+"?jwt="+string(invalidQueryReadToken))
readReq.AddCookie(&http.Cookie{Name: "AT", Value: string(validReadToken)})
readResp := framework.DoRequest(t, client, readReq)
_ = framework.ReadAllAndClose(t, readResp)
if readResp.StatusCode != http.StatusUnauthorized {
t.Fatalf("query token should take precedence over cookie token for read, expected 401 got %d", readResp.StatusCode)
}
// Validate positive path: valid query token should succeed even if cookie token is invalid.
validQueryReadReq := mustNewRequest(t, http.MethodGet, clusterHarness.VolumeAdminURL()+"/"+fid+"?jwt="+string(validReadToken))
invalidCookieReadToken := security.GenJwtForVolumeServer(security.SigningKey([]byte(profile.JWTReadKey)), 60, otherFID)
validQueryReadReq.AddCookie(&http.Cookie{Name: "AT", Value: string(invalidCookieReadToken)})
validQueryReadResp := framework.DoRequest(t, client, validQueryReadReq)
validQueryReadBody := framework.ReadAllAndClose(t, validQueryReadResp)
if validQueryReadResp.StatusCode != http.StatusOK {
t.Fatalf("valid query token should succeed over invalid cookie token, expected 200 got %d", validQueryReadResp.StatusCode)
}
if string(validQueryReadBody) != string(payload) {
t.Fatalf("query-over-cookie read body mismatch: got %q want %q", string(validQueryReadBody), string(payload))
}
}
// mustGenExpiredToken signs a SeaweedFS file-id JWT for fid with the given
// HMAC key whose expiry is already one minute in the past, failing the test
// if signing fails.
func mustGenExpiredToken(t testing.TB, key []byte, fid string) string {
	t.Helper()
	expiredClaims := security.SeaweedFileIdClaims{
		Fid: fid,
		RegisteredClaims: jwt.RegisteredClaims{
			// An expiry in the past guarantees the server must reject the token.
			ExpiresAt: jwt.NewNumericDate(time.Now().Add(-time.Minute)),
		},
	}
	signed, err := jwt.NewWithClaims(jwt.SigningMethodHS256, expiredClaims).SignedString(key)
	if err != nil {
		t.Fatalf("sign expired token: %v", err)
	}
	return signed
}

232
test/volume_server/http/chunk_manifest_test.go

@ -0,0 +1,232 @@
package volume_server_http_test
import (
"bytes"
"encoding/json"
"net/http"
"testing"
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
"github.com/seaweedfs/seaweedfs/weed/operation"
)
// TestChunkManifestExpansionAndBypass uploads a data chunk plus a chunk
// manifest (?cm=true) that references it, then verifies that a plain read of
// the manifest fid streams the expanded chunk bytes (with X-File-Store=chunked)
// while a ?cm=false read bypasses expansion and returns the raw manifest JSON.
func TestChunkManifestExpansionAndBypass(t *testing.T) {
if testing.Short() {
t.Skip("skipping integration test in short mode")
}
clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
defer conn.Close()
const volumeID = uint32(102)
framework.AllocateVolume(t, grpcClient, volumeID, "")
client := framework.NewHTTPClient()
// Upload the raw chunk the manifest will point at.
chunkFID := framework.NewFileID(volumeID, 772005, 0x5E6F7081)
chunkPayload := []byte("chunk-manifest-expanded-content")
chunkUploadResp := framework.UploadBytes(t, client, clusterHarness.VolumeAdminURL(), chunkFID, chunkPayload)
_ = framework.ReadAllAndClose(t, chunkUploadResp)
if chunkUploadResp.StatusCode != http.StatusCreated {
t.Fatalf("chunk upload expected 201, got %d", chunkUploadResp.StatusCode)
}
// Build a single-chunk manifest that covers the whole payload.
manifest := &operation.ChunkManifest{
Name: "manifest.bin",
Mime: "application/octet-stream",
Size: int64(len(chunkPayload)),
Chunks: []*operation.ChunkInfo{
{
Fid: chunkFID,
Offset: 0,
Size: int64(len(chunkPayload)),
},
},
}
manifestBytes, err := json.Marshal(manifest)
if err != nil {
t.Fatalf("marshal chunk manifest: %v", err)
}
// cm=true marks the upload as a chunk manifest rather than plain content.
manifestFID := framework.NewFileID(volumeID, 772006, 0x6F708192)
manifestUploadReq, err := http.NewRequest(http.MethodPost, clusterHarness.VolumeAdminURL()+"/"+manifestFID+"?cm=true", bytes.NewReader(manifestBytes))
if err != nil {
t.Fatalf("create manifest upload request: %v", err)
}
manifestUploadReq.Header.Set("Content-Type", "application/json")
manifestUploadResp := framework.DoRequest(t, client, manifestUploadReq)
_ = framework.ReadAllAndClose(t, manifestUploadResp)
if manifestUploadResp.StatusCode != http.StatusCreated {
t.Fatalf("manifest upload expected 201, got %d", manifestUploadResp.StatusCode)
}
// A plain read of the manifest fid should expand to the chunk's bytes.
expandedReadResp := framework.ReadBytes(t, client, clusterHarness.VolumeAdminURL(), manifestFID)
expandedReadBody := framework.ReadAllAndClose(t, expandedReadResp)
if expandedReadResp.StatusCode != http.StatusOK {
t.Fatalf("manifest expanded read expected 200, got %d", expandedReadResp.StatusCode)
}
if string(expandedReadBody) != string(chunkPayload) {
t.Fatalf("manifest expanded read mismatch: got %q want %q", string(expandedReadBody), string(chunkPayload))
}
if expandedReadResp.Header.Get("X-File-Store") != "chunked" {
t.Fatalf("manifest expanded read expected X-File-Store=chunked, got %q", expandedReadResp.Header.Get("X-File-Store"))
}
// cm=false disables expansion: the raw manifest JSON should come back with no chunked marker.
bypassReadResp := framework.DoRequest(t, client, mustNewRequest(t, http.MethodGet, clusterHarness.VolumeAdminURL()+"/"+manifestFID+"?cm=false"))
bypassReadBody := framework.ReadAllAndClose(t, bypassReadResp)
if bypassReadResp.StatusCode != http.StatusOK {
t.Fatalf("manifest bypass read expected 200, got %d", bypassReadResp.StatusCode)
}
if bypassReadResp.Header.Get("X-File-Store") != "" {
t.Fatalf("manifest bypass read expected empty X-File-Store header, got %q", bypassReadResp.Header.Get("X-File-Store"))
}
var gotManifest operation.ChunkManifest
if err = json.Unmarshal(bypassReadBody, &gotManifest); err != nil {
t.Fatalf("manifest bypass read expected JSON payload, got decode error: %v body=%q", err, string(bypassReadBody))
}
if len(gotManifest.Chunks) != 1 || gotManifest.Chunks[0].Fid != chunkFID {
t.Fatalf("manifest bypass read payload mismatch: %+v", gotManifest)
}
}
// TestChunkManifestDeleteRemovesChildChunks verifies that deleting a chunk
// manifest fid cascades: the delete reports the aggregate chunk size, and
// afterwards both the manifest fid and the referenced chunk fid return 404.
func TestChunkManifestDeleteRemovesChildChunks(t *testing.T) {
if testing.Short() {
t.Skip("skipping integration test in short mode")
}
clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
defer conn.Close()
const volumeID = uint32(104)
framework.AllocateVolume(t, grpcClient, volumeID, "")
client := framework.NewHTTPClient()
// Upload the child chunk that the manifest will reference.
chunkFID := framework.NewFileID(volumeID, 772008, 0x8192A3B4)
chunkPayload := []byte("chunk-manifest-delete-content")
chunkUploadResp := framework.UploadBytes(t, client, clusterHarness.VolumeAdminURL(), chunkFID, chunkPayload)
_ = framework.ReadAllAndClose(t, chunkUploadResp)
if chunkUploadResp.StatusCode != http.StatusCreated {
t.Fatalf("chunk upload expected 201, got %d", chunkUploadResp.StatusCode)
}
// Single-chunk manifest covering the whole payload.
manifest := &operation.ChunkManifest{
Name: "manifest-delete.bin",
Mime: "application/octet-stream",
Size: int64(len(chunkPayload)),
Chunks: []*operation.ChunkInfo{
{
Fid: chunkFID,
Offset: 0,
Size: int64(len(chunkPayload)),
},
},
}
manifestBytes, err := json.Marshal(manifest)
if err != nil {
t.Fatalf("marshal chunk manifest: %v", err)
}
// cm=true marks the upload as a chunk manifest.
manifestFID := framework.NewFileID(volumeID, 772009, 0x92A3B4C5)
manifestUploadReq, err := http.NewRequest(http.MethodPost, clusterHarness.VolumeAdminURL()+"/"+manifestFID+"?cm=true", bytes.NewReader(manifestBytes))
if err != nil {
t.Fatalf("create manifest upload request: %v", err)
}
manifestUploadReq.Header.Set("Content-Type", "application/json")
manifestUploadResp := framework.DoRequest(t, client, manifestUploadReq)
_ = framework.ReadAllAndClose(t, manifestUploadResp)
if manifestUploadResp.StatusCode != http.StatusCreated {
t.Fatalf("manifest upload expected 201, got %d", manifestUploadResp.StatusCode)
}
// Deleting the manifest should cascade to its child chunks.
deleteResp := framework.DoRequest(t, client, mustNewRequest(t, http.MethodDelete, clusterHarness.VolumeAdminURL()+"/"+manifestFID))
deleteBody := framework.ReadAllAndClose(t, deleteResp)
if deleteResp.StatusCode != http.StatusAccepted {
t.Fatalf("manifest delete expected 202, got %d", deleteResp.StatusCode)
}
// The delete response reports the total size reclaimed.
var deleteResult map[string]int64
if err = json.Unmarshal(deleteBody, &deleteResult); err != nil {
t.Fatalf("decode manifest delete response: %v body=%q", err, string(deleteBody))
}
if deleteResult["size"] != int64(len(chunkPayload)) {
t.Fatalf("manifest delete expected size=%d, got %d", len(chunkPayload), deleteResult["size"])
}
// Both the manifest fid and the child chunk fid must now be gone.
manifestReadAfterDelete := framework.ReadBytes(t, client, clusterHarness.VolumeAdminURL(), manifestFID)
_ = framework.ReadAllAndClose(t, manifestReadAfterDelete)
if manifestReadAfterDelete.StatusCode != http.StatusNotFound {
t.Fatalf("manifest read after delete expected 404, got %d", manifestReadAfterDelete.StatusCode)
}
chunkReadAfterDelete := framework.ReadBytes(t, client, clusterHarness.VolumeAdminURL(), chunkFID)
_ = framework.ReadAllAndClose(t, chunkReadAfterDelete)
if chunkReadAfterDelete.StatusCode != http.StatusNotFound {
t.Fatalf("chunk read after manifest delete expected 404, got %d", chunkReadAfterDelete.StatusCode)
}
}
// TestChunkManifestDeleteFailsWhenChildDeletionFails uploads a manifest whose
// only chunk fid is unparseable, then verifies that deleting the manifest
// returns 500 and leaves the manifest itself intact (readable via ?cm=false).
func TestChunkManifestDeleteFailsWhenChildDeletionFails(t *testing.T) {
if testing.Short() {
t.Skip("skipping integration test in short mode")
}
clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
defer conn.Close()
const volumeID = uint32(105)
framework.AllocateVolume(t, grpcClient, volumeID, "")
client := framework.NewHTTPClient()
// The chunk fid is deliberately malformed so the cascading delete must fail.
manifest := &operation.ChunkManifest{
Name: "manifest-delete-failure.bin",
Mime: "application/octet-stream",
Size: 1,
Chunks: []*operation.ChunkInfo{
{
Fid: "not-a-valid-fid",
Offset: 0,
Size: 1,
},
},
}
manifestBytes, err := json.Marshal(manifest)
if err != nil {
t.Fatalf("marshal chunk manifest: %v", err)
}
// cm=true marks the upload as a chunk manifest.
manifestFID := framework.NewFileID(volumeID, 772010, 0xA3B4C5D6)
manifestUploadReq, err := http.NewRequest(http.MethodPost, clusterHarness.VolumeAdminURL()+"/"+manifestFID+"?cm=true", bytes.NewReader(manifestBytes))
if err != nil {
t.Fatalf("create manifest upload request: %v", err)
}
manifestUploadReq.Header.Set("Content-Type", "application/json")
manifestUploadResp := framework.DoRequest(t, client, manifestUploadReq)
_ = framework.ReadAllAndClose(t, manifestUploadResp)
if manifestUploadResp.StatusCode != http.StatusCreated {
t.Fatalf("manifest upload expected 201, got %d", manifestUploadResp.StatusCode)
}
// The delete should surface the child-deletion failure as a 500.
deleteResp := framework.DoRequest(t, client, mustNewRequest(t, http.MethodDelete, clusterHarness.VolumeAdminURL()+"/"+manifestFID))
deleteBody := framework.ReadAllAndClose(t, deleteResp)
if deleteResp.StatusCode != http.StatusInternalServerError {
t.Fatalf("manifest delete with invalid child fid expected 500, got %d body=%q", deleteResp.StatusCode, string(deleteBody))
}
// After the failed delete the manifest must still be readable in bypass mode.
manifestBypassRead := framework.DoRequest(t, client, mustNewRequest(t, http.MethodGet, clusterHarness.VolumeAdminURL()+"/"+manifestFID+"?cm=false"))
manifestBypassBody := framework.ReadAllAndClose(t, manifestBypassRead)
if manifestBypassRead.StatusCode != http.StatusOK {
t.Fatalf("manifest bypass read after failed delete expected 200, got %d", manifestBypassRead.StatusCode)
}
var gotManifest operation.ChunkManifest
if err = json.Unmarshal(manifestBypassBody, &gotManifest); err != nil {
t.Fatalf("manifest bypass read expected JSON payload, got decode error: %v body=%q", err, string(manifestBypassBody))
}
if len(gotManifest.Chunks) != 1 || gotManifest.Chunks[0].Fid != "not-a-valid-fid" {
t.Fatalf("manifest payload mismatch after failed delete: %+v", gotManifest)
}
}

97
test/volume_server/http/compressed_read_test.go

@ -0,0 +1,97 @@
package volume_server_http_test
import (
"bytes"
"compress/gzip"
"io"
"net/http"
"testing"
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
)
func gzipData(t testing.TB, data []byte) []byte {
t.Helper()
var buf bytes.Buffer
zw := gzip.NewWriter(&buf)
if _, err := zw.Write(data); err != nil {
t.Fatalf("gzip write: %v", err)
}
if err := zw.Close(); err != nil {
t.Fatalf("gzip close: %v", err)
}
return buf.Bytes()
}
func gunzipData(t testing.TB, data []byte) []byte {
t.Helper()
zr, err := gzip.NewReader(bytes.NewReader(data))
if err != nil {
t.Fatalf("gunzip new reader: %v", err)
}
defer zr.Close()
out, err := io.ReadAll(zr)
if err != nil {
t.Fatalf("gunzip read: %v", err)
}
return out
}
// TestCompressedReadAcceptEncodingMatrix uploads gzip-compressed content
// (Content-Encoding: gzip) and verifies the read path negotiates encoding:
// an Accept-Encoding: gzip client receives the compressed bytes with a gzip
// Content-Encoding header, while an identity client gets the plain payload.
func TestCompressedReadAcceptEncodingMatrix(t *testing.T) {
if testing.Short() {
t.Skip("skipping integration test in short mode")
}
clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
defer conn.Close()
const volumeID = uint32(103)
framework.AllocateVolume(t, grpcClient, volumeID, "")
client := framework.NewHTTPClient()
fid := framework.NewFileID(volumeID, 772007, 0x708192A3)
plainPayload := []byte("compressed-read-accept-encoding-matrix-content-compressed-read-accept-encoding-matrix-content")
compressedPayload := gzipData(t, plainPayload)
// Upload pre-compressed bytes and declare the gzip encoding on the request.
uploadReq, err := http.NewRequest(http.MethodPost, clusterHarness.VolumeAdminURL()+"/"+fid, bytes.NewReader(compressedPayload))
if err != nil {
t.Fatalf("create compressed upload request: %v", err)
}
uploadReq.Header.Set("Content-Type", "text/plain")
uploadReq.Header.Set("Content-Encoding", "gzip")
uploadResp := framework.DoRequest(t, client, uploadReq)
_ = framework.ReadAllAndClose(t, uploadResp)
if uploadResp.StatusCode != http.StatusCreated {
t.Fatalf("compressed upload expected 201, got %d", uploadResp.StatusCode)
}
// A gzip-capable client should get the gzip-encoded body back.
gzipReadReq := mustNewRequest(t, http.MethodGet, clusterHarness.VolumeAdminURL()+"/"+fid)
gzipReadReq.Header.Set("Accept-Encoding", "gzip")
gzipReadResp := framework.DoRequest(t, client, gzipReadReq)
gzipReadBody := framework.ReadAllAndClose(t, gzipReadResp)
if gzipReadResp.StatusCode != http.StatusOK {
t.Fatalf("gzip-accepted read expected 200, got %d", gzipReadResp.StatusCode)
}
if gzipReadResp.Header.Get("Content-Encoding") != "gzip" {
t.Fatalf("gzip-accepted read expected Content-Encoding=gzip, got %q", gzipReadResp.Header.Get("Content-Encoding"))
}
if string(gunzipData(t, gzipReadBody)) != string(plainPayload) {
t.Fatalf("gzip-accepted read body mismatch after gunzip")
}
// An identity-only client should receive the decompressed payload directly.
identityReadReq := mustNewRequest(t, http.MethodGet, clusterHarness.VolumeAdminURL()+"/"+fid)
identityReadReq.Header.Set("Accept-Encoding", "identity")
identityReadResp := framework.DoRequest(t, client, identityReadReq)
identityReadBody := framework.ReadAllAndClose(t, identityReadResp)
if identityReadResp.StatusCode != http.StatusOK {
t.Fatalf("identity read expected 200, got %d", identityReadResp.StatusCode)
}
if identityReadResp.Header.Get("Content-Encoding") != "" {
t.Fatalf("identity read expected no Content-Encoding header, got %q", identityReadResp.Header.Get("Content-Encoding"))
}
if string(identityReadBody) != string(plainPayload) {
t.Fatalf("identity read body mismatch: got %q want %q", string(identityReadBody), string(plainPayload))
}
}

102
test/volume_server/http/headers_static_test.go

@ -0,0 +1,102 @@
package volume_server_http_test
import (
"fmt"
"net/http"
"strings"
"testing"
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
)
// TestReadPassthroughHeadersAndDownloadDisposition verifies that the
// response-content-type and response-cache-control query overrides are echoed
// back on reads, and that dl=true produces an attachment Content-Disposition
// carrying the requested filename.
func TestReadPassthroughHeadersAndDownloadDisposition(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(96)
	framework.AllocateVolume(t, grpcClient, volumeID, "")
	client := framework.NewHTTPClient()
	fullFileID := framework.NewFileID(volumeID, 661122, 0x55667788)
	uploadResp := framework.UploadBytes(t, client, clusterHarness.VolumeAdminURL(), fullFileID, []byte("passthrough-header-content"))
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
	}
	// The /<volume>/<fid>/<filename> URL form needs the fid without its
	// "<volume>," prefix; strings.Cut splits it exactly once.
	_, fidOnly, ok := strings.Cut(fullFileID, ",")
	if !ok {
		t.Fatalf("unexpected file id format: %q", fullFileID)
	}
	url := fmt.Sprintf("%s/%d/%s/%s?response-content-type=text/plain&response-cache-control=no-store&dl=true",
		clusterHarness.VolumeAdminURL(),
		volumeID,
		fidOnly,
		"report.txt",
	)
	resp := framework.DoRequest(t, client, mustNewRequest(t, http.MethodGet, url))
	_ = framework.ReadAllAndClose(t, resp)
	if resp.StatusCode != http.StatusOK {
		t.Fatalf("passthrough read expected 200, got %d", resp.StatusCode)
	}
	if resp.Header.Get("Content-Type") != "text/plain" {
		t.Fatalf("response-content-type override mismatch: %q", resp.Header.Get("Content-Type"))
	}
	if resp.Header.Get("Cache-Control") != "no-store" {
		t.Fatalf("response-cache-control override mismatch: %q", resp.Header.Get("Cache-Control"))
	}
	contentDisposition := resp.Header.Get("Content-Disposition")
	if !strings.Contains(contentDisposition, "attachment") || !strings.Contains(contentDisposition, "report.txt") {
		t.Fatalf("download disposition header mismatch: %q", contentDisposition)
	}
}
// TestStaticAssetEndpoints verifies that the admin port serves the bundled
// favicon and static image assets with 200 OK.
func TestStaticAssetEndpoints(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P1())
	httpClient := framework.NewHTTPClient()
	iconResp := framework.DoRequest(t, httpClient, mustNewRequest(t, http.MethodGet, harness.VolumeAdminURL()+"/favicon.ico"))
	_ = framework.ReadAllAndClose(t, iconResp)
	if iconResp.StatusCode != http.StatusOK {
		t.Fatalf("/favicon.ico expected 200, got %d", iconResp.StatusCode)
	}
	imageResp := framework.DoRequest(t, httpClient, mustNewRequest(t, http.MethodGet, harness.VolumeAdminURL()+"/seaweedfsstatic/seaweed50x50.png"))
	_ = framework.ReadAllAndClose(t, imageResp)
	if imageResp.StatusCode != http.StatusOK {
		t.Fatalf("/seaweedfsstatic/seaweed50x50.png expected 200, got %d", imageResp.StatusCode)
	}
}
// TestStaticAssetEndpointsOnPublicPort verifies that the public read port also
// serves the bundled favicon and static image assets with 200 OK.
func TestStaticAssetEndpointsOnPublicPort(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P2())
	httpClient := framework.NewHTTPClient()
	iconResp := framework.DoRequest(t, httpClient, mustNewRequest(t, http.MethodGet, harness.VolumePublicURL()+"/favicon.ico"))
	_ = framework.ReadAllAndClose(t, iconResp)
	if iconResp.StatusCode != http.StatusOK {
		t.Fatalf("public /favicon.ico expected 200, got %d", iconResp.StatusCode)
	}
	imageResp := framework.DoRequest(t, httpClient, mustNewRequest(t, http.MethodGet, harness.VolumePublicURL()+"/seaweedfsstatic/seaweed50x50.png"))
	_ = framework.ReadAllAndClose(t, imageResp)
	if imageResp.StatusCode != http.StatusOK {
		t.Fatalf("public /seaweedfsstatic/seaweed50x50.png expected 200, got %d", imageResp.StatusCode)
	}
}

92
test/volume_server/http/image_transform_test.go

@ -0,0 +1,92 @@
package volume_server_http_test
import (
"bytes"
"fmt"
"image"
"image/color"
"image/png"
"net/http"
"strings"
"testing"
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
)
func makePNGFixture(t testing.TB, width, height int) []byte {
t.Helper()
img := image.NewRGBA(image.Rect(0, 0, width, height))
for y := 0; y < height; y++ {
for x := 0; x < width; x++ {
img.Set(x, y, color.RGBA{R: uint8(x * 20), G: uint8(y * 20), B: 200, A: 255})
}
}
var buf bytes.Buffer
if err := png.Encode(&buf, img); err != nil {
t.Fatalf("encode png fixture: %v", err)
}
return buf.Bytes()
}
func decodeImageConfig(t testing.TB, data []byte) image.Config {
t.Helper()
cfg, _, err := image.DecodeConfig(bytes.NewReader(data))
if err != nil {
t.Fatalf("decode image config: %v", err)
}
return cfg
}
// TestImageResizeAndCropReadVariants uploads a small PNG and verifies that the
// read path honors the width/height resize parameters and the
// crop_x1..crop_y2 crop parameters on the /<volume>/<fid>/<filename> URL form.
func TestImageResizeAndCropReadVariants(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(101)
	framework.AllocateVolume(t, grpcClient, volumeID, "")
	client := framework.NewHTTPClient()
	fullFileID := framework.NewFileID(volumeID, 772004, 0x4D5E6F70)
	// Seed a 6x4 PNG fixture so both resize and crop have room to operate.
	uploadReq := newUploadRequest(t, clusterHarness.VolumeAdminURL()+"/"+fullFileID, makePNGFixture(t, 6, 4))
	uploadReq.Header.Set("Content-Type", "image/png")
	uploadResp := framework.DoRequest(t, client, uploadReq)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("image upload expected 201, got %d", uploadResp.StatusCode)
	}
	// The /<volume>/<fid>/<filename> URL form needs the fid without its
	// "<volume>," prefix; strings.Cut splits it exactly once.
	_, fidOnly, ok := strings.Cut(fullFileID, ",")
	if !ok {
		t.Fatalf("unexpected file id format: %q", fullFileID)
	}
	resizeURL := fmt.Sprintf("%s/%d/%s/%s?width=2&height=1", clusterHarness.VolumeAdminURL(), volumeID, fidOnly, "fixture.png")
	resizeResp := framework.DoRequest(t, client, mustNewRequest(t, http.MethodGet, resizeURL))
	resizeBody := framework.ReadAllAndClose(t, resizeResp)
	if resizeResp.StatusCode != http.StatusOK {
		t.Fatalf("image resize read expected 200, got %d", resizeResp.StatusCode)
	}
	// Only a bounding box is asserted; the server may shrink further,
	// presumably to keep aspect ratio — confirm against the resize handler.
	resizeCfg := decodeImageConfig(t, resizeBody)
	if resizeCfg.Width > 2 || resizeCfg.Height > 1 {
		t.Fatalf("image resize expected dimensions <= 2x1, got %dx%d", resizeCfg.Width, resizeCfg.Height)
	}
	cropURL := fmt.Sprintf("%s/%d/%s/%s?crop_x1=1&crop_y1=1&crop_x2=4&crop_y2=3", clusterHarness.VolumeAdminURL(), volumeID, fidOnly, "fixture.png")
	cropResp := framework.DoRequest(t, client, mustNewRequest(t, http.MethodGet, cropURL))
	cropBody := framework.ReadAllAndClose(t, cropResp)
	if cropResp.StatusCode != http.StatusOK {
		t.Fatalf("image crop read expected 200, got %d", cropResp.StatusCode)
	}
	// Crop rectangle (1,1)-(4,3) yields an exact 3x2 result.
	cropCfg := decodeImageConfig(t, cropBody)
	if cropCfg.Width != 3 || cropCfg.Height != 2 {
		t.Fatalf("image crop expected 3x2, got %dx%d", cropCfg.Width, cropCfg.Height)
	}
}

287
test/volume_server/http/public_cors_methods_test.go

@ -0,0 +1,287 @@
package volume_server_http_test
import (
"bytes"
"net/http"
"strconv"
"testing"
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
)
// TestPublicPortReadOnlyMethodBehavior verifies that the public volume port
// serves reads, while write-style methods (POST/DELETE) answer 200 without
// mutating the stored needle; the admin port must still see the original bytes.
func TestPublicPortReadOnlyMethodBehavior(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test in short mode")
    }
    clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P2())
    conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
    defer conn.Close()

    const volumeID = uint32(81)
    framework.AllocateVolume(t, grpcClient, volumeID, "")

    httpClient := framework.NewHTTPClient()
    fid := framework.NewFileID(volumeID, 123321, 0x01020304)
    original := []byte("public-port-original")
    replacement := []byte("public-port-replacement")

    // Seed the needle through the admin port.
    seedResp := framework.UploadBytes(t, httpClient, clusterHarness.VolumeAdminURL(), fid, original)
    _ = framework.ReadAllAndClose(t, seedResp)
    if seedResp.StatusCode != http.StatusCreated {
        t.Fatalf("admin upload expected 201, got %d", seedResp.StatusCode)
    }

    // Reads on the public port must succeed and return the seeded bytes.
    getResp := framework.ReadBytes(t, httpClient, clusterHarness.VolumePublicURL(), fid)
    getBody := framework.ReadAllAndClose(t, getResp)
    if getResp.StatusCode != http.StatusOK {
        t.Fatalf("public GET expected 200, got %d", getResp.StatusCode)
    }
    if string(getBody) != string(original) {
        t.Fatalf("public GET body mismatch: got %q want %q", string(getBody), string(original))
    }

    // POST and DELETE on the public port respond 200 but must be no-ops.
    postResp := framework.DoRequest(t, httpClient, newUploadRequest(t, clusterHarness.VolumePublicURL()+"/"+fid, replacement))
    _ = framework.ReadAllAndClose(t, postResp)
    if postResp.StatusCode != http.StatusOK {
        t.Fatalf("public POST expected passthrough 200, got %d", postResp.StatusCode)
    }
    delResp := framework.DoRequest(t, httpClient, mustNewRequest(t, http.MethodDelete, clusterHarness.VolumePublicURL()+"/"+fid))
    _ = framework.ReadAllAndClose(t, delResp)
    if delResp.StatusCode != http.StatusOK {
        t.Fatalf("public DELETE expected passthrough 200, got %d", delResp.StatusCode)
    }

    // The admin port must still serve the original, unmodified content.
    finalResp := framework.ReadBytes(t, httpClient, clusterHarness.VolumeAdminURL(), fid)
    finalBody := framework.ReadAllAndClose(t, finalResp)
    if finalResp.StatusCode != http.StatusOK {
        t.Fatalf("admin GET after public POST/DELETE expected 200, got %d", finalResp.StatusCode)
    }
    if string(finalBody) != string(original) {
        t.Fatalf("public port should not mutate data: got %q want %q", string(finalBody), string(original))
    }
}
// TestCorsAndUnsupportedMethodBehavior covers two behaviors on both ports:
// CORS response headers on a cross-origin GET, and handling of the
// unsupported PATCH method (admin rejects with 400, public passes through 200).
func TestCorsAndUnsupportedMethodBehavior(t *testing.T) {
if testing.Short() {
t.Skip("skipping integration test in short mode")
}
// Bring up a single volume server (profile P2) and seed a fixture needle.
clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P2())
conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
defer conn.Close()
const volumeID = uint32(82)
framework.AllocateVolume(t, grpcClient, volumeID, "")
fid := framework.NewFileID(volumeID, 789789, 0x0A0B0C0D)
client := framework.NewHTTPClient()
uploadResp := framework.UploadBytes(t, client, clusterHarness.VolumeAdminURL(), fid, []byte("cors-check"))
_ = framework.ReadAllAndClose(t, uploadResp)
if uploadResp.StatusCode != http.StatusCreated {
t.Fatalf("admin upload expected 201, got %d", uploadResp.StatusCode)
}
// Admin port: a GET with an Origin header must advertise the wildcard
// origin plus allow-credentials in the CORS response headers.
adminOriginReq := mustNewRequest(t, http.MethodGet, clusterHarness.VolumeAdminURL()+"/"+fid)
adminOriginReq.Header.Set("Origin", "https://example.com")
adminOriginResp := framework.DoRequest(t, client, adminOriginReq)
_ = framework.ReadAllAndClose(t, adminOriginResp)
if adminOriginResp.Header.Get("Access-Control-Allow-Origin") != "*" {
t.Fatalf("admin GET origin header mismatch: %q", adminOriginResp.Header.Get("Access-Control-Allow-Origin"))
}
if adminOriginResp.Header.Get("Access-Control-Allow-Credentials") != "true" {
t.Fatalf("admin GET credentials header mismatch: %q", adminOriginResp.Header.Get("Access-Control-Allow-Credentials"))
}
// Public port: the same CORS headers are expected on cross-origin reads.
publicOriginReq := mustNewRequest(t, http.MethodGet, clusterHarness.VolumePublicURL()+"/"+fid)
publicOriginReq.Header.Set("Origin", "https://example.com")
publicOriginResp := framework.DoRequest(t, client, publicOriginReq)
_ = framework.ReadAllAndClose(t, publicOriginResp)
if publicOriginResp.Header.Get("Access-Control-Allow-Origin") != "*" {
t.Fatalf("public GET origin header mismatch: %q", publicOriginResp.Header.Get("Access-Control-Allow-Origin"))
}
if publicOriginResp.Header.Get("Access-Control-Allow-Credentials") != "true" {
t.Fatalf("public GET credentials header mismatch: %q", publicOriginResp.Header.Get("Access-Control-Allow-Credentials"))
}
// PATCH carries a body, so build it with http.NewRequest rather than the
// body-less mustNewRequest helper. The admin port rejects PATCH with 400.
adminPatchReq, err := http.NewRequest(http.MethodPatch, clusterHarness.VolumeAdminURL()+"/"+fid, bytes.NewReader([]byte("patch")))
if err != nil {
t.Fatalf("create admin PATCH request: %v", err)
}
adminPatchResp := framework.DoRequest(t, client, adminPatchReq)
_ = framework.ReadAllAndClose(t, adminPatchResp)
if adminPatchResp.StatusCode != http.StatusBadRequest {
t.Fatalf("admin PATCH expected 400, got %d", adminPatchResp.StatusCode)
}
// The public port answers 200 to PATCH without acting on it.
publicPatchReq, err := http.NewRequest(http.MethodPatch, clusterHarness.VolumePublicURL()+"/"+fid, bytes.NewReader([]byte("patch")))
if err != nil {
t.Fatalf("create public PATCH request: %v", err)
}
publicPatchResp := framework.DoRequest(t, client, publicPatchReq)
_ = framework.ReadAllAndClose(t, publicPatchResp)
if publicPatchResp.StatusCode != http.StatusOK {
t.Fatalf("public PATCH expected passthrough 200, got %d", publicPatchResp.StatusCode)
}
}
// TestUnsupportedMethodTraceParity checks TRACE handling on both ports:
// the admin port rejects it with 400 while the public port answers 200
// without acting on it.
func TestUnsupportedMethodTraceParity(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test in short mode")
    }
    clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P2())
    conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
    defer conn.Close()

    const volumeID = uint32(83)
    framework.AllocateVolume(t, grpcClient, volumeID, "")
    fid := framework.NewFileID(volumeID, 123999, 0x01010101)
    httpClient := framework.NewHTTPClient()

    seed := framework.UploadBytes(t, httpClient, clusterHarness.VolumeAdminURL(), fid, []byte("trace-method-check"))
    _ = framework.ReadAllAndClose(t, seed)
    if seed.StatusCode != http.StatusCreated {
        t.Fatalf("upload expected 201, got %d", seed.StatusCode)
    }

    adminResp := framework.DoRequest(t, httpClient, mustNewRequest(t, http.MethodTrace, clusterHarness.VolumeAdminURL()+"/"+fid))
    _ = framework.ReadAllAndClose(t, adminResp)
    if adminResp.StatusCode != http.StatusBadRequest {
        t.Fatalf("admin TRACE expected 400, got %d", adminResp.StatusCode)
    }

    publicResp := framework.DoRequest(t, httpClient, mustNewRequest(t, http.MethodTrace, clusterHarness.VolumePublicURL()+"/"+fid))
    _ = framework.ReadAllAndClose(t, publicResp)
    if publicResp.StatusCode != http.StatusOK {
        t.Fatalf("public TRACE expected passthrough 200, got %d", publicResp.StatusCode)
    }
}
// TestUnsupportedMethodPropfindParity exercises the WebDAV PROPFIND verb on
// both ports: the admin port rejects it with 400, the public port answers
// 200 without touching the needle, and a final admin GET proves the stored
// data is unchanged.
func TestUnsupportedMethodPropfindParity(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test in short mode")
    }
    clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P2())
    conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
    defer conn.Close()

    const volumeID = uint32(84)
    framework.AllocateVolume(t, grpcClient, volumeID, "")
    fid := framework.NewFileID(volumeID, 124000, 0x02020202)
    httpClient := framework.NewHTTPClient()

    seed := framework.UploadBytes(t, httpClient, clusterHarness.VolumeAdminURL(), fid, []byte("propfind-method-check"))
    _ = framework.ReadAllAndClose(t, seed)
    if seed.StatusCode != http.StatusCreated {
        t.Fatalf("upload expected 201, got %d", seed.StatusCode)
    }

    // propfind issues a PROPFIND against the given base URL and reports the status.
    propfind := func(base string) int {
        resp := framework.DoRequest(t, httpClient, mustNewRequest(t, "PROPFIND", base+"/"+fid))
        _ = framework.ReadAllAndClose(t, resp)
        return resp.StatusCode
    }
    if code := propfind(clusterHarness.VolumeAdminURL()); code != http.StatusBadRequest {
        t.Fatalf("admin PROPFIND expected 400, got %d", code)
    }
    if code := propfind(clusterHarness.VolumePublicURL()); code != http.StatusOK {
        t.Fatalf("public PROPFIND expected passthrough 200, got %d", code)
    }

    // Confirm the needle content survived both PROPFIND attempts.
    verifyResp := framework.ReadBytes(t, httpClient, clusterHarness.VolumeAdminURL(), fid)
    verifyBody := framework.ReadAllAndClose(t, verifyResp)
    if verifyResp.StatusCode != http.StatusOK {
        t.Fatalf("verify GET expected 200, got %d", verifyResp.StatusCode)
    }
    if string(verifyBody) != "propfind-method-check" {
        t.Fatalf("PROPFIND should not mutate data, got %q", string(verifyBody))
    }
}
// TestUnsupportedMethodConnectParity exercises the CONNECT verb on both
// ports: the admin port rejects it with 400, the public port answers 200
// without acting on it, and a final admin GET proves the stored data is
// unchanged.
func TestUnsupportedMethodConnectParity(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test in short mode")
    }
    clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P2())
    conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
    defer conn.Close()

    const volumeID = uint32(85)
    framework.AllocateVolume(t, grpcClient, volumeID, "")
    fid := framework.NewFileID(volumeID, 124001, 0x03030303)
    httpClient := framework.NewHTTPClient()

    seed := framework.UploadBytes(t, httpClient, clusterHarness.VolumeAdminURL(), fid, []byte("connect-method-check"))
    _ = framework.ReadAllAndClose(t, seed)
    if seed.StatusCode != http.StatusCreated {
        t.Fatalf("upload expected 201, got %d", seed.StatusCode)
    }

    // sendConnect issues a CONNECT against the given base URL and reports the status.
    sendConnect := func(base string) int {
        resp := framework.DoRequest(t, httpClient, mustNewRequest(t, "CONNECT", base+"/"+fid))
        _ = framework.ReadAllAndClose(t, resp)
        return resp.StatusCode
    }
    if code := sendConnect(clusterHarness.VolumeAdminURL()); code != http.StatusBadRequest {
        t.Fatalf("admin CONNECT expected 400, got %d", code)
    }
    if code := sendConnect(clusterHarness.VolumePublicURL()); code != http.StatusOK {
        t.Fatalf("public CONNECT expected passthrough 200, got %d", code)
    }

    // Confirm the needle content survived both CONNECT attempts.
    verifyResp := framework.ReadBytes(t, httpClient, clusterHarness.VolumeAdminURL(), fid)
    verifyBody := framework.ReadAllAndClose(t, verifyResp)
    if verifyResp.StatusCode != http.StatusOK {
        t.Fatalf("verify GET expected 200, got %d", verifyResp.StatusCode)
    }
    if string(verifyBody) != "connect-method-check" {
        t.Fatalf("CONNECT should not mutate data, got %q", string(verifyBody))
    }
}
// TestPublicPortHeadReadParity verifies HEAD on the public port: a 200
// status, a Content-Length matching the stored payload, and an empty body.
func TestPublicPortHeadReadParity(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test in short mode")
    }
    clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P2())
    conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
    defer conn.Close()

    const volumeID = uint32(86)
    framework.AllocateVolume(t, grpcClient, volumeID, "")
    fid := framework.NewFileID(volumeID, 124002, 0x04040404)
    content := []byte("public-head-parity-content")
    httpClient := framework.NewHTTPClient()

    seed := framework.UploadBytes(t, httpClient, clusterHarness.VolumeAdminURL(), fid, content)
    _ = framework.ReadAllAndClose(t, seed)
    if seed.StatusCode != http.StatusCreated {
        t.Fatalf("upload expected 201, got %d", seed.StatusCode)
    }

    headResp := framework.DoRequest(t, httpClient, mustNewRequest(t, http.MethodHead, clusterHarness.VolumePublicURL()+"/"+fid))
    headBody := framework.ReadAllAndClose(t, headResp)
    if headResp.StatusCode != http.StatusOK {
        t.Fatalf("public HEAD expected 200, got %d", headResp.StatusCode)
    }
    if got := headResp.Header.Get("Content-Length"); got != strconv.Itoa(len(content)) {
        t.Fatalf("public HEAD content-length mismatch: got %q want %d", got, len(content))
    }
    if len(headBody) != 0 {
        t.Fatalf("public HEAD body should be empty, got %d bytes", len(headBody))
    }
}

82
test/volume_server/http/range_variants_test.go

@ -0,0 +1,82 @@
package volume_server_http_test
import (
"net/http"
"strings"
"testing"
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
)
// TestMultiRangeReadReturnsMultipartPayload requests two disjoint byte ranges
// in a single GET and expects a 206 multipart/byteranges response containing
// both requested segments.
func TestMultiRangeReadReturnsMultipartPayload(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test in short mode")
    }
    clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
    conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
    defer conn.Close()

    const volumeID = uint32(97)
    framework.AllocateVolume(t, grpcClient, volumeID, "")
    httpClient := framework.NewHTTPClient()
    fid := framework.NewFileID(volumeID, 771999, 0x0A1B2C3D)

    // Store 16 known bytes so the requested ranges have predictable content.
    seed := framework.UploadBytes(t, httpClient, clusterHarness.VolumeAdminURL(), fid, []byte("0123456789abcdef"))
    _ = framework.ReadAllAndClose(t, seed)
    if seed.StatusCode != http.StatusCreated {
        t.Fatalf("upload expected 201, got %d", seed.StatusCode)
    }

    rangeReq := mustNewRequest(t, http.MethodGet, clusterHarness.VolumeAdminURL()+"/"+fid)
    rangeReq.Header.Set("Range", "bytes=0-1,4-5")
    rangeResp := framework.DoRequest(t, httpClient, rangeReq)
    rangeBody := framework.ReadAllAndClose(t, rangeResp)
    if rangeResp.StatusCode != http.StatusPartialContent {
        t.Fatalf("multi-range expected 206, got %d", rangeResp.StatusCode)
    }
    if !strings.Contains(rangeResp.Header.Get("Content-Type"), "multipart/byteranges") {
        t.Fatalf("multi-range content-type mismatch: %q", rangeResp.Header.Get("Content-Type"))
    }
    // Both range payloads ("01" for 0-1 and "45" for 4-5) must appear in the body.
    body := string(rangeBody)
    if !strings.Contains(body, "01") || !strings.Contains(body, "45") {
        t.Fatalf("multi-range body missing expected segments: %q", body)
    }
}
// TestOversizedCombinedRangesAreIgnored issues a multi-range request whose
// combined span exceeds the stored object size; the server is expected to
// respond 200 with an empty body rather than serving the ranges.
func TestOversizedCombinedRangesAreIgnored(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test in short mode")
    }
    clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
    conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
    defer conn.Close()

    const volumeID = uint32(100)
    framework.AllocateVolume(t, grpcClient, volumeID, "")
    httpClient := framework.NewHTTPClient()
    fid := framework.NewFileID(volumeID, 772003, 0x3C4D5E6F)

    seed := framework.UploadBytes(t, httpClient, clusterHarness.VolumeAdminURL(), fid, []byte("0123456789abcdef"))
    _ = framework.ReadAllAndClose(t, seed)
    if seed.StatusCode != http.StatusCreated {
        t.Fatalf("upload expected 201, got %d", seed.StatusCode)
    }

    // Range bytes sum is 22 (> payload size 16), which exercises the oversized-range guard path.
    oversizedReq := mustNewRequest(t, http.MethodGet, clusterHarness.VolumeAdminURL()+"/"+fid)
    oversizedReq.Header.Set("Range", "bytes=0-10,5-15")
    oversizedResp := framework.DoRequest(t, httpClient, oversizedReq)
    oversizedBody := framework.ReadAllAndClose(t, oversizedResp)
    if oversizedResp.StatusCode != http.StatusOK {
        t.Fatalf("oversized combined range expected 200, got %d", oversizedResp.StatusCode)
    }
    if len(oversizedBody) != 0 {
        t.Fatalf("oversized combined range expected empty body, got %d bytes", len(oversizedBody))
    }
}

54
test/volume_server/http/read_deleted_test.go

@ -0,0 +1,54 @@
package volume_server_http_test
import (
"net/http"
"testing"
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
)
// TestReadDeletedQueryReturnsDeletedNeedleData deletes a needle, confirms a
// normal read now 404s, then reads it back with ?readDeleted=true, which must
// return the original bytes with a 200.
func TestReadDeletedQueryReturnsDeletedNeedleData(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test in short mode")
    }
    clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
    conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
    defer conn.Close()

    const volumeID = uint32(94)
    framework.AllocateVolume(t, grpcClient, volumeID, "")
    httpClient := framework.NewHTTPClient()
    fid := framework.NewFileID(volumeID, 551234, 0xCAFE1234)
    content := []byte("read-deleted-needle-payload")

    seed := framework.UploadBytes(t, httpClient, clusterHarness.VolumeAdminURL(), fid, content)
    _ = framework.ReadAllAndClose(t, seed)
    if seed.StatusCode != http.StatusCreated {
        t.Fatalf("upload expected 201, got %d", seed.StatusCode)
    }

    delResp := framework.DoRequest(t, httpClient, mustNewRequest(t, http.MethodDelete, clusterHarness.VolumeAdminURL()+"/"+fid))
    _ = framework.ReadAllAndClose(t, delResp)
    if delResp.StatusCode != http.StatusAccepted {
        t.Fatalf("delete expected 202, got %d", delResp.StatusCode)
    }

    // A plain read of the deleted needle must fail.
    plainRead := framework.ReadBytes(t, httpClient, clusterHarness.VolumeAdminURL(), fid)
    _ = framework.ReadAllAndClose(t, plainRead)
    if plainRead.StatusCode != http.StatusNotFound {
        t.Fatalf("normal read after delete expected 404, got %d", plainRead.StatusCode)
    }

    // The readDeleted=true escape hatch must still return the original bytes.
    deletedRead := framework.DoRequest(t, httpClient, mustNewRequest(t, http.MethodGet, clusterHarness.VolumeAdminURL()+"/"+fid+"?readDeleted=true"))
    deletedBody := framework.ReadAllAndClose(t, deletedRead)
    if deletedRead.StatusCode != http.StatusOK {
        t.Fatalf("read with readDeleted=true expected 200, got %d", deletedRead.StatusCode)
    }
    if string(deletedBody) != string(content) {
        t.Fatalf("readDeleted body mismatch: got %q want %q", string(deletedBody), string(content))
    }
}

319
test/volume_server/http/read_mode_proxy_redirect_test.go

@ -0,0 +1,319 @@
package volume_server_http_test
import (
"net/http"
"strings"
"testing"
"time"
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
)
// TestReadModeProxyMissingLocalVolume runs two volume servers with read mode
// "proxy", stores a needle via server 0 (the only host of the volume), and
// reads it through server 1; proxy mode must forward the request and return
// the payload with 200.
func TestReadModeProxyMissingLocalVolume(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test in short mode")
    }
    profile := matrix.P1()
    profile.ReadMode = "proxy"
    clusterHarness := framework.StartDualVolumeCluster(t, profile)
    conn0, grpc0 := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress(0))
    defer conn0.Close()

    const volumeID = uint32(101)
    framework.AllocateVolume(t, grpc0, volumeID, "")
    httpClient := framework.NewHTTPClient()
    fid := framework.NewFileID(volumeID, 120001, 0x0102ABCD)
    content := []byte("proxy-read-mode-forwarded-content")

    seed := framework.UploadBytes(t, httpClient, clusterHarness.VolumeAdminURL(0), fid, content)
    _ = framework.ReadAllAndClose(t, seed)
    if seed.StatusCode != http.StatusCreated {
        t.Fatalf("upload expected 201, got %d", seed.StatusCode)
    }

    // Poll server 1 until the proxied read succeeds (topology may take a moment).
    var body []byte
    readURL := clusterHarness.VolumeAdminURL(1) + "/" + fid
    ok := waitForHTTPStatus(t, httpClient, readURL, http.StatusOK, 10*time.Second, func(resp *http.Response) {
        body = framework.ReadAllAndClose(t, resp)
    })
    if !ok {
        t.Fatalf("proxy read mode did not return 200 from non-owning volume server within deadline")
    }
    if string(body) != string(content) {
        t.Fatalf("proxy read mode body mismatch: got %q want %q", string(body), string(content))
    }
}
// TestReadModeRedirectMissingLocalVolume runs two volume servers with read
// mode "redirect". A GET against the server that does not host the volume
// must answer 301 with a Location tagged proxied=true, and following that
// Location must return the stored payload.
func TestReadModeRedirectMissingLocalVolume(t *testing.T) {
if testing.Short() {
t.Skip("skipping integration test in short mode")
}
profile := matrix.P1()
profile.ReadMode = "redirect"
clusterHarness := framework.StartDualVolumeCluster(t, profile)
conn0, grpc0 := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress(0))
defer conn0.Close()
const volumeID = uint32(102)
framework.AllocateVolume(t, grpc0, volumeID, "")
client := framework.NewHTTPClient()
fid := framework.NewFileID(volumeID, 120002, 0x0102DCBA)
payload := []byte("redirect-read-mode-content")
// Seed the needle via server 0, the only server hosting the volume.
uploadResp := framework.UploadBytes(t, client, clusterHarness.VolumeAdminURL(0), fid, payload)
_ = framework.ReadAllAndClose(t, uploadResp)
if uploadResp.StatusCode != http.StatusCreated {
t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
}
// Disable automatic redirect following so the 301 itself can be inspected.
noRedirectClient := &http.Client{
Timeout: 10 * time.Second,
CheckRedirect: func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
},
}
readURL := clusterHarness.VolumeAdminURL(1) + "/" + fid
var redirectLocation string
// Poll: the non-owning server may need a moment before it answers with the redirect.
if !waitForHTTPStatus(t, noRedirectClient, readURL, http.StatusMovedPermanently, 10*time.Second, func(resp *http.Response) {
redirectLocation = resp.Header.Get("Location")
_ = framework.ReadAllAndClose(t, resp)
}) {
t.Fatalf("redirect read mode did not return 301 from non-owning volume server within deadline")
}
if redirectLocation == "" {
t.Fatalf("redirect response missing Location header")
}
if !strings.Contains(redirectLocation, "proxied=true") {
t.Fatalf("redirect Location should include proxied=true, got %q", redirectLocation)
}
// Following the redirect manually must yield the original payload.
followResp := framework.DoRequest(t, client, mustNewRequest(t, http.MethodGet, redirectLocation))
followBody := framework.ReadAllAndClose(t, followResp)
if followResp.StatusCode != http.StatusOK {
t.Fatalf("following redirect expected 200, got %d", followResp.StatusCode)
}
if string(followBody) != string(payload) {
t.Fatalf("redirect-follow body mismatch: got %q want %q", string(followBody), string(payload))
}
}
// TestReadModeLocalMissingLocalVolumeReturnsNotFound runs two volume servers
// with read mode "local"; reading a needle from the server that does not host
// its volume must fail with 404 instead of proxying or redirecting.
func TestReadModeLocalMissingLocalVolumeReturnsNotFound(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test in short mode")
    }
    profile := matrix.P1()
    profile.ReadMode = "local"
    clusterHarness := framework.StartDualVolumeCluster(t, profile)
    conn0, grpc0 := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress(0))
    defer conn0.Close()

    const volumeID = uint32(103)
    framework.AllocateVolume(t, grpc0, volumeID, "")
    httpClient := framework.NewHTTPClient()
    fid := framework.NewFileID(volumeID, 120003, 0x0102BEEF)

    seed := framework.UploadBytes(t, httpClient, clusterHarness.VolumeAdminURL(0), fid, []byte("local-read-mode-content"))
    _ = framework.ReadAllAndClose(t, seed)
    if seed.StatusCode != http.StatusCreated {
        t.Fatalf("upload expected 201, got %d", seed.StatusCode)
    }

    // Server 1 does not host the volume; in local mode that is a hard 404.
    readResp := framework.ReadBytes(t, httpClient, clusterHarness.VolumeAdminURL(1), fid)
    _ = framework.ReadAllAndClose(t, readResp)
    if readResp.StatusCode != http.StatusNotFound {
        t.Fatalf("local read mode expected 404 on non-owning server, got %d", readResp.StatusCode)
    }
}
// TestReadDeletedProxyModeOnMissingLocalVolume verifies that the
// ?readDeleted=true query works through proxy-mode forwarding: a needle
// deleted on the owning server can still be read via the non-owning server.
func TestReadDeletedProxyModeOnMissingLocalVolume(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test in short mode")
    }
    profile := matrix.P1()
    profile.ReadMode = "proxy"
    clusterHarness := framework.StartDualVolumeCluster(t, profile)
    conn0, grpc0 := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress(0))
    defer conn0.Close()

    const volumeID = uint32(104)
    framework.AllocateVolume(t, grpc0, volumeID, "")
    httpClient := framework.NewHTTPClient()
    fid := framework.NewFileID(volumeID, 120004, 0x0102CAFE)
    content := []byte("proxy-readDeleted-missing-local-content")

    seed := framework.UploadBytes(t, httpClient, clusterHarness.VolumeAdminURL(0), fid, content)
    _ = framework.ReadAllAndClose(t, seed)
    if seed.StatusCode != http.StatusCreated {
        t.Fatalf("upload expected 201, got %d", seed.StatusCode)
    }

    delResp := framework.DoRequest(t, httpClient, mustNewRequest(t, http.MethodDelete, clusterHarness.VolumeAdminURL(0)+"/"+fid))
    _ = framework.ReadAllAndClose(t, delResp)
    if delResp.StatusCode != http.StatusAccepted {
        t.Fatalf("delete expected 202, got %d", delResp.StatusCode)
    }

    // Poll the non-owning server with readDeleted=true until the proxied read succeeds.
    var body []byte
    readURL := clusterHarness.VolumeAdminURL(1) + "/" + fid + "?readDeleted=true"
    ok := waitForHTTPStatus(t, httpClient, readURL, http.StatusOK, 10*time.Second, func(resp *http.Response) {
        body = framework.ReadAllAndClose(t, resp)
    })
    if !ok {
        t.Fatalf("proxy readDeleted path did not return 200 from non-owning volume server within deadline")
    }
    if string(body) != string(content) {
        t.Fatalf("proxy readDeleted body mismatch: got %q want %q", string(body), string(content))
    }
}
// TestReadDeletedRedirectModeDropsQueryParameterParity pins down current
// redirect-mode behavior for deleted needles: the 301 Location adds
// proxied=true but drops the readDeleted=true query, so following the
// redirect yields 404 for a deleted needle.
func TestReadDeletedRedirectModeDropsQueryParameterParity(t *testing.T) {
if testing.Short() {
t.Skip("skipping integration test in short mode")
}
profile := matrix.P1()
profile.ReadMode = "redirect"
clusterHarness := framework.StartDualVolumeCluster(t, profile)
conn0, grpc0 := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress(0))
defer conn0.Close()
const volumeID = uint32(105)
framework.AllocateVolume(t, grpc0, volumeID, "")
client := framework.NewHTTPClient()
fid := framework.NewFileID(volumeID, 120005, 0x0102FACE)
payload := []byte("redirect-readDeleted-query-drop-parity")
// Seed then delete the needle on the owning server (server 0).
uploadResp := framework.UploadBytes(t, client, clusterHarness.VolumeAdminURL(0), fid, payload)
_ = framework.ReadAllAndClose(t, uploadResp)
if uploadResp.StatusCode != http.StatusCreated {
t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
}
deleteResp := framework.DoRequest(t, client, mustNewRequest(t, http.MethodDelete, clusterHarness.VolumeAdminURL(0)+"/"+fid))
_ = framework.ReadAllAndClose(t, deleteResp)
if deleteResp.StatusCode != http.StatusAccepted {
t.Fatalf("delete expected 202, got %d", deleteResp.StatusCode)
}
// Inspect the 301 directly instead of letting the client follow it.
noRedirectClient := &http.Client{
Timeout: 10 * time.Second,
CheckRedirect: func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
},
}
redirectURL := clusterHarness.VolumeAdminURL(1) + "/" + fid + "?readDeleted=true"
var location string
if !waitForHTTPStatus(t, noRedirectClient, redirectURL, http.StatusMovedPermanently, 10*time.Second, func(resp *http.Response) {
location = resp.Header.Get("Location")
_ = framework.ReadAllAndClose(t, resp)
}) {
t.Fatalf("redirect readDeleted path did not return 301 from non-owning volume server within deadline")
}
if location == "" {
t.Fatalf("redirect readDeleted response missing Location header")
}
if !strings.Contains(location, "proxied=true") {
t.Fatalf("redirect readDeleted Location should include proxied=true, got %q", location)
}
// Current behavior drops the readDeleted query from the redirect target.
if strings.Contains(location, "readDeleted=true") {
t.Fatalf("redirect readDeleted Location should reflect current query-drop behavior, got %q", location)
}
// Without readDeleted=true the deleted needle reads as 404 on the owner.
followResp := framework.DoRequest(t, client, mustNewRequest(t, http.MethodGet, location))
_ = framework.ReadAllAndClose(t, followResp)
if followResp.StatusCode != http.StatusNotFound {
t.Fatalf("redirect-follow without readDeleted query expected 404 for deleted needle, got %d", followResp.StatusCode)
}
}
// TestReadModeRedirectPreservesCollectionQuery verifies that in redirect read
// mode the 301 Location keeps the caller-supplied collection query parameter
// (alongside proxied=true), so following the redirect still resolves the
// needle in the right collection.
func TestReadModeRedirectPreservesCollectionQuery(t *testing.T) {
if testing.Short() {
t.Skip("skipping integration test in short mode")
}
profile := matrix.P1()
profile.ReadMode = "redirect"
clusterHarness := framework.StartDualVolumeCluster(t, profile)
conn0, grpc0 := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress(0))
defer conn0.Close()
const volumeID = uint32(109)
const collection = "redirect-collection"
// The volume is allocated in a named collection on server 0 only.
framework.AllocateVolume(t, grpc0, volumeID, collection)
client := framework.NewHTTPClient()
fid := framework.NewFileID(volumeID, 120006, 0x0102F00D)
payload := []byte("redirect-collection-preserve-content")
uploadResp := framework.UploadBytes(t, client, clusterHarness.VolumeAdminURL(0), fid, payload)
_ = framework.ReadAllAndClose(t, uploadResp)
if uploadResp.StatusCode != http.StatusCreated {
t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
}
// Inspect the 301 directly instead of letting the client follow it.
noRedirectClient := &http.Client{
Timeout: 10 * time.Second,
CheckRedirect: func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
},
}
redirectURL := clusterHarness.VolumeAdminURL(1) + "/" + fid + "?collection=" + collection
var location string
if !waitForHTTPStatus(t, noRedirectClient, redirectURL, http.StatusMovedPermanently, 10*time.Second, func(resp *http.Response) {
location = resp.Header.Get("Location")
_ = framework.ReadAllAndClose(t, resp)
}) {
t.Fatalf("redirect collection path did not return 301 from non-owning volume server within deadline")
}
if location == "" {
t.Fatalf("redirect collection response missing Location header")
}
if !strings.Contains(location, "proxied=true") {
t.Fatalf("redirect collection Location should include proxied=true, got %q", location)
}
// The collection parameter must survive into the redirect target.
if !strings.Contains(location, "collection="+collection) {
t.Fatalf("redirect collection Location should preserve collection query, got %q", location)
}
// Following the redirect must return the original payload.
followResp := framework.DoRequest(t, client, mustNewRequest(t, http.MethodGet, location))
followBody := framework.ReadAllAndClose(t, followResp)
if followResp.StatusCode != http.StatusOK {
t.Fatalf("redirect-follow expected 200, got %d", followResp.StatusCode)
}
if string(followBody) != string(payload) {
t.Fatalf("redirect-follow body mismatch: got %q want %q", string(followBody), string(payload))
}
}
// waitForHTTPStatus polls url with GET until the response status equals
// expectedStatus or timeout elapses. On a match the still-open response is
// handed to onMatch (which must drain/close it) and true is returned;
// non-matching responses are drained and closed here before retrying.
//
// Fixes over the previous version: at least one probe is always issued even
// with a zero or negative timeout (the old loop could return false without
// ever contacting the server), and on failure the last observed status is
// logged so timeout failures are debuggable.
func waitForHTTPStatus(t testing.TB, client *http.Client, url string, expectedStatus int, timeout time.Duration, onMatch func(resp *http.Response)) bool {
	t.Helper()
	deadline := time.Now().Add(timeout)
	lastStatus := 0
	for {
		resp := framework.DoRequest(t, client, mustNewRequest(t, http.MethodGet, url))
		if resp.StatusCode == expectedStatus {
			onMatch(resp)
			return true
		}
		lastStatus = resp.StatusCode
		_ = framework.ReadAllAndClose(t, resp)
		if !time.Now().Before(deadline) {
			break
		}
		time.Sleep(200 * time.Millisecond)
	}
	t.Logf("waitForHTTPStatus: GET %s last returned %d, want %d", url, lastStatus, expectedStatus)
	return false
}

191
test/volume_server/http/read_path_variants_test.go

@ -0,0 +1,191 @@
package volume_server_http_test
import (
"fmt"
"net/http"
"strings"
"testing"
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
)
// TestReadPathShapesAndIfModifiedSince covers the two read-path URL shapes
// (/{vid}/{fid} and /{vid}/{fid}/{filename}) plus conditional GET/HEAD
// requests with If-Modified-Since, which must answer 304 with no body.
func TestReadPathShapesAndIfModifiedSince(t *testing.T) {
if testing.Short() {
t.Skip("skipping integration test in short mode")
}
clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
defer conn.Close()
const volumeID = uint32(93)
framework.AllocateVolume(t, grpcClient, volumeID, "")
client := framework.NewHTTPClient()
fullFileID := framework.NewFileID(volumeID, 771234, 0xBEEFCACE)
uploadPayload := []byte("read-path-shape-content")
uploadResp := framework.UploadBytes(t, client, clusterHarness.VolumeAdminURL(), fullFileID, uploadPayload)
_ = framework.ReadAllAndClose(t, uploadResp)
if uploadResp.StatusCode != http.StatusCreated {
t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
}
// Split "vid,fid" so the path-shaped URL variants can be constructed.
parts := strings.SplitN(fullFileID, ",", 2)
if len(parts) != 2 {
t.Fatalf("unexpected file id format: %q", fullFileID)
}
fidOnly := parts[1]
// Shape 1: GET /{vid}/{fid}
readByVidFid := framework.DoRequest(t, client, mustNewRequest(t, http.MethodGet, fmt.Sprintf("%s/%d/%s", clusterHarness.VolumeAdminURL(), volumeID, fidOnly)))
readByVidFidBody := framework.ReadAllAndClose(t, readByVidFid)
if readByVidFid.StatusCode != http.StatusOK {
t.Fatalf("GET /{vid}/{fid} expected 200, got %d", readByVidFid.StatusCode)
}
if string(readByVidFidBody) != string(uploadPayload) {
t.Fatalf("GET /{vid}/{fid} body mismatch: got %q want %q", string(readByVidFidBody), string(uploadPayload))
}
// Shape 2: GET /{vid}/{fid}/{filename} — the filename segment here is arbitrary.
readWithFilename := framework.DoRequest(t, client, mustNewRequest(t, http.MethodGet, fmt.Sprintf("%s/%d/%s/%s", clusterHarness.VolumeAdminURL(), volumeID, fidOnly, "named.bin")))
readWithFilenameBody := framework.ReadAllAndClose(t, readWithFilename)
if readWithFilename.StatusCode != http.StatusOK {
t.Fatalf("GET /{vid}/{fid}/(unknown) expected 200, got %d", readWithFilename.StatusCode)
}
if string(readWithFilenameBody) != string(uploadPayload) {
t.Fatalf("GET /{vid}/{fid}/(unknown) body mismatch: got %q want %q", string(readWithFilenameBody), string(uploadPayload))
}
// Reuse the server-reported Last-Modified value for the conditional requests below.
lastModified := readWithFilename.Header.Get("Last-Modified")
if lastModified == "" {
t.Fatalf("expected Last-Modified header on read response")
}
// Conditional GET with a matching If-Modified-Since must answer 304.
ifModifiedSinceReq := mustNewRequest(t, http.MethodGet, clusterHarness.VolumeAdminURL()+"/"+fullFileID)
ifModifiedSinceReq.Header.Set("If-Modified-Since", lastModified)
ifModifiedSinceResp := framework.DoRequest(t, client, ifModifiedSinceReq)
_ = framework.ReadAllAndClose(t, ifModifiedSinceResp)
if ifModifiedSinceResp.StatusCode != http.StatusNotModified {
t.Fatalf("If-Modified-Since expected 304, got %d", ifModifiedSinceResp.StatusCode)
}
// Conditional HEAD must also answer 304 and carry no body at all.
headIfModifiedSinceReq := mustNewRequest(t, http.MethodHead, clusterHarness.VolumeAdminURL()+"/"+fullFileID)
headIfModifiedSinceReq.Header.Set("If-Modified-Since", lastModified)
headIfModifiedSinceResp := framework.DoRequest(t, client, headIfModifiedSinceReq)
headIfModifiedSinceBody := framework.ReadAllAndClose(t, headIfModifiedSinceResp)
if headIfModifiedSinceResp.StatusCode != http.StatusNotModified {
t.Fatalf("HEAD If-Modified-Since expected 304, got %d", headIfModifiedSinceResp.StatusCode)
}
if len(headIfModifiedSinceBody) != 0 {
t.Fatalf("HEAD If-Modified-Since expected empty body, got %d bytes", len(headIfModifiedSinceBody))
}
}
// TestMalformedVidFidPathReturnsBadRequest verifies that a GET against a
// /{vid}/{fid} path whose volume id and file id are both unparsable is
// rejected with 400 Bad Request.
func TestMalformedVidFidPathReturnsBadRequest(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P1())
	httpClient := framework.NewHTTPClient()
	badPathURL := harness.VolumeAdminURL() + "/not-a-vid/not-a-fid"
	badPathResp := framework.DoRequest(t, httpClient, mustNewRequest(t, http.MethodGet, badPathURL))
	_ = framework.ReadAllAndClose(t, badPathResp)
	if got := badPathResp.StatusCode; got != http.StatusBadRequest {
		t.Fatalf("malformed /{vid}/{fid} expected 400, got %d", got)
	}
}
// TestReadWrongCookieReturnsNotFound checks that a needle stored under one
// cookie cannot be read back through a file id carrying a different cookie:
// both GET and HEAD must answer 404, and the HEAD response must have no body.
func TestReadWrongCookieReturnsNotFound(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer conn.Close()
	const (
		volumeID = uint32(95)
		needleID = uint64(771235)
		cookie   = uint32(0xBEEFCACF)
	)
	framework.AllocateVolume(t, grpcClient, volumeID, "")
	httpClient := framework.NewHTTPClient()
	// Store the needle under the correct cookie first.
	goodFid := framework.NewFileID(volumeID, needleID, cookie)
	uploadResp := framework.UploadBytes(t, httpClient, harness.VolumeAdminURL(), goodFid, []byte("read-cookie-mismatch-content"))
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
	}
	// Same volume and needle id, but the cookie is off by one.
	badFid := framework.NewFileID(volumeID, needleID, cookie+1)
	getResp := framework.ReadBytes(t, httpClient, harness.VolumeAdminURL(), badFid)
	_ = framework.ReadAllAndClose(t, getResp)
	if getResp.StatusCode != http.StatusNotFound {
		t.Fatalf("GET with wrong cookie expected 404, got %d", getResp.StatusCode)
	}
	headResp := framework.DoRequest(t, httpClient, mustNewRequest(t, http.MethodHead, harness.VolumeAdminURL()+"/"+badFid))
	headBody := framework.ReadAllAndClose(t, headResp)
	if headResp.StatusCode != http.StatusNotFound {
		t.Fatalf("HEAD with wrong cookie expected 404, got %d", headResp.StatusCode)
	}
	if len(headBody) != 0 {
		t.Fatalf("HEAD wrong-cookie response body should be empty, got %d bytes", len(headBody))
	}
}
// TestConditionalHeaderPrecedenceAndInvalidIfModifiedSince exercises two
// conditional-GET branches: a matching If-Modified-Since yields 304 even when
// If-None-Match carries a non-matching ETag, and an unparsable
// If-Modified-Since is ignored so the full body is served with 200.
func TestConditionalHeaderPrecedenceAndInvalidIfModifiedSince(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(99)
	framework.AllocateVolume(t, grpcClient, volumeID, "")
	httpClient := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, 772002, 0x2B3C4D5E)
	payload := []byte("conditional-precedence-content")
	uploadResp := framework.UploadBytes(t, httpClient, harness.VolumeAdminURL(), fid, payload)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
	}
	// Unconditional read supplies the Last-Modified value used below.
	baseline := framework.ReadBytes(t, httpClient, harness.VolumeAdminURL(), fid)
	_ = framework.ReadAllAndClose(t, baseline)
	if baseline.StatusCode != http.StatusOK {
		t.Fatalf("baseline read expected 200, got %d", baseline.StatusCode)
	}
	lastModified := baseline.Header.Get("Last-Modified")
	if lastModified == "" {
		t.Fatalf("baseline read expected Last-Modified header")
	}
	fileURL := harness.VolumeAdminURL() + "/" + fid
	// Matching If-Modified-Since takes effect despite the mismatching ETag.
	precedenceReq := mustNewRequest(t, http.MethodGet, fileURL)
	precedenceReq.Header.Set("If-Modified-Since", lastModified)
	precedenceReq.Header.Set("If-None-Match", "\"definitely-different-etag\"")
	precedenceResp := framework.DoRequest(t, httpClient, precedenceReq)
	precedenceBody := framework.ReadAllAndClose(t, precedenceResp)
	if precedenceResp.StatusCode != http.StatusNotModified {
		t.Fatalf("conditional precedence expected 304, got %d", precedenceResp.StatusCode)
	}
	if len(precedenceBody) != 0 {
		t.Fatalf("conditional precedence expected empty body, got %d bytes", len(precedenceBody))
	}
	// An unparsable If-Modified-Since falls back to a normal 200 read.
	invalidReq := mustNewRequest(t, http.MethodGet, fileURL)
	invalidReq.Header.Set("If-Modified-Since", "not-a-valid-http-date")
	invalidReq.Header.Set("If-None-Match", "\"definitely-different-etag\"")
	invalidResp := framework.DoRequest(t, httpClient, invalidReq)
	invalidBody := framework.ReadAllAndClose(t, invalidResp)
	if invalidResp.StatusCode != http.StatusOK {
		t.Fatalf("invalid If-Modified-Since with mismatched etag expected 200, got %d", invalidResp.StatusCode)
	}
	if string(invalidBody) != string(payload) {
		t.Fatalf("invalid If-Modified-Since fallback body mismatch: got %q want %q", string(invalidBody), string(payload))
	}
}

// ---------------------------------------------------------------------------
// File: test/volume_server/http/read_write_delete_test.go (new file, 123 lines)
// ---------------------------------------------------------------------------
package volume_server_http_test
import (
"net/http"
"strconv"
"testing"
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
)
// TestUploadReadRangeHeadDeleteRoundTrip walks a needle through its whole
// HTTP lifecycle: upload (201), full read with body and ETag check, 304 via
// If-None-Match, a satisfiable and an unsatisfiable Range read, HEAD with and
// without If-None-Match, delete (202), and finally a 404 on re-read.
func TestUploadReadRangeHeadDeleteRoundTrip(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(7)
	framework.AllocateVolume(t, grpcClient, volumeID, "")
	fid := framework.NewFileID(volumeID, 123456, 0xA1B2C3D4)
	payload := []byte("hello-volume-server-integration")
	httpClient := framework.NewHTTPClient()
	fileURL := harness.VolumeAdminURL() + "/" + fid
	uploadResp := framework.UploadBytes(t, httpClient, harness.VolumeAdminURL(), fid, payload)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("upload status: expected 201, got %d", uploadResp.StatusCode)
	}
	getResp := framework.ReadBytes(t, httpClient, harness.VolumeAdminURL(), fid)
	getBody := framework.ReadAllAndClose(t, getResp)
	if getResp.StatusCode != http.StatusOK {
		t.Fatalf("get status: expected 200, got %d", getResp.StatusCode)
	}
	if string(getBody) != string(payload) {
		t.Fatalf("get body mismatch: got %q want %q", string(getBody), string(payload))
	}
	etag := getResp.Header.Get("ETag")
	if etag == "" {
		t.Fatalf("expected ETag header from GET response")
	}
	// Conditional GET with the just-returned ETag must short-circuit to 304.
	notModifiedReq := mustNewRequest(t, http.MethodGet, fileURL)
	notModifiedReq.Header.Set("If-None-Match", etag)
	notModifiedResp := framework.DoRequest(t, httpClient, notModifiedReq)
	_ = framework.ReadAllAndClose(t, notModifiedResp)
	if notModifiedResp.StatusCode != http.StatusNotModified {
		t.Fatalf("if-none-match expected 304, got %d", notModifiedResp.StatusCode)
	}
	// A satisfiable byte range returns 206 with just that slice.
	rangeReq := mustNewRequest(t, http.MethodGet, fileURL)
	rangeReq.Header.Set("Range", "bytes=0-4")
	rangeResp := framework.DoRequest(t, httpClient, rangeReq)
	rangeBody := framework.ReadAllAndClose(t, rangeResp)
	if rangeResp.StatusCode != http.StatusPartialContent {
		t.Fatalf("range status: expected 206, got %d", rangeResp.StatusCode)
	}
	if got, want := string(rangeBody), "hello"; got != want {
		t.Fatalf("range body mismatch: got %q want %q", got, want)
	}
	// A range entirely past the end of the needle is unsatisfiable.
	invalidRangeReq := mustNewRequest(t, http.MethodGet, fileURL)
	invalidRangeReq.Header.Set("Range", "bytes=9999-10000")
	invalidRangeResp := framework.DoRequest(t, httpClient, invalidRangeReq)
	_ = framework.ReadAllAndClose(t, invalidRangeResp)
	if invalidRangeResp.StatusCode != http.StatusRequestedRangeNotSatisfiable {
		t.Fatalf("invalid range expected 416, got %d", invalidRangeResp.StatusCode)
	}
	headResp := framework.DoRequest(t, httpClient, mustNewRequest(t, http.MethodHead, fileURL))
	headBody := framework.ReadAllAndClose(t, headResp)
	if headResp.StatusCode != http.StatusOK {
		t.Fatalf("head status: expected 200, got %d", headResp.StatusCode)
	}
	if got := headResp.Header.Get("Content-Length"); got != strconv.Itoa(len(payload)) {
		t.Fatalf("head content-length mismatch: got %q want %d", got, len(payload))
	}
	if len(headBody) != 0 {
		t.Fatalf("head body should be empty, got %d bytes", len(headBody))
	}
	headNotModifiedReq := mustNewRequest(t, http.MethodHead, fileURL)
	headNotModifiedReq.Header.Set("If-None-Match", etag)
	headNotModifiedResp := framework.DoRequest(t, httpClient, headNotModifiedReq)
	headNotModifiedBody := framework.ReadAllAndClose(t, headNotModifiedResp)
	if headNotModifiedResp.StatusCode != http.StatusNotModified {
		t.Fatalf("head if-none-match expected 304, got %d", headNotModifiedResp.StatusCode)
	}
	if len(headNotModifiedBody) != 0 {
		t.Fatalf("head if-none-match body should be empty, got %d bytes", len(headNotModifiedBody))
	}
	deleteResp := framework.DoRequest(t, httpClient, mustNewRequest(t, http.MethodDelete, fileURL))
	_ = framework.ReadAllAndClose(t, deleteResp)
	if deleteResp.StatusCode != http.StatusAccepted {
		t.Fatalf("delete status: expected 202, got %d", deleteResp.StatusCode)
	}
	notFoundResp := framework.ReadBytes(t, httpClient, harness.VolumeAdminURL(), fid)
	_ = framework.ReadAllAndClose(t, notFoundResp)
	if notFoundResp.StatusCode != http.StatusNotFound {
		t.Fatalf("read after delete: expected 404, got %d", notFoundResp.StatusCode)
	}
}
// TestInvalidReadPathReturnsBadRequest verifies that a read path with an
// unparsable "vid,needle" segment is rejected with 400 Bad Request.
func TestInvalidReadPathReturnsBadRequest(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P1())
	httpClient := framework.NewHTTPClient()
	invalidURL := harness.VolumeAdminURL() + "/invalid,needle"
	resp := framework.DoRequest(t, httpClient, mustNewRequest(t, http.MethodGet, invalidURL))
	_ = framework.ReadAllAndClose(t, resp)
	if got := resp.StatusCode; got != http.StatusBadRequest {
		t.Fatalf("invalid read expected 400, got %d", got)
	}
}

// ---------------------------------------------------------------------------
// File: test/volume_server/http/throttling_test.go (new file, 730 lines)
// ---------------------------------------------------------------------------
package volume_server_http_test
import (
"bytes"
"context"
"io"
"net/http"
"testing"
"time"
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
"github.com/seaweedfs/seaweedfs/weed/storage/needle"
)
type pausableReader struct {
remaining int64
pauseAfter int64
paused bool
unblock <-chan struct{}
}
func (r *pausableReader) Read(p []byte) (int, error) {
if r.remaining <= 0 {
return 0, io.EOF
}
if !r.paused && r.pauseAfter > 0 {
n := int64(len(p))
if n > r.pauseAfter {
n = r.pauseAfter
}
for i := int64(0); i < n; i++ {
p[i] = 'a'
}
r.remaining -= n
r.pauseAfter -= n
if r.pauseAfter == 0 {
r.paused = true
}
return int(n), nil
}
if r.paused {
<-r.unblock
r.paused = false
}
n := int64(len(p))
if n > r.remaining {
n = r.remaining
}
for i := int64(0); i < n; i++ {
p[i] = 'b'
}
r.remaining -= n
return int(n), nil
}
// TestUploadLimitTimeoutAndReplicateBypass exercises the P8 concurrent-upload
// byte limit: while one oversized upload is parked mid-body and holding the
// limit, a ?type=replicate upload must bypass the limit (201) while a plain
// upload must time out waiting for a slot (429).
func TestUploadLimitTimeoutAndReplicateBypass(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P8())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(98)
	framework.AllocateVolume(t, grpcClient, volumeID, "")
	const blockedUploadSize = 2 * 1024 * 1024 // over 1MB P8 upload limit
	unblockFirstUpload := make(chan struct{})
	firstUploadDone := make(chan error, 1)
	firstFID := framework.NewFileID(volumeID, 880001, 0x1A2B3C4D)
	// First upload: sends one byte then blocks in its body reader until
	// unblockFirstUpload is closed, keeping the upload limit occupied.
	go func() {
		req, err := http.NewRequest(http.MethodPost, clusterHarness.VolumeAdminURL()+"/"+firstFID, &pausableReader{
			remaining:  blockedUploadSize,
			pauseAfter: 1,
			unblock:    unblockFirstUpload,
		})
		if err != nil {
			firstUploadDone <- err
			return
		}
		req.Header.Set("Content-Type", "application/octet-stream")
		req.ContentLength = blockedUploadSize
		// Deliberately a client without a timeout: the request stays in
		// flight until the test closes unblockFirstUpload.
		resp, err := (&http.Client{}).Do(req)
		if resp != nil {
			_, _ = io.Copy(io.Discard, resp.Body)
			_ = resp.Body.Close()
		}
		firstUploadDone <- err
	}()
	// Give the first upload time to pass limit checks and block in body processing.
	time.Sleep(300 * time.Millisecond)
	// Replication traffic is exempt from the upload limit and must succeed.
	replicateFID := framework.NewFileID(volumeID, 880002, 0x5E6F7A8B)
	replicateReq, err := http.NewRequest(http.MethodPost, clusterHarness.VolumeAdminURL()+"/"+replicateFID+"?type=replicate", bytes.NewReader([]byte("replicate")))
	if err != nil {
		t.Fatalf("create replicate request: %v", err)
	}
	replicateReq.Header.Set("Content-Type", "application/octet-stream")
	replicateReq.ContentLength = int64(len("replicate"))
	replicateResp, err := framework.NewHTTPClient().Do(replicateReq)
	if err != nil {
		t.Fatalf("replicate request failed: %v", err)
	}
	_ = framework.ReadAllAndClose(t, replicateResp)
	if replicateResp.StatusCode != http.StatusCreated {
		t.Fatalf("replicate request expected 201 bypassing limit, got %d", replicateResp.StatusCode)
	}
	// A normal upload is subject to the limit and should be rejected with
	// 429 once its wait for a slot times out on the server side.
	normalFID := framework.NewFileID(volumeID, 880003, 0x9C0D1E2F)
	normalReq, err := http.NewRequest(http.MethodPost, clusterHarness.VolumeAdminURL()+"/"+normalFID, bytes.NewReader([]byte("normal")))
	if err != nil {
		t.Fatalf("create normal request: %v", err)
	}
	normalReq.Header.Set("Content-Type", "application/octet-stream")
	normalReq.ContentLength = int64(len("normal"))
	timeoutClient := &http.Client{Timeout: 10 * time.Second}
	normalResp, err := timeoutClient.Do(normalReq)
	if err != nil {
		t.Fatalf("normal upload request failed: %v", err)
	}
	_ = framework.ReadAllAndClose(t, normalResp)
	if normalResp.StatusCode != http.StatusTooManyRequests {
		t.Fatalf("normal upload expected 429 while limit blocked, got %d", normalResp.StatusCode)
	}
	// Release the parked upload and wait for its goroutine to finish so the
	// test does not leak an in-flight request.
	close(unblockFirstUpload)
	select {
	case <-firstUploadDone:
	case <-time.After(5 * time.Second):
		t.Fatalf("timed out waiting for blocked upload to finish")
	}
}
// TestUploadLimitWaitThenProceed verifies that an upload queued behind a
// limit-holding upload does not fail outright: once the first upload is
// released and frees the limit, the queued upload proceeds and returns 201.
func TestUploadLimitWaitThenProceed(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P8())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(111)
	framework.AllocateVolume(t, grpcClient, volumeID, "")
	const blockedUploadSize = 2 * 1024 * 1024
	unblockFirstUpload := make(chan struct{})
	firstUploadDone := make(chan error, 1)
	firstFID := framework.NewFileID(volumeID, 880601, 0x6A2B3C4D)
	// First upload parks mid-body via pausableReader, holding the limit.
	go func() {
		req, err := http.NewRequest(http.MethodPost, clusterHarness.VolumeAdminURL()+"/"+firstFID, &pausableReader{
			remaining:  blockedUploadSize,
			pauseAfter: 1,
			unblock:    unblockFirstUpload,
		})
		if err != nil {
			firstUploadDone <- err
			return
		}
		req.Header.Set("Content-Type", "application/octet-stream")
		req.ContentLength = blockedUploadSize
		resp, err := (&http.Client{}).Do(req)
		if resp != nil {
			_ = framework.ReadAllAndClose(t, resp)
		}
		firstUploadDone <- err
	}()
	// Let the first upload reach its blocked state before queueing another.
	time.Sleep(300 * time.Millisecond)
	type uploadResult struct {
		resp *http.Response
		err  error
	}
	secondUploadDone := make(chan uploadResult, 1)
	secondFID := framework.NewFileID(volumeID, 880602, 0x6A2B3C4E)
	// Second upload starts while the limit is held; it should wait rather
	// than fail, and complete after the first upload is released below.
	go func() {
		req, err := http.NewRequest(http.MethodPost, clusterHarness.VolumeAdminURL()+"/"+secondFID, bytes.NewReader([]byte("wait-then-proceed")))
		if err != nil {
			secondUploadDone <- uploadResult{err: err}
			return
		}
		req.Header.Set("Content-Type", "application/octet-stream")
		req.ContentLength = int64(len("wait-then-proceed"))
		resp, err := (&http.Client{Timeout: 10 * time.Second}).Do(req)
		secondUploadDone <- uploadResult{resp: resp, err: err}
	}()
	// Release the first upload while the second is still inside its wait.
	time.Sleep(500 * time.Millisecond)
	close(unblockFirstUpload)
	select {
	case firstErr := <-firstUploadDone:
		if firstErr != nil {
			t.Fatalf("first blocked upload failed: %v", firstErr)
		}
	case <-time.After(5 * time.Second):
		t.Fatalf("timed out waiting for first upload completion")
	}
	select {
	case result := <-secondUploadDone:
		if result.err != nil {
			t.Fatalf("second upload failed: %v", result.err)
		}
		_ = framework.ReadAllAndClose(t, result.resp)
		if result.resp.StatusCode != http.StatusCreated {
			t.Fatalf("second upload expected 201 after waiting for slot, got %d", result.resp.StatusCode)
		}
	case <-time.After(5 * time.Second):
		t.Fatalf("timed out waiting for second upload completion")
	}
}
// TestUploadLimitTimeoutThenRecovery checks that the upload limit both
// rejects (429) a second upload while it is saturated and fully recovers:
// after the blocking upload completes, a fresh upload succeeds with 201.
func TestUploadLimitTimeoutThenRecovery(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P8())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(113)
	framework.AllocateVolume(t, grpcClient, volumeID, "")
	const blockedUploadSize = 2 * 1024 * 1024
	unblockFirstUpload := make(chan struct{})
	firstUploadDone := make(chan error, 1)
	firstFID := framework.NewFileID(volumeID, 880801, 0x7A2B3C4D)
	// First upload parks mid-body via pausableReader, holding the limit.
	go func() {
		req, err := http.NewRequest(http.MethodPost, clusterHarness.VolumeAdminURL()+"/"+firstFID, &pausableReader{
			remaining:  blockedUploadSize,
			pauseAfter: 1,
			unblock:    unblockFirstUpload,
		})
		if err != nil {
			firstUploadDone <- err
			return
		}
		req.Header.Set("Content-Type", "application/octet-stream")
		req.ContentLength = blockedUploadSize
		resp, err := (&http.Client{}).Do(req)
		if resp != nil {
			_ = framework.ReadAllAndClose(t, resp)
		}
		firstUploadDone <- err
	}()
	// Let the first upload reach its blocked state.
	time.Sleep(300 * time.Millisecond)
	// While the limit is held, a plain upload should be turned away with 429.
	timeoutFID := framework.NewFileID(volumeID, 880802, 0x7A2B3C4E)
	timeoutResp := framework.UploadBytes(t, &http.Client{Timeout: 10 * time.Second}, clusterHarness.VolumeAdminURL(), timeoutFID, []byte("should-timeout"))
	_ = framework.ReadAllAndClose(t, timeoutResp)
	if timeoutResp.StatusCode != http.StatusTooManyRequests {
		t.Fatalf("second upload under blocked pressure expected 429, got %d", timeoutResp.StatusCode)
	}
	// Release the blocking upload and wait for it to drain.
	close(unblockFirstUpload)
	select {
	case firstErr := <-firstUploadDone:
		if firstErr != nil {
			t.Fatalf("first blocked upload failed: %v", firstErr)
		}
	case <-time.After(5 * time.Second):
		t.Fatalf("timed out waiting for first upload completion")
	}
	// With the limit freed, uploads must work again.
	recoveryFID := framework.NewFileID(volumeID, 880803, 0x7A2B3C4F)
	recoveryResp := framework.UploadBytes(t, framework.NewHTTPClient(), clusterHarness.VolumeAdminURL(), recoveryFID, []byte("recovered-upload"))
	_ = framework.ReadAllAndClose(t, recoveryResp)
	if recoveryResp.StatusCode != http.StatusCreated {
		t.Fatalf("recovery upload expected 201, got %d", recoveryResp.StatusCode)
	}
}
// TestDownloadLimitTimeoutReturnsTooManyRequests verifies the P8 concurrent
// download limit: while a first GET of a large needle holds the limit (its
// body is deliberately left unread), a second GET must be rejected with 429.
func TestDownloadLimitTimeoutReturnsTooManyRequests(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P8())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(99)
	framework.AllocateVolume(t, grpcClient, volumeID, "")
	largePayload := make([]byte, 12*1024*1024) // over 1MB P8 download limit
	for i := range largePayload {
		largePayload[i] = byte(i % 251)
	}
	downloadFID := framework.NewFileID(volumeID, 880101, 0x10203040)
	uploadResp := framework.UploadBytes(t, framework.NewHTTPClient(), clusterHarness.VolumeAdminURL(), downloadFID, largePayload)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("large upload expected 201, got %d", uploadResp.StatusCode)
	}
	firstResp, err := (&http.Client{}).Do(mustNewRequest(t, http.MethodGet, clusterHarness.VolumeAdminURL()+"/"+downloadFID))
	if err != nil {
		t.Fatalf("first GET failed: %v", err)
	}
	if firstResp.StatusCode != http.StatusOK {
		_ = framework.ReadAllAndClose(t, firstResp)
		t.Fatalf("first GET expected 200, got %d", firstResp.StatusCode)
	}
	defer firstResp.Body.Close()
	// Keep first response body unread so server write path stays in-flight.
	time.Sleep(300 * time.Millisecond)
	secondClient := &http.Client{Timeout: 10 * time.Second}
	secondResp, err := secondClient.Do(mustNewRequest(t, http.MethodGet, clusterHarness.VolumeAdminURL()+"/"+downloadFID))
	if err != nil {
		t.Fatalf("second GET failed: %v", err)
	}
	_ = framework.ReadAllAndClose(t, secondResp)
	if secondResp.StatusCode != http.StatusTooManyRequests {
		t.Fatalf("second GET expected 429 while first download holds limit, got %d", secondResp.StatusCode)
	}
}
// TestDownloadLimitWaitThenProceedWithoutReplica checks that with no replica
// to proxy to, a GET queued behind a limit-holding download waits rather than
// failing, then completes with the full body once the first GET's body is
// closed and the limit is released.
func TestDownloadLimitWaitThenProceedWithoutReplica(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P8())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(112)
	framework.AllocateVolume(t, grpcClient, volumeID, "")
	largePayload := make([]byte, 12*1024*1024)
	for i := range largePayload {
		largePayload[i] = byte(i % 251)
	}
	fid := framework.NewFileID(volumeID, 880701, 0x60708090)
	uploadResp := framework.UploadBytes(t, framework.NewHTTPClient(), clusterHarness.VolumeAdminURL(), fid, largePayload)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("large upload expected 201, got %d", uploadResp.StatusCode)
	}
	// First GET holds the download limit: its body is intentionally not read.
	firstResp, err := (&http.Client{}).Do(mustNewRequest(t, http.MethodGet, clusterHarness.VolumeAdminURL()+"/"+fid))
	if err != nil {
		t.Fatalf("first GET failed: %v", err)
	}
	if firstResp.StatusCode != http.StatusOK {
		_ = framework.ReadAllAndClose(t, firstResp)
		t.Fatalf("first GET expected 200, got %d", firstResp.StatusCode)
	}
	type readResult struct {
		resp *http.Response
		err  error
	}
	secondReadDone := make(chan readResult, 1)
	// Second GET starts while the limit is held and should queue, not fail.
	go func() {
		resp, readErr := (&http.Client{Timeout: 10 * time.Second}).Do(mustNewRequest(t, http.MethodGet, clusterHarness.VolumeAdminURL()+"/"+fid))
		secondReadDone <- readResult{resp: resp, err: readErr}
	}()
	// Release the limit while the second GET is still waiting.
	time.Sleep(500 * time.Millisecond)
	_ = firstResp.Body.Close()
	select {
	case result := <-secondReadDone:
		if result.err != nil {
			t.Fatalf("second GET failed: %v", result.err)
		}
		secondBody := framework.ReadAllAndClose(t, result.resp)
		if result.resp.StatusCode != http.StatusOK {
			t.Fatalf("second GET expected 200 after waiting for slot, got %d", result.resp.StatusCode)
		}
		if len(secondBody) != len(largePayload) {
			t.Fatalf("second GET body size mismatch: got %d want %d", len(secondBody), len(largePayload))
		}
	case <-time.After(5 * time.Second):
		t.Fatalf("timed out waiting for second GET completion")
	}
}
// TestDownloadLimitTimeoutThenRecovery checks that the download limit both
// rejects (429) a GET while it is saturated and fully recovers: after the
// limit-holding response body is closed, a fresh GET returns the whole needle.
func TestDownloadLimitTimeoutThenRecovery(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P8())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(114)
	framework.AllocateVolume(t, grpcClient, volumeID, "")
	largePayload := make([]byte, 12*1024*1024)
	for i := range largePayload {
		largePayload[i] = byte(i % 251)
	}
	fid := framework.NewFileID(volumeID, 880901, 0x708090A0)
	uploadResp := framework.UploadBytes(t, framework.NewHTTPClient(), clusterHarness.VolumeAdminURL(), fid, largePayload)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("large upload expected 201, got %d", uploadResp.StatusCode)
	}
	// First GET holds the download limit: its body is intentionally not read.
	firstResp, err := (&http.Client{}).Do(mustNewRequest(t, http.MethodGet, clusterHarness.VolumeAdminURL()+"/"+fid))
	if err != nil {
		t.Fatalf("first GET failed: %v", err)
	}
	if firstResp.StatusCode != http.StatusOK {
		_ = framework.ReadAllAndClose(t, firstResp)
		t.Fatalf("first GET expected 200, got %d", firstResp.StatusCode)
	}
	time.Sleep(300 * time.Millisecond)
	// While the limit is held, a second GET should be turned away with 429.
	timeoutResp := framework.ReadBytes(t, &http.Client{Timeout: 10 * time.Second}, clusterHarness.VolumeAdminURL(), fid)
	_ = framework.ReadAllAndClose(t, timeoutResp)
	if timeoutResp.StatusCode != http.StatusTooManyRequests {
		t.Fatalf("second GET under blocked pressure expected 429, got %d", timeoutResp.StatusCode)
	}
	// Releasing the first body frees the limit; reads must work again.
	_ = firstResp.Body.Close()
	recoveryResp := framework.ReadBytes(t, framework.NewHTTPClient(), clusterHarness.VolumeAdminURL(), fid)
	recoveryBody := framework.ReadAllAndClose(t, recoveryResp)
	if recoveryResp.StatusCode != http.StatusOK {
		t.Fatalf("recovery GET expected 200, got %d", recoveryResp.StatusCode)
	}
	if len(recoveryBody) != len(largePayload) {
		t.Fatalf("recovery GET body size mismatch: got %d want %d", len(recoveryBody), len(largePayload))
	}
}
// TestDownloadLimitOverageProxiesToReplica verifies the replica fallback in
// proxy read mode: with a 001-replicated volume on two nodes and node0's
// download limit held by an unread GET, a second GET to node0 should still
// return 200 with the full body by being proxied to the replica on node1.
func TestDownloadLimitOverageProxiesToReplica(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	profile := matrix.P8()
	profile.ReadMode = "proxy"
	clusterHarness := framework.StartDualVolumeCluster(t, profile)
	conn0, grpc0 := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress(0))
	defer conn0.Close()
	conn1, grpc1 := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress(1))
	defer conn1.Close()
	const volumeID = uint32(100)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// Same 001-replicated volume on both nodes so node1 holds a replica.
	req := &volume_server_pb.AllocateVolumeRequest{
		VolumeId:    volumeID,
		Replication: "001",
		Version:     uint32(needle.GetCurrentVersion()),
	}
	if _, err := grpc0.AllocateVolume(ctx, req); err != nil {
		t.Fatalf("allocate replicated volume on node0: %v", err)
	}
	if _, err := grpc1.AllocateVolume(ctx, req); err != nil {
		t.Fatalf("allocate replicated volume on node1: %v", err)
	}
	largePayload := make([]byte, 12*1024*1024)
	for i := range largePayload {
		largePayload[i] = byte(i % 251)
	}
	fid := framework.NewFileID(volumeID, 880201, 0x0A0B0C0D)
	uploadResp := framework.UploadBytes(t, framework.NewHTTPClient(), clusterHarness.VolumeAdminURL(0), fid, largePayload)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("replicated large upload expected 201, got %d", uploadResp.StatusCode)
	}
	// Wait until the replica on node1 can actually serve the needle, so the
	// proxy fallback below has a working target.
	replicaReadURL := clusterHarness.VolumeAdminURL(1) + "/" + fid
	if !waitForHTTPStatus(t, framework.NewHTTPClient(), replicaReadURL, http.StatusOK, 10*time.Second, func(resp *http.Response) {
		_ = framework.ReadAllAndClose(t, resp)
	}) {
		t.Fatalf("replica did not become readable within deadline")
	}
	// First GET to node0 holds its download limit (body left unread).
	firstResp, err := (&http.Client{}).Do(mustNewRequest(t, http.MethodGet, clusterHarness.VolumeAdminURL(0)+"/"+fid))
	if err != nil {
		t.Fatalf("first GET failed: %v", err)
	}
	if firstResp.StatusCode != http.StatusOK {
		_ = framework.ReadAllAndClose(t, firstResp)
		t.Fatalf("first GET expected 200, got %d", firstResp.StatusCode)
	}
	defer firstResp.Body.Close()
	time.Sleep(300 * time.Millisecond)
	// Second GET to node0 should succeed via proxying to node1's replica.
	secondResp, err := framework.NewHTTPClient().Do(mustNewRequest(t, http.MethodGet, clusterHarness.VolumeAdminURL(0)+"/"+fid))
	if err != nil {
		t.Fatalf("second GET failed: %v", err)
	}
	secondBody := framework.ReadAllAndClose(t, secondResp)
	if secondResp.StatusCode != http.StatusOK {
		t.Fatalf("second GET expected 200 via replica proxy fallback, got %d", secondResp.StatusCode)
	}
	if len(secondBody) != len(largePayload) {
		t.Fatalf("second GET proxied body size mismatch: got %d want %d", len(secondBody), len(largePayload))
	}
}
// TestDownloadLimitProxiedRequestSkipsReplicaFallbackAndTimesOut covers the
// complement of the proxy-fallback test: a request already marked
// ?proxied=true must not be forwarded to the replica again (avoiding proxy
// loops), so while node0's download limit is held it times out with 429 even
// though a readable replica exists on node1.
func TestDownloadLimitProxiedRequestSkipsReplicaFallbackAndTimesOut(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	profile := matrix.P8()
	profile.ReadMode = "proxy"
	clusterHarness := framework.StartDualVolumeCluster(t, profile)
	conn0, grpc0 := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress(0))
	defer conn0.Close()
	conn1, grpc1 := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress(1))
	defer conn1.Close()
	const volumeID = uint32(106)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// Same 001-replicated volume on both nodes so node1 holds a replica.
	req := &volume_server_pb.AllocateVolumeRequest{
		VolumeId:    volumeID,
		Replication: "001",
		Version:     uint32(needle.GetCurrentVersion()),
	}
	if _, err := grpc0.AllocateVolume(ctx, req); err != nil {
		t.Fatalf("allocate replicated volume on node0: %v", err)
	}
	if _, err := grpc1.AllocateVolume(ctx, req); err != nil {
		t.Fatalf("allocate replicated volume on node1: %v", err)
	}
	largePayload := make([]byte, 12*1024*1024)
	for i := range largePayload {
		largePayload[i] = byte(i % 251)
	}
	fid := framework.NewFileID(volumeID, 880202, 0x0A0B0D0E)
	uploadResp := framework.UploadBytes(t, framework.NewHTTPClient(), clusterHarness.VolumeAdminURL(0), fid, largePayload)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("replicated large upload expected 201, got %d", uploadResp.StatusCode)
	}
	// Ensure replica path is actually available, so a non-proxied request would proxy.
	replicaReadURL := clusterHarness.VolumeAdminURL(1) + "/" + fid
	if !waitForHTTPStatus(t, framework.NewHTTPClient(), replicaReadURL, http.StatusOK, 10*time.Second, func(resp *http.Response) {
		_ = framework.ReadAllAndClose(t, resp)
	}) {
		t.Fatalf("replica did not become readable within deadline")
	}
	// First GET to node0 holds its download limit (body left unread).
	firstResp, err := (&http.Client{}).Do(mustNewRequest(t, http.MethodGet, clusterHarness.VolumeAdminURL(0)+"/"+fid))
	if err != nil {
		t.Fatalf("first GET failed: %v", err)
	}
	if firstResp.StatusCode != http.StatusOK {
		_ = framework.ReadAllAndClose(t, firstResp)
		t.Fatalf("first GET expected 200, got %d", firstResp.StatusCode)
	}
	defer firstResp.Body.Close()
	time.Sleep(300 * time.Millisecond)
	// proxied=true should bypass replica fallback and hit wait/timeout branch.
	secondResp, err := framework.NewHTTPClient().Do(mustNewRequest(t, http.MethodGet, clusterHarness.VolumeAdminURL(0)+"/"+fid+"?proxied=true"))
	if err != nil {
		t.Fatalf("second GET failed: %v", err)
	}
	_ = framework.ReadAllAndClose(t, secondResp)
	if secondResp.StatusCode != http.StatusTooManyRequests {
		t.Fatalf("second GET with proxied=true expected 429 timeout path, got %d", secondResp.StatusCode)
	}
}
// TestUploadLimitDisabledAllowsConcurrentUploads verifies that under the P1
// profile (no upload limit configured) a second upload succeeds with 201 even
// while a large upload is parked mid-body.
func TestUploadLimitDisabledAllowsConcurrentUploads(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(107)
	framework.AllocateVolume(t, grpcClient, volumeID, "")
	const blockedUploadSize = 2 * 1024 * 1024
	unblockFirstUpload := make(chan struct{})
	firstUploadDone := make(chan error, 1)
	firstFID := framework.NewFileID(volumeID, 880301, 0x1A2B3C5D)
	// First upload parks mid-body via pausableReader; with no limit this
	// must not affect other uploads.
	go func() {
		req, err := http.NewRequest(http.MethodPost, clusterHarness.VolumeAdminURL()+"/"+firstFID, &pausableReader{
			remaining:  blockedUploadSize,
			pauseAfter: 1,
			unblock:    unblockFirstUpload,
		})
		if err != nil {
			firstUploadDone <- err
			return
		}
		req.Header.Set("Content-Type", "application/octet-stream")
		req.ContentLength = blockedUploadSize
		resp, err := (&http.Client{}).Do(req)
		if resp != nil {
			_ = framework.ReadAllAndClose(t, resp)
		}
		firstUploadDone <- err
	}()
	// Let the first upload reach its blocked state.
	time.Sleep(300 * time.Millisecond)
	secondFID := framework.NewFileID(volumeID, 880302, 0x1A2B3C5E)
	secondResp := framework.UploadBytes(t, framework.NewHTTPClient(), clusterHarness.VolumeAdminURL(), secondFID, []byte("no-limit-second-upload"))
	_ = framework.ReadAllAndClose(t, secondResp)
	if secondResp.StatusCode != http.StatusCreated {
		t.Fatalf("second upload with disabled limit expected 201, got %d", secondResp.StatusCode)
	}
	// Release and drain the parked upload so no request leaks past the test.
	close(unblockFirstUpload)
	select {
	case <-firstUploadDone:
	case <-time.After(5 * time.Second):
		t.Fatalf("timed out waiting for first upload completion")
	}
}
// TestDownloadLimitDisabledAllowsConcurrentDownloads verifies that under the
// P1 profile (no download limit configured) a second GET succeeds with the
// full body even while a first GET keeps its response body unread.
func TestDownloadLimitDisabledAllowsConcurrentDownloads(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(108)
	framework.AllocateVolume(t, grpcClient, volumeID, "")
	largePayload := make([]byte, 12*1024*1024)
	for i := range largePayload {
		largePayload[i] = byte(i % 251)
	}
	fid := framework.NewFileID(volumeID, 880401, 0x20304050)
	uploadResp := framework.UploadBytes(t, framework.NewHTTPClient(), clusterHarness.VolumeAdminURL(), fid, largePayload)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("large upload expected 201, got %d", uploadResp.StatusCode)
	}
	// First GET keeps its body unread; with no limit this must not block
	// other readers.
	firstResp, err := (&http.Client{}).Do(mustNewRequest(t, http.MethodGet, clusterHarness.VolumeAdminURL()+"/"+fid))
	if err != nil {
		t.Fatalf("first GET failed: %v", err)
	}
	if firstResp.StatusCode != http.StatusOK {
		_ = framework.ReadAllAndClose(t, firstResp)
		t.Fatalf("first GET expected 200, got %d", firstResp.StatusCode)
	}
	defer firstResp.Body.Close()
	time.Sleep(300 * time.Millisecond)
	secondResp := framework.ReadBytes(t, framework.NewHTTPClient(), clusterHarness.VolumeAdminURL(), fid)
	secondBody := framework.ReadAllAndClose(t, secondResp)
	if secondResp.StatusCode != http.StatusOK {
		t.Fatalf("second GET with disabled limit expected 200, got %d", secondResp.StatusCode)
	}
	if len(secondBody) != len(largePayload) {
		t.Fatalf("second GET body size mismatch: got %d want %d", len(secondBody), len(largePayload))
	}
}
// TestDownloadLimitInvalidVidWhileOverLimitReturnsBadRequest verifies that a
// request with an unparsable volume id still fails fast with 400 while a large
// in-flight download keeps the P8 profile's concurrent-download limit busy.
func TestDownloadLimitInvalidVidWhileOverLimitReturnsBadRequest(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P8())
	conn, volumeClient := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(110)
	framework.AllocateVolume(t, volumeClient, volumeID, "")

	// Deterministic ~12 MiB payload; large enough to keep a download open.
	payload := make([]byte, 12*1024*1024)
	for i := range payload {
		payload[i] = byte(i % 251)
	}
	fid := framework.NewFileID(volumeID, 880501, 0x50607080)

	uploadResp := framework.UploadBytes(t, framework.NewHTTPClient(), harness.VolumeAdminURL(), fid, payload)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("large upload expected 201, got %d", uploadResp.StatusCode)
	}

	// Hold the first download open (body intentionally unread) so the server
	// is over its download limit when the invalid request arrives.
	holdClient := &http.Client{}
	firstResp, err := holdClient.Do(mustNewRequest(t, http.MethodGet, harness.VolumeAdminURL()+"/"+fid))
	if err != nil {
		t.Fatalf("first GET failed: %v", err)
	}
	if firstResp.StatusCode != http.StatusOK {
		_ = framework.ReadAllAndClose(t, firstResp)
		t.Fatalf("first GET expected 200, got %d", firstResp.StatusCode)
	}
	defer firstResp.Body.Close()
	time.Sleep(300 * time.Millisecond)

	// An unparsable vid must be rejected before any throttling applies.
	invalidResp := framework.DoRequest(t, framework.NewHTTPClient(), mustNewRequest(t, http.MethodGet, harness.VolumeAdminURL()+"/not-a-vid,1234567890ab"))
	_ = framework.ReadAllAndClose(t, invalidResp)
	if invalidResp.StatusCode != http.StatusBadRequest {
		t.Fatalf("invalid vid while over limit expected 400, got %d", invalidResp.StatusCode)
	}
}

118
test/volume_server/http/write_delete_variants_test.go

@@ -0,0 +1,118 @@
package volume_server_http_test
import (
"encoding/json"
"net/http"
"testing"
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
)
// TestWriteUnchangedAndDeleteEdgeVariants covers three write/delete edges:
// re-uploading identical content (204 with an ETag), deleting with a
// mismatched cookie (400), and deleting a never-written key (404, size=0).
func TestWriteUnchangedAndDeleteEdgeVariants(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, volumeClient := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(87)
	framework.AllocateVolume(t, volumeClient, volumeID, "")

	const (
		key    = uint64(999001)
		cookie = uint32(0xDEADBEEF)
	)
	fid := framework.NewFileID(volumeID, key, cookie)
	client := framework.NewHTTPClient()
	content := []byte("unchanged-write-content")

	// First write creates the needle.
	firstResp := framework.DoRequest(t, client, newUploadRequest(t, harness.VolumeAdminURL()+"/"+fid, content))
	_ = framework.ReadAllAndClose(t, firstResp)
	if firstResp.StatusCode != http.StatusCreated {
		t.Fatalf("first upload expected 201, got %d", firstResp.StatusCode)
	}

	// Writing byte-identical content again is reported as "unchanged".
	secondResp := framework.DoRequest(t, client, newUploadRequest(t, harness.VolumeAdminURL()+"/"+fid, content))
	_ = framework.ReadAllAndClose(t, secondResp)
	if secondResp.StatusCode != http.StatusNoContent {
		t.Fatalf("second unchanged upload expected 204, got %d", secondResp.StatusCode)
	}
	if secondResp.Header.Get("ETag") == "" {
		t.Fatalf("second unchanged upload expected ETag header")
	}

	// A delete whose fid carries the wrong cookie must be rejected.
	badCookieFid := framework.NewFileID(volumeID, key, cookie+1)
	badCookieResp := framework.DoRequest(t, client, mustNewRequest(t, http.MethodDelete, harness.VolumeAdminURL()+"/"+badCookieFid))
	_ = framework.ReadAllAndClose(t, badCookieResp)
	if badCookieResp.StatusCode != http.StatusBadRequest {
		t.Fatalf("delete with mismatched cookie expected 400, got %d", badCookieResp.StatusCode)
	}

	// Deleting a key that was never written returns 404 with a JSON size of 0.
	missingResp := framework.DoRequest(t, client, mustNewRequest(t, http.MethodDelete, harness.VolumeAdminURL()+"/"+framework.NewFileID(volumeID, key+1, cookie)))
	missingBody := framework.ReadAllAndClose(t, missingResp)
	if missingResp.StatusCode != http.StatusNotFound {
		t.Fatalf("delete missing needle expected 404, got %d", missingResp.StatusCode)
	}
	var sizes map[string]int64
	if err := json.Unmarshal(missingBody, &sizes); err != nil {
		t.Fatalf("decode delete missing response: %v", err)
	}
	if sizes["size"] != 0 {
		t.Fatalf("delete missing needle expected size=0, got %d", sizes["size"])
	}
}
// TestDeleteTimestampOverrideKeepsReadDeletedLastModifiedParity verifies that
// deleting with an explicit ?ts= override leaves the Last-Modified header of
// the needle, as read back via readDeleted=true, equal to its pre-delete value.
func TestDeleteTimestampOverrideKeepsReadDeletedLastModifiedParity(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, volumeClient := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(88)
	framework.AllocateVolume(t, volumeClient, volumeID, "")

	client := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, 999002, 0xABCD1234)

	uploadResp := framework.UploadBytes(t, client, harness.VolumeAdminURL(), fid, []byte("delete-ts-override"))
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
	}

	// Record the Last-Modified header before deletion for the parity check.
	preDeleteResp := framework.ReadBytes(t, client, harness.VolumeAdminURL(), fid)
	_ = framework.ReadAllAndClose(t, preDeleteResp)
	if preDeleteResp.StatusCode != http.StatusOK {
		t.Fatalf("pre-delete read expected 200, got %d", preDeleteResp.StatusCode)
	}
	wantLastModified := preDeleteResp.Header.Get("Last-Modified")
	if wantLastModified == "" {
		t.Fatalf("expected Last-Modified before delete")
	}

	// Delete with a fixed timestamp override.
	deleteResp := framework.DoRequest(t, client, mustNewRequest(t, http.MethodDelete, harness.VolumeAdminURL()+"/"+fid+"?ts=1700000000"))
	_ = framework.ReadAllAndClose(t, deleteResp)
	if deleteResp.StatusCode != http.StatusAccepted {
		t.Fatalf("delete with ts override expected 202, got %d", deleteResp.StatusCode)
	}

	// Reading the deleted needle must still expose the original header.
	readDeletedResp := framework.DoRequest(t, client, mustNewRequest(t, http.MethodGet, harness.VolumeAdminURL()+"/"+fid+"?readDeleted=true"))
	_ = framework.ReadAllAndClose(t, readDeletedResp)
	if readDeletedResp.StatusCode != http.StatusOK {
		t.Fatalf("readDeleted after ts override expected 200, got %d", readDeletedResp.StatusCode)
	}
	gotLastModified := readDeletedResp.Header.Get("Last-Modified")
	if gotLastModified == "" {
		t.Fatalf("expected Last-Modified header on readDeleted response")
	}
	if gotLastModified != wantLastModified {
		t.Fatalf("expected readDeleted Last-Modified parity with pre-delete header, got %q want %q", gotLastModified, wantLastModified)
	}
}

74
test/volume_server/http/write_error_variants_test.go

@@ -0,0 +1,74 @@
package volume_server_http_test
import (
"net/http"
"strings"
"testing"
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
)
// TestWriteInvalidVidAndFidReturnBadRequest verifies that uploads addressed
// with an unparsable volume id or an unparsable file id are both rejected
// with 400 before any storage work happens.
func TestWriteInvalidVidAndFidReturnBadRequest(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P1())
	client := framework.NewHTTPClient()

	cases := []struct {
		label string // used in the failure message
		path  string // malformed path segment appended to the admin URL
	}{
		{"invalid vid", "/invalid,12345678"},
		{"invalid fid", "/1,bad"},
	}
	for _, tc := range cases {
		req := newUploadRequest(t, harness.VolumeAdminURL()+tc.path, []byte("x"))
		resp := framework.DoRequest(t, client, req)
		_ = framework.ReadAllAndClose(t, resp)
		if resp.StatusCode != http.StatusBadRequest {
			t.Fatalf("write with %s expected 400, got %d", tc.label, resp.StatusCode)
		}
	}
}
// TestWriteMalformedMultipartAndMD5Mismatch verifies two upload error paths:
// a multipart/form-data body whose boundary cannot be parsed, and a payload
// whose Content-MD5 header does not match the actual content.
func TestWriteMalformedMultipartAndMD5Mismatch(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, volumeClient := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(98)
	framework.AllocateVolume(t, volumeClient, volumeID, "")

	client := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, 772001, 0x1A2B3C4D)

	// multipart content type with no boundary parameter and a plain body.
	badMultipartReq, err := http.NewRequest(http.MethodPost, harness.VolumeAdminURL()+"/"+fid, strings.NewReader("not-a-valid-multipart-body"))
	if err != nil {
		t.Fatalf("create malformed multipart request: %v", err)
	}
	badMultipartReq.Header.Set("Content-Type", "multipart/form-data")
	badMultipartResp := framework.DoRequest(t, client, badMultipartReq)
	badMultipartBody := framework.ReadAllAndClose(t, badMultipartResp)
	if badMultipartResp.StatusCode != http.StatusBadRequest {
		t.Fatalf("malformed multipart write expected 400, got %d", badMultipartResp.StatusCode)
	}
	if !strings.Contains(strings.ToLower(string(badMultipartBody)), "boundary") {
		t.Fatalf("malformed multipart response should mention boundary parse failure, got %q", string(badMultipartBody))
	}

	// A well-formed upload whose declared Content-MD5 is wrong for the body.
	mismatchReq := newUploadRequest(t, harness.VolumeAdminURL()+"/"+fid, []byte("content-md5-mismatch-body"))
	mismatchReq.Header.Set("Content-MD5", "AAAAAAAAAAAAAAAAAAAAAA==")
	mismatchResp := framework.DoRequest(t, client, mismatchReq)
	mismatchBody := framework.ReadAllAndClose(t, mismatchResp)
	if mismatchResp.StatusCode != http.StatusBadRequest {
		t.Fatalf("content-md5 mismatch write expected 400, got %d", mismatchResp.StatusCode)
	}
	if !strings.Contains(string(mismatchBody), "Content-MD5") {
		t.Fatalf("content-md5 mismatch response should mention Content-MD5, got %q", string(mismatchBody))
	}
}

63
test/volume_server/matrix/config_profiles.go

@@ -0,0 +1,63 @@
package matrix
import "time"
// Profile describes one runtime test matrix configuration.
// Each PN() constructor below returns one concrete profile; the zero value of
// every field means "feature disabled", so P1 is usable with defaults only.
type Profile struct {
	// Name is the short profile identifier (e.g. "P1", "P8").
	Name string
	// ReadMode is the volume server read mode flag value (e.g. "proxy").
	ReadMode string
	// SplitPublicPort serves public reads on a separate port from admin traffic.
	SplitPublicPort bool
	// EnableJWT turns on JWT verification for read/write requests.
	EnableJWT bool
	// JWTSigningKey signs/verifies write tokens; only used when EnableJWT is set.
	JWTSigningKey string
	// JWTReadKey signs/verifies read tokens; only used when EnableJWT is set.
	JWTReadKey string
	// EnableMaintain toggles the maintenance-related server behavior.
	// NOTE(review): no profile in this file sets it — confirm semantics at the harness.
	EnableMaintain bool
	// ConcurrentUploadLimitMB caps concurrent upload volume; 0 disables the limit.
	ConcurrentUploadLimitMB int
	// ConcurrentDownloadLimitMB caps concurrent download volume; 0 disables the limit.
	ConcurrentDownloadLimitMB int
	// InflightUploadTimeout bounds how long a throttled upload may wait.
	InflightUploadTimeout time.Duration
	// InflightDownloadTimeout bounds how long a throttled download may wait.
	InflightDownloadTimeout time.Duration
	// ReplicatedLayout indicates a multi-replica cluster layout.
	ReplicatedLayout bool
	// HasErasureCoding indicates erasure-coded volumes are present.
	HasErasureCoding bool
	// HasRemoteTier indicates remote-tiered volumes are present.
	HasRemoteTier bool
}
// P1 is the baseline profile: one volume server, no JWT, proxy read mode.
func P1() Profile {
	var base Profile
	base.Name = "P1"
	base.ReadMode = "proxy"
	base.SplitPublicPort = false
	return base
}
// P2 uses split public/admin ports to verify public read-only behavior.
func P2() Profile {
	profile := P1()
	profile.Name, profile.SplitPublicPort = "P2", true
	return profile
}
// P3 enables JWT verification for read/write flows.
func P3() Profile {
	profile := P1()
	profile.Name = "P3"
	profile.EnableJWT = true
	// Distinct keys exercise the separate write and read token paths.
	profile.JWTSigningKey = "volume-server-write-key"
	profile.JWTReadKey = "volume-server-read-key"
	return profile
}
// P8 enables upload/download throttling branches.
func P8() Profile {
	profile := P1()
	profile.Name = "P8"
	// 1 MB limits are small enough that a multi-MB transfer trips them.
	profile.ConcurrentUploadLimitMB, profile.ConcurrentDownloadLimitMB = 1, 1
	inflight := 2 * time.Second
	profile.InflightUploadTimeout = inflight
	profile.InflightDownloadTimeout = inflight
	return profile
}
Loading…
Cancel
Save