
Fix TUS integration tests and creation-with-upload

- Fix test URLs to use full URLs instead of relative paths
- Fix creation-with-upload to refresh session before completing
- Fix Makefile to properly handle test cleanup
- Add FullURL helper function to TestCluster
feature/tus-protocol
chrislu, 2 days ago
commit 45b32ad0c7
4 changed files:
  .gitignore (1 line changed)
  test/tus/Makefile (28 lines changed)
  test/tus/tus_integration_test.go (32 lines changed)
  weed/server/filer_server_tus_handlers.go (7 lines changed)

.gitignore (1 line changed)

@@ -124,3 +124,4 @@ ADVANCED_IAM_DEVELOPMENT_PLAN.md
 *.log
 weed-iam
 test/kafka/kafka-client-loadtest/weed-linux-arm64
+/test/tus/filerldb2

test/tus/Makefile (28 lines changed)

@@ -64,9 +64,9 @@ check-binary:
 start-seaweedfs: check-binary
 @echo "$(YELLOW)Starting SeaweedFS server for TUS testing...$(NC)"
 @# Clean up any existing processes on our test ports
-@lsof -ti :$(MASTER_PORT) | xargs -r kill -TERM 2>/dev/null || true
-@lsof -ti :$(VOLUME_PORT) | xargs -r kill -TERM 2>/dev/null || true
-@lsof -ti :$(FILER_PORT) | xargs -r kill -TERM 2>/dev/null || true
+@lsof -ti :$(MASTER_PORT) | xargs kill -TERM 2>/dev/null || true
+@lsof -ti :$(VOLUME_PORT) | xargs kill -TERM 2>/dev/null || true
+@lsof -ti :$(FILER_PORT) | xargs kill -TERM 2>/dev/null || true
 @sleep 2
 # Create necessary directories
@@ -74,12 +74,9 @@ start-seaweedfs: check-binary
 @mkdir -p /tmp/seaweedfs-test-tus-volume
 @mkdir -p /tmp/seaweedfs-test-tus-filer
-# Determine binary path
-$(eval WEED_BIN := $(shell command -v $(SEAWEEDFS_BINARY) 2>/dev/null || echo "$(SEAWEEDFS_ROOT)/weed/weed"))
-# Start master server
+# Start master server (use freshly built binary)
 @echo "Starting master server..."
-@nohup $(WEED_BIN) master \
+@nohup $(SEAWEEDFS_ROOT)/weed/weed master \
 -port=$(MASTER_PORT) \
 -mdir=/tmp/seaweedfs-test-tus-master \
 -volumeSizeLimitMB=$(VOLUME_MAX_SIZE_MB) \
@@ -89,7 +86,7 @@ start-seaweedfs: check-binary
 # Start volume server
 @echo "Starting volume server..."
-@nohup $(WEED_BIN) volume \
+@nohup $(SEAWEEDFS_ROOT)/weed/weed volume \
 -port=$(VOLUME_PORT) \
 -mserver=127.0.0.1:$(MASTER_PORT) \
 -dir=/tmp/seaweedfs-test-tus-volume \
@@ -100,7 +97,7 @@ start-seaweedfs: check-binary
 # Start filer server
 @echo "Starting filer server..."
-@nohup $(WEED_BIN) filer \
+@nohup $(SEAWEEDFS_ROOT)/weed/weed filer \
 -port=$(FILER_PORT) \
 -master=127.0.0.1:$(MASTER_PORT) \
 -ip=127.0.0.1 \
@@ -176,11 +173,14 @@ test-errors: check-binary
 # Run tests with automatic server management
 test-with-server: build-weed
 @echo "$(YELLOW)Running TUS tests with automatic server management...$(NC)"
-@trap '$(MAKE) stop-seaweedfs clean || true' EXIT; \
-$(MAKE) start-seaweedfs && \
+@$(MAKE) -C $(TEST_DIR) start-seaweedfs && \
 sleep 3 && \
-cd $(SEAWEEDFS_ROOT) && go test -v -timeout=$(TEST_TIMEOUT) ./test/tus/... && \
-echo "$(GREEN)All TUS tests passed!$(NC)"
+cd $(SEAWEEDFS_ROOT) && go test -v -timeout=$(TEST_TIMEOUT) ./test/tus/...; \
+TEST_RESULT=$$?; \
+$(MAKE) -C $(TEST_DIR) stop-seaweedfs; \
+$(MAKE) -C $(TEST_DIR) clean; \
+if [ $$TEST_RESULT -eq 0 ]; then echo "$(GREEN)All TUS tests passed!$(NC)"; fi; \
+exit $$TEST_RESULT
 # Debug targets
 debug-logs:

test/tus/tus_integration_test.go (32 lines changed)

@@ -57,6 +57,14 @@ func (c *TestCluster) TusURL() string {
 return fmt.Sprintf("%s/.tus", c.FilerURL())
 }
+// FullURL converts a relative path to a full URL
+func (c *TestCluster) FullURL(path string) string {
+if strings.HasPrefix(path, "http://") || strings.HasPrefix(path, "https://") {
+return path
+}
+return fmt.Sprintf("http://127.0.0.1:%s%s", testFilerPort, path)
+}
 // startTestCluster starts a SeaweedFS cluster for testing
 func startTestCluster(t *testing.T, ctx context.Context) (*TestCluster, error) {
 weedBinary := findWeedBinary()
@@ -260,7 +268,7 @@ func TestTusBasicUpload(t *testing.T) {
 t.Logf("Upload location: %s", uploadLocation)
 // Step 2: Upload data (PATCH)
-patchReq, err := http.NewRequest(http.MethodPatch, uploadLocation, bytes.NewReader(testData))
+patchReq, err := http.NewRequest(http.MethodPatch, cluster.FullURL(uploadLocation), bytes.NewReader(testData))
 require.NoError(t, err)
 patchReq.Header.Set("Tus-Resumable", TusVersion)
 patchReq.Header.Set("Upload-Offset", "0")
@@ -336,7 +344,7 @@ func TestTusChunkedUpload(t *testing.T) {
 }
 chunk := testData[offset:end]
-patchReq, err := http.NewRequest(http.MethodPatch, uploadLocation, bytes.NewReader(chunk))
+patchReq, err := http.NewRequest(http.MethodPatch, cluster.FullURL(uploadLocation), bytes.NewReader(chunk))
 require.NoError(t, err)
 patchReq.Header.Set("Tus-Resumable", TusVersion)
 patchReq.Header.Set("Upload-Offset", strconv.Itoa(offset))
@@ -400,7 +408,7 @@ func TestTusHeadRequest(t *testing.T) {
 uploadLocation := createResp.Header.Get("Location")
 // HEAD before any data uploaded - offset should be 0
-headReq1, err := http.NewRequest(http.MethodHead, uploadLocation, nil)
+headReq1, err := http.NewRequest(http.MethodHead, cluster.FullURL(uploadLocation), nil)
 require.NoError(t, err)
 headReq1.Header.Set("Tus-Resumable", TusVersion)
@@ -414,7 +422,7 @@ func TestTusHeadRequest(t *testing.T) {
 // Upload half the data
 halfLen := len(testData) / 2
-patchReq, err := http.NewRequest(http.MethodPatch, uploadLocation, bytes.NewReader(testData[:halfLen]))
+patchReq, err := http.NewRequest(http.MethodPatch, cluster.FullURL(uploadLocation), bytes.NewReader(testData[:halfLen]))
 require.NoError(t, err)
 patchReq.Header.Set("Tus-Resumable", TusVersion)
 patchReq.Header.Set("Upload-Offset", "0")
@@ -426,7 +434,7 @@ func TestTusHeadRequest(t *testing.T) {
 require.Equal(t, http.StatusNoContent, patchResp.StatusCode)
 // HEAD after partial upload - offset should be halfLen
-headReq2, err := http.NewRequest(http.MethodHead, uploadLocation, nil)
+headReq2, err := http.NewRequest(http.MethodHead, cluster.FullURL(uploadLocation), nil)
 require.NoError(t, err)
 headReq2.Header.Set("Tus-Resumable", TusVersion)
@@ -472,7 +480,7 @@ func TestTusDeleteUpload(t *testing.T) {
 uploadLocation := createResp.Header.Get("Location")
 // Upload some data
-patchReq, err := http.NewRequest(http.MethodPatch, uploadLocation, bytes.NewReader(testData[:10]))
+patchReq, err := http.NewRequest(http.MethodPatch, cluster.FullURL(uploadLocation), bytes.NewReader(testData[:10]))
 require.NoError(t, err)
 patchReq.Header.Set("Tus-Resumable", TusVersion)
 patchReq.Header.Set("Upload-Offset", "0")
@@ -483,7 +491,7 @@ func TestTusDeleteUpload(t *testing.T) {
 patchResp.Body.Close()
 // Delete the upload
-deleteReq, err := http.NewRequest(http.MethodDelete, uploadLocation, nil)
+deleteReq, err := http.NewRequest(http.MethodDelete, cluster.FullURL(uploadLocation), nil)
 require.NoError(t, err)
 deleteReq.Header.Set("Tus-Resumable", TusVersion)
@@ -494,7 +502,7 @@ func TestTusDeleteUpload(t *testing.T) {
 assert.Equal(t, http.StatusNoContent, deleteResp.StatusCode, "DELETE should return 204")
 // Verify upload is gone - HEAD should return 404
-headReq, err := http.NewRequest(http.MethodHead, uploadLocation, nil)
+headReq, err := http.NewRequest(http.MethodHead, cluster.FullURL(uploadLocation), nil)
 require.NoError(t, err)
 headReq.Header.Set("Tus-Resumable", TusVersion)
@@ -538,7 +546,7 @@ func TestTusInvalidOffset(t *testing.T) {
 uploadLocation := createResp.Header.Get("Location")
 // Try to upload with wrong offset (should be 0, but we send 100)
-patchReq, err := http.NewRequest(http.MethodPatch, uploadLocation, bytes.NewReader(testData))
+patchReq, err := http.NewRequest(http.MethodPatch, cluster.FullURL(uploadLocation), bytes.NewReader(testData))
 require.NoError(t, err)
 patchReq.Header.Set("Tus-Resumable", TusVersion)
 patchReq.Header.Set("Upload-Offset", "100") // Wrong offset!
@@ -687,7 +695,7 @@ func TestTusResumeAfterInterruption(t *testing.T) {
 // Upload first 20KB
 firstChunkSize := 20 * 1024
-patchReq1, err := http.NewRequest(http.MethodPatch, uploadLocation, bytes.NewReader(testData[:firstChunkSize]))
+patchReq1, err := http.NewRequest(http.MethodPatch, cluster.FullURL(uploadLocation), bytes.NewReader(testData[:firstChunkSize]))
 require.NoError(t, err)
 patchReq1.Header.Set("Tus-Resumable", TusVersion)
 patchReq1.Header.Set("Upload-Offset", "0")
@@ -701,7 +709,7 @@ func TestTusResumeAfterInterruption(t *testing.T) {
 t.Log("Simulating network interruption...")
 // Simulate resumption: Query current offset with HEAD
-headReq, err := http.NewRequest(http.MethodHead, uploadLocation, nil)
+headReq, err := http.NewRequest(http.MethodHead, cluster.FullURL(uploadLocation), nil)
 require.NoError(t, err)
 headReq.Header.Set("Tus-Resumable", TusVersion)
@@ -715,7 +723,7 @@ func TestTusResumeAfterInterruption(t *testing.T) {
 require.Equal(t, firstChunkSize, currentOffset)
 // Resume upload from current offset
-patchReq2, err := http.NewRequest(http.MethodPatch, uploadLocation, bytes.NewReader(testData[currentOffset:]))
+patchReq2, err := http.NewRequest(http.MethodPatch, cluster.FullURL(uploadLocation), bytes.NewReader(testData[currentOffset:]))
 require.NoError(t, err)
 patchReq2.Header.Set("Tus-Resumable", TusVersion)
 patchReq2.Header.Set("Upload-Offset", strconv.Itoa(currentOffset))
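
For context, the pattern these test changes establish: the Location header returned by the TUS create (POST) step can be a bare path such as /.tus/<id>, so it is resolved against the filer address before the PATCH is built. Below is a minimal sketch of that pattern, not the actual test code; the filer address, upload ID, and payload are placeholder assumptions, and the literal "1.0.0" (the TUS protocol version) stands in for the tests' TusVersion constant.

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
	"strings"
)

// fullURL mirrors the new TestCluster.FullURL helper: absolute URLs pass
// through untouched, relative paths are prefixed with the filer base address.
func fullURL(filerBase, path string) string {
	if strings.HasPrefix(path, "http://") || strings.HasPrefix(path, "https://") {
		return path
	}
	return filerBase + path
}

func main() {
	filerBase := "http://127.0.0.1:8888"       // assumed filer address for illustration
	uploadLocation := "/.tus/example-upload-id" // hypothetical Location header value
	data := []byte("hello tus")

	// Build the PATCH against the resolved URL, as the tests now do.
	req, err := http.NewRequest(http.MethodPatch, fullURL(filerBase, uploadLocation), bytes.NewReader(data))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Tus-Resumable", "1.0.0")
	req.Header.Set("Upload-Offset", "0")
	req.Header.Set("Content-Type", "application/offset+octet-stream")

	// Only printed here; the real tests send it with their shared HTTP client.
	fmt.Println("PATCH", req.URL.String())
}
```

The actual tests reuse the TestCluster.FullURL helper added in the diff above, which also leaves already-absolute URLs unchanged.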

weed/server/filer_server_tus_handlers.go (7 lines changed)

@@ -135,6 +135,13 @@ func (fs *FilerServer) tusCreateHandler(w http.ResponseWriter, r *http.Request)
 // Check if upload is complete
 if bytesWritten == session.Size {
+// Refresh session to get updated chunks
+session, err = fs.getTusSession(ctx, uploadID)
+if err != nil {
+glog.Errorf("Failed to get updated TUS session: %v", err)
+http.Error(w, "Failed to complete upload", http.StatusInternalServerError)
+return
+}
 if err := fs.completeTusUpload(ctx, session); err != nil {
 glog.Errorf("Failed to complete TUS upload: %v", err)
 http.Error(w, "Failed to complete upload", http.StatusInternalServerError)
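
For reference, the request shape that exercises this path is TUS creation-with-upload: a single POST that both declares the upload (Upload-Length) and carries the first bytes (Content-Type: application/offset+octet-stream), so the handler writes chunks during creation and now re-reads the stored session before finalizing. A hedged client-side sketch follows; the address, port, and payload are illustrative assumptions, not values taken from this commit.

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
	"strconv"
)

func main() {
	data := []byte("uploaded in the same request that creates the upload")

	// POST to the TUS endpoint with the data in the body
	// (TUS creation-with-upload extension).
	req, err := http.NewRequest(http.MethodPost, "http://127.0.0.1:8888/.tus", bytes.NewReader(data))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Tus-Resumable", "1.0.0")
	req.Header.Set("Upload-Length", strconv.Itoa(len(data)))
	req.Header.Set("Content-Type", "application/offset+octet-stream")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		fmt.Println("request failed (no server running?):", err)
		return
	}
	defer resp.Body.Close()

	// Per the creation-with-upload extension, a successful response is
	// 201 Created with the upload's Location and an Upload-Offset header
	// reflecting the bytes written during creation.
	fmt.Println("status:", resp.StatusCode)
	fmt.Println("Location:", resp.Header.Get("Location"))
	fmt.Println("Upload-Offset:", resp.Header.Get("Upload-Offset"))
}
```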
