17 changed files with 2932 additions and 26 deletions
-
119.github/workflows/s3-versioning-tests.yml
-
1.gitignore
-
360test/s3/retention/Makefile
-
264test/s3/retention/README.md
-
31test/s3/retention/go.mod
-
62test/s3/retention/go.sum
-
627test/s3/retention/s3_retention_test.go
-
455test/s3/retention/s3_worm_integration_test.go
-
9test/s3/retention/test_config.json
-
21weed/s3api/s3_constants/extend_key.go
-
23weed/s3api/s3api_object_handlers_delete.go
-
8weed/s3api/s3api_object_handlers_put.go
-
331weed/s3api/s3api_object_handlers_retention.go
-
24weed/s3api/s3api_object_handlers_skip.go
-
611weed/s3api/s3api_object_retention.go
-
6weed/s3api/s3api_server.go
-
6weed/s3api/s3err/s3api_errors.go
@ -0,0 +1,360 @@ |
|||
# S3 API Retention Test Makefile
|
|||
# This Makefile provides comprehensive targets for running S3 retention tests
|
|||
|
|||
# Declare every command-style target as phony so a stray file with the same
# name can never mask it and the recipes always run. (The original list was
# missing the individual test-*, dev-*, ci/quality, and utility targets.)
.PHONY: help build-weed setup-server start-server stop-server test-retention \
        test-retention-quick test-retention-comprehensive test-retention-worm \
        test-retention-simple test-all test-with-server clean logs check-deps \
        health-check test-basic-retention test-compliance-retention \
        test-legal-hold test-object-lock-config test-retention-versions \
        test-retention-combination test-expired-retention test-retention-errors \
        test-worm-integration test-worm-legacy test-retention-overwrite \
        test-retention-bulk test-retention-multipart test-retention-extended-attrs \
        test-retention-defaults test-retention-concurrent dev-start dev-test \
        ci-test benchmark-retention coverage fmt lint install-deps show-config
|||
|
|||
# Configuration
|
|||
WEED_BINARY := ../../../weed/weed_binary |
|||
S3_PORT := 8333 |
|||
MASTER_PORT := 9333 |
|||
VOLUME_PORT := 8080 |
|||
FILER_PORT := 8888 |
|||
TEST_TIMEOUT := 15m |
|||
TEST_PATTERN := TestRetention |
|||
|
|||
# Default target
|
|||
help: |
|||
@echo "S3 API Retention Test Makefile" |
|||
@echo "" |
|||
@echo "Available targets:" |
|||
@echo " help - Show this help message" |
|||
@echo " build-weed - Build the SeaweedFS binary" |
|||
@echo " check-deps - Check dependencies and build binary if needed" |
|||
@echo " start-server - Start SeaweedFS server for testing" |
|||
@echo " start-server-simple - Start server without process cleanup (for CI)" |
|||
@echo " stop-server - Stop SeaweedFS server" |
|||
@echo " test-retention - Run all retention tests" |
|||
@echo " test-retention-quick - Run core retention tests only" |
|||
@echo " test-retention-simple - Run tests without server management" |
|||
@echo " test-retention-comprehensive - Run comprehensive retention tests" |
|||
@echo " test-retention-worm - Run WORM integration tests" |
|||
@echo " test-all - Run all S3 API retention tests" |
|||
@echo " test-with-server - Start server, run tests, stop server" |
|||
@echo " logs - Show server logs" |
|||
@echo " clean - Clean up test artifacts and stop server" |
|||
@echo " health-check - Check if server is accessible" |
|||
@echo "" |
|||
@echo "Configuration:" |
|||
@echo " S3_PORT=${S3_PORT}" |
|||
@echo " TEST_TIMEOUT=${TEST_TIMEOUT}" |
|||
|
|||
# Build the SeaweedFS binary
|
|||
build-weed: |
|||
@echo "Building SeaweedFS binary..." |
|||
@cd ../../../weed && go build -o weed_binary . |
|||
@chmod +x $(WEED_BINARY) |
|||
@echo "✅ SeaweedFS binary built at $(WEED_BINARY)" |
|||
|
|||
check-deps: build-weed |
|||
@echo "Checking dependencies..." |
|||
@echo "🔍 DEBUG: Checking Go installation..." |
|||
@command -v go >/dev/null 2>&1 || (echo "Go is required but not installed" && exit 1) |
|||
@echo "🔍 DEBUG: Go version: $$(go version)" |
|||
@echo "🔍 DEBUG: Checking binary at $(WEED_BINARY)..." |
|||
@test -f $(WEED_BINARY) || (echo "SeaweedFS binary not found at $(WEED_BINARY)" && exit 1) |
|||
@echo "🔍 DEBUG: Binary size: $$(ls -lh $(WEED_BINARY) | awk '{print $$5}')" |
|||
@echo "🔍 DEBUG: Binary permissions: $$(ls -la $(WEED_BINARY) | awk '{print $$1}')" |
|||
@echo "🔍 DEBUG: Checking Go module dependencies..." |
|||
@go list -m github.com/aws/aws-sdk-go-v2 >/dev/null 2>&1 || (echo "AWS SDK Go v2 not found. Run 'go mod tidy'." && exit 1) |
|||
@go list -m github.com/stretchr/testify >/dev/null 2>&1 || (echo "Testify not found. Run 'go mod tidy'." && exit 1) |
|||
@echo "✅ All dependencies are available" |
|||
|
|||
# Start SeaweedFS server for testing.
#
# Launches a combined master/volume/filer/S3 server in the background,
# records its PID in weed-server.pid, logs to weed-test.log, and polls the
# S3 port for up to 90 seconds before declaring failure.
#
# Fix: the diagnostic probes previously hard-coded ports 9333/8080/8333;
# they now use $(MASTER_PORT)/$(VOLUME_PORT)/$(S3_PORT) so overriding the
# port variables keeps the health checks in sync with the server.
start-server: check-deps
	@echo "Starting SeaweedFS server..."
	@echo "🔍 DEBUG: Current working directory: $$(pwd)"
	@echo "🔍 DEBUG: Checking for existing weed processes..."
	@ps aux | grep weed | grep -v grep || echo "No existing weed processes found"
	@echo "🔍 DEBUG: Cleaning up any existing PID file..."
	@rm -f weed-server.pid
	@echo "🔍 DEBUG: Checking for port conflicts..."
	@if netstat -tlnp 2>/dev/null | grep $(S3_PORT) >/dev/null; then \
		echo "⚠️ Port $(S3_PORT) is already in use, trying to find the process..."; \
		netstat -tlnp 2>/dev/null | grep $(S3_PORT) || true; \
	else \
		echo "✅ Port $(S3_PORT) is available"; \
	fi
	@echo "🔍 DEBUG: Checking binary at $(WEED_BINARY)"
	@ls -la $(WEED_BINARY) || (echo "❌ Binary not found!" && exit 1)
	@echo "🔍 DEBUG: Checking config file at ../../../docker/compose/s3.json"
	@ls -la ../../../docker/compose/s3.json || echo "⚠️ Config file not found, continuing without it"
	@echo "🔍 DEBUG: Creating volume directory..."
	@mkdir -p ./test-volume-data
	@echo "🔍 DEBUG: Launching SeaweedFS server in background..."
	@echo "🔍 DEBUG: Command: $(WEED_BINARY) server -debug -s3 -s3.port=$(S3_PORT) -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=true -s3.config=../../../docker/compose/s3.json -filer -filer.maxMB=64 -master.volumeSizeLimitMB=50 -volume.max=100 -dir=./test-volume-data -volume.preStopSeconds=1 -metricsPort=9324"
	@$(WEED_BINARY) server \
		-debug \
		-s3 \
		-s3.port=$(S3_PORT) \
		-s3.allowEmptyFolder=false \
		-s3.allowDeleteBucketNotEmpty=true \
		-s3.config=../../../docker/compose/s3.json \
		-filer \
		-filer.maxMB=64 \
		-master.volumeSizeLimitMB=50 \
		-volume.max=100 \
		-dir=./test-volume-data \
		-volume.preStopSeconds=1 \
		-metricsPort=9324 \
		> weed-test.log 2>&1 & echo $$! > weed-server.pid
	@echo "🔍 DEBUG: Server PID: $$(cat weed-server.pid 2>/dev/null || echo 'PID file not found')"
	@echo "🔍 DEBUG: Checking if PID is still running..."
	@sleep 2
	@if [ -f weed-server.pid ]; then \
		SERVER_PID=$$(cat weed-server.pid); \
		ps -p $$SERVER_PID || echo "⚠️ Server PID $$SERVER_PID not found after 2 seconds"; \
	else \
		echo "⚠️ PID file not found"; \
	fi
	@echo "🔍 DEBUG: Waiting for server to start (up to 90 seconds)..."
	@for i in $$(seq 1 90); do \
		echo "🔍 DEBUG: Attempt $$i/90 - checking port $(S3_PORT)"; \
		if curl -s http://localhost:$(S3_PORT) >/dev/null 2>&1; then \
			echo "✅ SeaweedFS server started successfully on port $(S3_PORT) after $$i seconds"; \
			exit 0; \
		fi; \
		if [ $$i -eq 5 ]; then \
			echo "🔍 DEBUG: After 5 seconds, checking process and logs..."; \
			ps aux | grep weed | grep -v grep || echo "No weed processes found"; \
			if [ -f weed-test.log ]; then \
				echo "=== First server logs ==="; \
				head -20 weed-test.log; \
			fi; \
		fi; \
		if [ $$i -eq 15 ]; then \
			echo "🔍 DEBUG: After 15 seconds, checking port bindings..."; \
			netstat -tlnp 2>/dev/null | grep $(S3_PORT) || echo "Port $(S3_PORT) not bound"; \
			netstat -tlnp 2>/dev/null | grep $(MASTER_PORT) || echo "Port $(MASTER_PORT) not bound"; \
			netstat -tlnp 2>/dev/null | grep $(VOLUME_PORT) || echo "Port $(VOLUME_PORT) not bound"; \
		fi; \
		if [ $$i -eq 30 ]; then \
			echo "⚠️ Server taking longer than expected (30s), checking logs..."; \
			if [ -f weed-test.log ]; then \
				echo "=== Recent server logs ==="; \
				tail -20 weed-test.log; \
			fi; \
		fi; \
		sleep 1; \
	done; \
	echo "❌ Server failed to start within 90 seconds"; \
	echo "🔍 DEBUG: Final process check:"; \
	ps aux | grep weed | grep -v grep || echo "No weed processes found"; \
	echo "🔍 DEBUG: Final port check:"; \
	netstat -tlnp 2>/dev/null | grep -E "($(S3_PORT)|$(MASTER_PORT)|$(VOLUME_PORT))" || echo "No ports bound"; \
	echo "=== Full server logs ==="; \
	if [ -f weed-test.log ]; then \
		cat weed-test.log; \
	else \
		echo "No log file found"; \
	fi; \
	exit 1
|||
|
|||
# Stop SeaweedFS server
|
|||
stop-server: |
|||
@echo "Stopping SeaweedFS server..." |
|||
@if [ -f weed-server.pid ]; then \
|
|||
SERVER_PID=$$(cat weed-server.pid); \
|
|||
echo "Killing server PID $$SERVER_PID"; \
|
|||
if ps -p $$SERVER_PID >/dev/null 2>&1; then \
|
|||
kill -TERM $$SERVER_PID 2>/dev/null || true; \
|
|||
sleep 2; \
|
|||
if ps -p $$SERVER_PID >/dev/null 2>&1; then \
|
|||
echo "Process still running, sending KILL signal..."; \
|
|||
kill -KILL $$SERVER_PID 2>/dev/null || true; \
|
|||
sleep 1; \
|
|||
fi; \
|
|||
else \
|
|||
echo "Process $$SERVER_PID not found (already stopped)"; \
|
|||
fi; \
|
|||
rm -f weed-server.pid; \
|
|||
else \
|
|||
echo "No PID file found, checking for running processes..."; \
|
|||
echo "⚠️ Skipping automatic process cleanup to avoid CI issues"; \
|
|||
echo "Note: Any remaining weed processes should be cleaned up by the CI environment"; \
|
|||
fi |
|||
@echo "✅ SeaweedFS server stopped" |
|||
|
|||
# Show server logs
|
|||
logs: |
|||
@if test -f weed-test.log; then \
|
|||
echo "=== SeaweedFS Server Logs ==="; \
|
|||
tail -f weed-test.log; \
|
|||
else \
|
|||
echo "No log file found. Server may not be running."; \
|
|||
fi |
|||
|
|||
# Core retention tests (basic functionality)
|
|||
test-retention-quick: check-deps |
|||
@echo "Running core S3 retention tests..." |
|||
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestBasicRetentionWorkflow|TestRetentionModeCompliance|TestLegalHoldWorkflow" . |
|||
@echo "✅ Core retention tests completed" |
|||
|
|||
# All retention tests (comprehensive)
|
|||
test-retention: check-deps |
|||
@echo "Running all S3 retention tests..." |
|||
@go test -v -timeout=$(TEST_TIMEOUT) -run "$(TEST_PATTERN)" . |
|||
@echo "✅ All retention tests completed" |
|||
|
|||
# WORM integration tests
|
|||
test-retention-worm: check-deps |
|||
@echo "Running WORM integration tests..." |
|||
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestWORM|TestRetentionExtendedAttributes|TestRetentionConcurrentOperations" . |
|||
@echo "✅ WORM integration tests completed" |
|||
|
|||
# Comprehensive retention tests (all features)
|
|||
test-retention-comprehensive: check-deps |
|||
@echo "Running comprehensive S3 retention tests..." |
|||
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestRetention|TestObjectLock|TestLegalHold|TestWORM" . |
|||
@echo "✅ Comprehensive retention tests completed" |
|||
|
|||
# All tests without server management
|
|||
test-retention-simple: check-deps |
|||
@echo "Running retention tests (assuming server is already running)..." |
|||
@go test -v -timeout=$(TEST_TIMEOUT) . |
|||
@echo "✅ All retention tests completed" |
|||
|
|||
# Start server, run tests, stop server.
# Recursive invocations use $(MAKE) (not bare `make`) so -n/-j and the
# jobserver flags propagate correctly to the sub-makes; the || branch
# still guarantees the server is stopped when the tests fail.
test-with-server: start-server
	@echo "Running retention tests with managed server..."
	@sleep 5  # Give server time to fully start
	@$(MAKE) test-retention-comprehensive || (echo "Tests failed, stopping server..." && $(MAKE) stop-server && exit 1)
	@$(MAKE) stop-server
	@echo "✅ All tests completed with managed server"
|||
|
|||
# Health check
|
|||
health-check: |
|||
@echo "Checking server health..." |
|||
@if curl -s http://localhost:$(S3_PORT) >/dev/null 2>&1; then \
|
|||
echo "✅ Server is accessible on port $(S3_PORT)"; \
|
|||
else \
|
|||
echo "❌ Server is not accessible on port $(S3_PORT)"; \
|
|||
exit 1; \
|
|||
fi |
|||
|
|||
# Clean up test artifacts and stop the server.
# Uses $(MAKE) (not bare `make`) for the recursive stop-server call so
# make flags and the jobserver propagate to the sub-invocation.
clean:
	@echo "Cleaning up test artifacts..."
	@$(MAKE) stop-server
	@rm -f weed-test.log
	@rm -f weed-server.pid
	@rm -rf ./test-volume-data
	@echo "✅ Cleanup completed"
|||
|
|||
# Individual test targets for specific functionality
|
|||
test-basic-retention: |
|||
@echo "Running basic retention tests..." |
|||
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestBasicRetentionWorkflow" . |
|||
|
|||
test-compliance-retention: |
|||
@echo "Running compliance retention tests..." |
|||
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestRetentionModeCompliance" . |
|||
|
|||
test-legal-hold: |
|||
@echo "Running legal hold tests..." |
|||
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestLegalHoldWorkflow" . |
|||
|
|||
test-object-lock-config: |
|||
@echo "Running object lock configuration tests..." |
|||
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestObjectLockConfiguration" . |
|||
|
|||
test-retention-versions: |
|||
@echo "Running retention with versions tests..." |
|||
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestRetentionWithVersions" . |
|||
|
|||
test-retention-combination: |
|||
@echo "Running retention and legal hold combination tests..." |
|||
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestRetentionAndLegalHoldCombination" . |
|||
|
|||
test-expired-retention: |
|||
@echo "Running expired retention tests..." |
|||
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestExpiredRetention" . |
|||
|
|||
test-retention-errors: |
|||
@echo "Running retention error case tests..." |
|||
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestRetentionErrorCases" . |
|||
|
|||
# WORM-specific test targets
|
|||
test-worm-integration: |
|||
@echo "Running WORM integration tests..." |
|||
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestWORMRetentionIntegration" . |
|||
|
|||
test-worm-legacy: |
|||
@echo "Running WORM legacy compatibility tests..." |
|||
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestWORMLegacyCompatibility" . |
|||
|
|||
test-retention-overwrite: |
|||
@echo "Running retention overwrite protection tests..." |
|||
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestRetentionOverwriteProtection" . |
|||
|
|||
test-retention-bulk: |
|||
@echo "Running retention bulk operations tests..." |
|||
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestRetentionBulkOperations" . |
|||
|
|||
test-retention-multipart: |
|||
@echo "Running retention multipart upload tests..." |
|||
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestRetentionWithMultipartUpload" . |
|||
|
|||
test-retention-extended-attrs: |
|||
@echo "Running retention extended attributes tests..." |
|||
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestRetentionExtendedAttributes" . |
|||
|
|||
test-retention-defaults: |
|||
@echo "Running retention bucket defaults tests..." |
|||
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestRetentionBucketDefaults" . |
|||
|
|||
test-retention-concurrent: |
|||
@echo "Running retention concurrent operations tests..." |
|||
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestRetentionConcurrentOperations" . |
|||
|
|||
# Development targets
|
|||
dev-start: start-server |
|||
@echo "Development server started. Access S3 API at http://localhost:$(S3_PORT)" |
|||
@echo "To stop: make stop-server" |
|||
|
|||
dev-test: check-deps |
|||
@echo "Running tests in development mode..." |
|||
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestBasicRetentionWorkflow" . |
|||
|
|||
# CI targets
|
|||
ci-test: check-deps |
|||
@echo "Running tests in CI mode..." |
|||
@go test -v -timeout=$(TEST_TIMEOUT) -race . |
|||
|
|||
# All targets
|
|||
test-all: test-retention test-retention-worm |
|||
@echo "✅ All S3 retention tests completed" |
|||
|
|||
# Benchmark targets
|
|||
benchmark-retention: |
|||
@echo "Running retention performance benchmarks..." |
|||
@go test -v -timeout=$(TEST_TIMEOUT) -bench=. -benchmem . |
|||
|
|||
# Coverage targets
|
|||
coverage: |
|||
@echo "Running tests with coverage..." |
|||
@go test -v -timeout=$(TEST_TIMEOUT) -coverprofile=coverage.out . |
|||
@go tool cover -html=coverage.out -o coverage.html |
|||
@echo "Coverage report generated: coverage.html" |
|||
|
|||
# Format and lint
|
|||
fmt: |
|||
@echo "Formatting Go code..." |
|||
@go fmt . |
|||
|
|||
lint: |
|||
@echo "Running linter..." |
|||
@golint . || echo "golint not available, skipping..." |
|||
|
|||
# Install dependencies for development
|
|||
install-deps: |
|||
@echo "Installing Go dependencies..." |
|||
@go mod tidy |
|||
@go mod download |
|||
|
|||
# Show current configuration
|
|||
show-config: |
|||
@echo "Current configuration:" |
|||
@echo " WEED_BINARY: $(WEED_BINARY)" |
|||
@echo " S3_PORT: $(S3_PORT)" |
|||
@echo " TEST_TIMEOUT: $(TEST_TIMEOUT)" |
|||
@echo " TEST_PATTERN: $(TEST_PATTERN)" |
@ -0,0 +1,264 @@ |
|||
# SeaweedFS S3 Object Retention Tests |
|||
|
|||
This directory contains comprehensive tests for SeaweedFS S3 Object Retention functionality, including Object Lock, Legal Hold, and WORM (Write Once Read Many) capabilities. |
|||
|
|||
## Overview |
|||
|
|||
The test suite validates AWS S3-compatible object retention features including: |
|||
|
|||
- **Object Retention**: GOVERNANCE and COMPLIANCE modes with retain-until-date |
|||
- **Legal Hold**: Independent protection that can be applied/removed |
|||
- **Object Lock Configuration**: Bucket-level default retention policies |
|||
- **WORM Integration**: Compatibility with legacy WORM functionality |
|||
- **Version-specific Retention**: Different retention policies per object version |
|||
- **Enforcement**: Protection against deletion and overwriting |
|||
|
|||
## Test Files |
|||
|
|||
- `s3_retention_test.go` - Core retention functionality tests |
|||
- `s3_worm_integration_test.go` - WORM integration and advanced scenarios |
|||
- `test_config.json` - Test configuration (endpoints, credentials) |
|||
- `Makefile` - Comprehensive test automation |
|||
- `go.mod` - Go module dependencies |
|||
|
|||
## Prerequisites |
|||
|
|||
- Go 1.21 or later |
|||
- SeaweedFS binary built (`make build-weed`) |
|||
- AWS SDK Go v2 |
|||
- Testify testing framework |
|||
|
|||
## Quick Start |
|||
|
|||
### 1. Build and Start Server |
|||
```bash |
|||
# Build SeaweedFS and start test server |
|||
make start-server |
|||
``` |
|||
|
|||
### 2. Run Tests |
|||
```bash |
|||
# Run core retention tests |
|||
make test-retention-quick |
|||
|
|||
# Run all retention tests |
|||
make test-retention |
|||
|
|||
# Run WORM integration tests |
|||
make test-retention-worm |
|||
|
|||
# Run all tests with managed server |
|||
make test-with-server |
|||
``` |
|||
|
|||
### 3. Cleanup |
|||
```bash |
|||
make clean |
|||
``` |
|||
|
|||
## Test Categories |
|||
|
|||
### Core Retention Tests |
|||
- `TestBasicRetentionWorkflow` - Basic GOVERNANCE mode retention |
|||
- `TestRetentionModeCompliance` - COMPLIANCE mode (immutable) |
|||
- `TestLegalHoldWorkflow` - Legal hold on/off functionality |
|||
- `TestObjectLockConfiguration` - Bucket object lock settings |
|||
|
|||
### Advanced Tests |
|||
- `TestRetentionWithVersions` - Version-specific retention policies |
|||
- `TestRetentionAndLegalHoldCombination` - Multiple protection types |
|||
- `TestExpiredRetention` - Post-expiration behavior |
|||
- `TestRetentionErrorCases` - Error handling and edge cases |
|||
|
|||
### WORM Integration Tests |
|||
- `TestWORMRetentionIntegration` - New retention + legacy WORM |
|||
- `TestWORMLegacyCompatibility` - Backward compatibility |
|||
- `TestRetentionOverwriteProtection` - Prevent overwrites |
|||
- `TestRetentionBulkOperations` - Bulk delete with retention |
|||
- `TestRetentionWithMultipartUpload` - Multipart upload retention |
|||
- `TestRetentionExtendedAttributes` - Extended attribute storage |
|||
- `TestRetentionBucketDefaults` - Default retention application |
|||
- `TestRetentionConcurrentOperations` - Concurrent operation safety |
|||
|
|||
## Individual Test Targets |
|||
|
|||
Run specific test categories: |
|||
|
|||
```bash |
|||
# Basic functionality |
|||
make test-basic-retention |
|||
make test-compliance-retention |
|||
make test-legal-hold |
|||
|
|||
# Advanced features |
|||
make test-retention-versions |
|||
make test-retention-combination |
|||
make test-expired-retention |
|||
|
|||
# WORM integration |
|||
make test-worm-integration |
|||
make test-worm-legacy |
|||
make test-retention-bulk |
|||
``` |
|||
|
|||
## Configuration |
|||
|
|||
### Server Configuration |
|||
The tests use these default settings: |
|||
- S3 Port: 8333 |
|||
- Test timeout: 15 minutes |
|||
- Volume directory: `./test-volume-data` |
|||
|
|||
### Test Configuration (`test_config.json`) |
|||
```json |
|||
{ |
|||
"endpoint": "http://localhost:8333", |
|||
"access_key": "some_access_key1", |
|||
"secret_key": "some_secret_key1", |
|||
"region": "us-east-1", |
|||
"bucket_prefix": "test-retention-", |
|||
"use_ssl": false, |
|||
"skip_verify_ssl": true |
|||
} |
|||
``` |
|||
|
|||
## Expected Behavior |
|||
|
|||
### GOVERNANCE Mode |
|||
- Objects protected until retain-until-date |
|||
- Can be bypassed with `x-amz-bypass-governance-retention` header |
|||
- Supports time extension (not reduction) |
|||
|
|||
### COMPLIANCE Mode |
|||
- Objects immutably protected until retain-until-date |
|||
- Cannot be bypassed or shortened |
|||
- Strictest protection level |
|||
|
|||
### Legal Hold |
|||
- Independent ON/OFF protection |
|||
- Can coexist with retention policies |
|||
- Must be explicitly removed to allow deletion |
|||
|
|||
### Version Support |
|||
- Each object version can have individual retention |
|||
- Applies to both versioned and non-versioned buckets |
|||
- Version-specific retention retrieval |
|||
|
|||
## Development |
|||
|
|||
### Running in Development Mode |
|||
```bash |
|||
# Start server for development |
|||
make dev-start |
|||
|
|||
# Run quick test |
|||
make dev-test |
|||
``` |
|||
|
|||
### Code Quality |
|||
```bash |
|||
# Format code |
|||
make fmt |
|||
|
|||
# Run linter |
|||
make lint |
|||
|
|||
# Generate coverage report |
|||
make coverage |
|||
``` |
|||
|
|||
### Performance Testing |
|||
```bash |
|||
# Run benchmarks |
|||
make benchmark-retention |
|||
``` |
|||
|
|||
## Troubleshooting |
|||
|
|||
### Server Won't Start |
|||
```bash |
|||
# Check if port is in use |
|||
netstat -tlnp | grep 8333 |
|||
|
|||
# View server logs |
|||
make logs |
|||
|
|||
# Force cleanup |
|||
make clean |
|||
``` |
|||
|
|||
### Test Failures |
|||
```bash |
|||
# Run with verbose output |
|||
go test -v -timeout=15m . |
|||
|
|||
# Run specific test |
|||
go test -v -run TestBasicRetentionWorkflow . |
|||
|
|||
# Check server health |
|||
make health-check |
|||
``` |
|||
|
|||
### Dependencies |
|||
```bash |
|||
# Install/update dependencies |
|||
make install-deps |
|||
|
|||
# Check dependency status |
|||
make check-deps |
|||
``` |
|||
|
|||
## Integration with SeaweedFS |
|||
|
|||
These tests validate the retention implementation in: |
|||
- `weed/s3api/s3api_object_retention.go` - Core retention logic |
|||
- `weed/s3api/s3api_object_handlers_retention.go` - HTTP handlers |
|||
- `weed/s3api/s3_constants/extend_key.go` - Extended attribute keys |
|||
- `weed/s3api/s3err/s3api_errors.go` - Error definitions |
|||
- `weed/s3api/s3api_object_handlers_delete.go` - Deletion enforcement |
|||
- `weed/s3api/s3api_object_handlers_put.go` - Upload enforcement |
|||
|
|||
## AWS CLI Compatibility |
|||
|
|||
The retention implementation supports standard AWS CLI commands: |
|||
|
|||
```bash |
|||
# Set object retention |
|||
aws s3api put-object-retention \ |
|||
--bucket mybucket \ |
|||
--key myobject \ |
|||
--retention Mode=GOVERNANCE,RetainUntilDate=2024-12-31T23:59:59Z |
|||
|
|||
# Get object retention |
|||
aws s3api get-object-retention \ |
|||
--bucket mybucket \ |
|||
--key myobject |
|||
|
|||
# Set legal hold |
|||
aws s3api put-object-legal-hold \ |
|||
--bucket mybucket \ |
|||
--key myobject \ |
|||
--legal-hold Status=ON |
|||
|
|||
# Configure bucket object lock |
|||
aws s3api put-object-lock-configuration \ |
|||
--bucket mybucket \ |
|||
--object-lock-configuration ObjectLockEnabled=Enabled,Rule='{DefaultRetention={Mode=GOVERNANCE,Days=30}}' |
|||
``` |
|||
|
|||
## Contributing |
|||
|
|||
When adding new retention tests: |
|||
|
|||
1. Follow existing test patterns |
|||
2. Use descriptive test names |
|||
3. Include both positive and negative test cases |
|||
4. Test error conditions |
|||
5. Update this README with new test descriptions |
|||
6. Add appropriate Makefile targets for new test categories |
|||
|
|||
## References |
|||
|
|||
- [AWS S3 Object Lock Documentation](https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html) |
|||
- [AWS S3 API Reference - Object Retention](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectRetention.html) |
|||
- [SeaweedFS S3 API Documentation](https://github.com/seaweedfs/seaweedfs/wiki/Amazon-S3-API) |
@ -0,0 +1,31 @@ |
|||
module github.com/seaweedfs/seaweedfs/test/s3/retention |
|||
|
|||
go 1.21 |
|||
|
|||
require ( |
|||
github.com/aws/aws-sdk-go-v2 v1.21.2 |
|||
github.com/aws/aws-sdk-go-v2/config v1.18.45 |
|||
github.com/aws/aws-sdk-go-v2/credentials v1.13.43 |
|||
github.com/aws/aws-sdk-go-v2/service/s3 v1.40.0 |
|||
github.com/stretchr/testify v1.8.4 |
|||
) |
|||
|
|||
require ( |
|||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.13 // indirect |
|||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13 // indirect |
|||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43 // indirect |
|||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37 // indirect |
|||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45 // indirect |
|||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.6 // indirect |
|||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.15 // indirect |
|||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.38 // indirect |
|||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.37 // indirect |
|||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.6 // indirect |
|||
github.com/aws/aws-sdk-go-v2/service/sso v1.15.2 // indirect |
|||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.3 // indirect |
|||
github.com/aws/aws-sdk-go-v2/service/sts v1.23.2 // indirect |
|||
github.com/aws/smithy-go v1.15.0 // indirect |
|||
github.com/davecgh/go-spew v1.1.1 // indirect |
|||
github.com/pmezard/go-difflib v1.0.0 // indirect |
|||
gopkg.in/yaml.v3 v3.0.1 // indirect |
|||
) |
@ -0,0 +1,62 @@ |
|||
github.com/aws/aws-sdk-go-v2 v1.21.0/go.mod h1:/RfNgGmRxI+iFOB1OeJUyxiU+9s88k3pfHvDagGEp0M= |
|||
github.com/aws/aws-sdk-go-v2 v1.21.2 h1:+LXZ0sgo8quN9UOKXXzAWRT3FWd4NxeXWOZom9pE7GA= |
|||
github.com/aws/aws-sdk-go-v2 v1.21.2/go.mod h1:ErQhvNuEMhJjweavOYhxVkn2RUx7kQXVATHrjKtxIpM= |
|||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.13 h1:OPLEkmhXf6xFPiz0bLeDArZIDx1NNS4oJyG4nv3Gct0= |
|||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.13/go.mod h1:gpAbvyDGQFozTEmlTFO8XcQKHzubdq0LzRyJpG6MiXM= |
|||
github.com/aws/aws-sdk-go-v2/config v1.18.45 h1:Aka9bI7n8ysuwPeFdm77nfbyHCAKQ3z9ghB3S/38zes= |
|||
github.com/aws/aws-sdk-go-v2/config v1.18.45/go.mod h1:ZwDUgFnQgsazQTnWfeLWk5GjeqTQTL8lMkoE1UXzxdE= |
|||
github.com/aws/aws-sdk-go-v2/credentials v1.13.43 h1:LU8vo40zBlo3R7bAvBVy/ku4nxGEyZe9N8MqAeFTzF8= |
|||
github.com/aws/aws-sdk-go-v2/credentials v1.13.43/go.mod h1:zWJBz1Yf1ZtX5NGax9ZdNjhhI4rgjfgsyk6vTY1yfVg= |
|||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13 h1:PIktER+hwIG286DqXyvVENjgLTAwGgoeriLDD5C+YlQ= |
|||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13/go.mod h1:f/Ib/qYjhV2/qdsf79H3QP/eRE4AkVyEf6sk7XfZ1tg= |
|||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.41/go.mod h1:CrObHAuPneJBlfEJ5T3szXOUkLEThaGfvnhTf33buas= |
|||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43 h1:nFBQlGtkbPzp/NjZLuFxRqmT91rLJkgvsEQs68h962Y= |
|||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43/go.mod h1:auo+PiyLl0n1l8A0e8RIeR8tOzYPfZZH/JNlrJ8igTQ= |
|||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.35/go.mod h1:SJC1nEVVva1g3pHAIdCp7QsRIkMmLAgoDquQ9Rr8kYw= |
|||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37 h1:JRVhO25+r3ar2mKGP7E0LDl8K9/G36gjlqca5iQbaqc= |
|||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37/go.mod h1:Qe+2KtKml+FEsQF/DHmDV+xjtche/hwoF75EG4UlHW8= |
|||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45 h1:hze8YsjSh8Wl1rYa1CJpRmXP21BvOBuc76YhW0HsuQ4= |
|||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45/go.mod h1:lD5M20o09/LCuQ2mE62Mb/iSdSlCNuj6H5ci7tW7OsE= |
|||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.4/go.mod h1:1PrKYwxTM+zjpw9Y41KFtoJCQrJ34Z47Y4VgVbfndjo= |
|||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.6 h1:wmGLw2i8ZTlHLw7a9ULGfQbuccw8uIiNr6sol5bFzc8= |
|||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.6/go.mod h1:Q0Hq2X/NuL7z8b1Dww8rmOFl+jzusKEcyvkKspwdpyc= |
|||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.14/go.mod h1:dDilntgHy9WnHXsh7dDtUPgHKEfTJIBUTHM8OWm0f/0= |
|||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.15 h1:7R8uRYyXzdD71KWVCL78lJZltah6VVznXBazvKjfH58= |
|||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.15/go.mod h1:26SQUPcTNgV1Tapwdt4a1rOsYRsnBsJHLMPoxK2b0d8= |
|||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.36/go.mod h1:lGnOkH9NJATw0XEPcAknFBj3zzNTEGRHtSw+CwC1YTg= |
|||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.38 h1:skaFGzv+3kA+v2BPKhuekeb1Hbb105+44r8ASC+q5SE= |
|||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.38/go.mod h1:epIZoRSSbRIwLPJU5F+OldHhwZPBdpDeQkRdCeY3+00= |
|||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.35/go.mod h1:QGF2Rs33W5MaN9gYdEQOBBFPLwTZkEhRwI33f7KIG0o= |
|||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.37 h1:WWZA/I2K4ptBS1kg0kV1JbBtG/umed0vwHRrmcr9z7k= |
|||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.37/go.mod h1:vBmDnwWXWxNPFRMmG2m/3MKOe+xEcMDo1tanpaWCcck= |
|||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.4/go.mod h1:LhTyt8J04LL+9cIt7pYJ5lbS/U98ZmXovLOR/4LUsk8= |
|||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.6 h1:9ulSU5ClouoPIYhDQdg9tpl83d5Yb91PXTKK+17q+ow= |
|||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.6/go.mod h1:lnc2taBsR9nTlz9meD+lhFZZ9EWY712QHrRflWpTcOA= |
|||
github.com/aws/aws-sdk-go-v2/service/s3 v1.40.0 h1:wl5dxN1NONhTDQD9uaEvNsDRX29cBmGED/nl0jkWlt4= |
|||
github.com/aws/aws-sdk-go-v2/service/s3 v1.40.0/go.mod h1:rDGMZA7f4pbmTtPOk5v5UM2lmX6UAbRnMDJeDvnH7AM= |
|||
github.com/aws/aws-sdk-go-v2/service/sso v1.15.2 h1:JuPGc7IkOP4AaqcZSIcyqLpFSqBWK32rM9+a1g6u73k= |
|||
github.com/aws/aws-sdk-go-v2/service/sso v1.15.2/go.mod h1:gsL4keucRCgW+xA85ALBpRFfdSLH4kHOVSnLMSuBECo= |
|||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.3 h1:HFiiRkf1SdaAmV3/BHOFZ9DjFynPHj8G/UIO1lQS+fk= |
|||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.3/go.mod h1:a7bHA82fyUXOm+ZSWKU6PIoBxrjSprdLoM8xPYvzYVg= |
|||
github.com/aws/aws-sdk-go-v2/service/sts v1.23.2 h1:0BkLfgeDjfZnZ+MhB3ONb01u9pwFYTCZVhlsSSBvlbU= |
|||
github.com/aws/aws-sdk-go-v2/service/sts v1.23.2/go.mod h1:Eows6e1uQEsc4ZaHANmsPRzAKcVDrcmjjWiih2+HUUQ= |
|||
github.com/aws/smithy-go v1.14.2/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= |
|||
github.com/aws/smithy-go v1.15.0 h1:PS/durmlzvAFpQHDs4wi4sNNP9ExsqZh6IlfdHXgKK8= |
|||
github.com/aws/smithy-go v1.15.0/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= |
|||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= |
|||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= |
|||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= |
|||
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= |
|||
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= |
|||
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= |
|||
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= |
|||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= |
|||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= |
|||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= |
|||
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= |
|||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= |
|||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= |
|||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= |
|||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= |
|||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= |
|||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= |
@ -0,0 +1,627 @@ |
|||
package s3api |
|||
|
|||
import ( |
|||
"context" |
|||
"fmt" |
|||
"strings" |
|||
"testing" |
|||
"time" |
|||
|
|||
"github.com/aws/aws-sdk-go-v2/aws" |
|||
"github.com/aws/aws-sdk-go-v2/config" |
|||
"github.com/aws/aws-sdk-go-v2/credentials" |
|||
"github.com/aws/aws-sdk-go-v2/service/s3" |
|||
"github.com/aws/aws-sdk-go-v2/service/s3/types" |
|||
"github.com/stretchr/testify/assert" |
|||
"github.com/stretchr/testify/require" |
|||
) |
|||
|
|||
// S3TestConfig holds configuration for S3 tests
type S3TestConfig struct {
	Endpoint      string // S3 endpoint URL, e.g. "http://localhost:8333"
	AccessKey     string // access key presented to the S3 gateway
	SecretKey     string // secret key presented to the S3 gateway
	Region        string // signing region
	BucketPrefix  string // prefix for generated per-test bucket names
	UseSSL        bool   // whether to connect over TLS
	SkipVerifySSL bool   // whether to skip TLS certificate verification
}

// Default test configuration - should match test_config.json
var defaultConfig = &S3TestConfig{
	Endpoint:      "http://localhost:8333", // Default SeaweedFS S3 port
	AccessKey:     "some_access_key1",
	SecretKey:     "some_secret_key1",
	Region:        "us-east-1",
	BucketPrefix:  "test-retention-",
	UseSSL:        false,
	SkipVerifySSL: true,
}
|||
|
|||
// getS3Client creates an AWS S3 client for testing
|
|||
func getS3Client(t *testing.T) *s3.Client { |
|||
cfg, err := config.LoadDefaultConfig(context.TODO(), |
|||
config.WithRegion(defaultConfig.Region), |
|||
config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider( |
|||
defaultConfig.AccessKey, |
|||
defaultConfig.SecretKey, |
|||
"", |
|||
)), |
|||
config.WithEndpointResolverWithOptions(aws.EndpointResolverWithOptionsFunc( |
|||
func(service, region string, options ...interface{}) (aws.Endpoint, error) { |
|||
return aws.Endpoint{ |
|||
URL: defaultConfig.Endpoint, |
|||
SigningRegion: defaultConfig.Region, |
|||
HostnameImmutable: true, |
|||
}, nil |
|||
})), |
|||
) |
|||
require.NoError(t, err) |
|||
|
|||
return s3.NewFromConfig(cfg, func(o *s3.Options) { |
|||
o.UsePathStyle = true // Important for SeaweedFS
|
|||
}) |
|||
} |
|||
|
|||
// getNewBucketName generates a unique bucket name
|
|||
func getNewBucketName() string { |
|||
timestamp := time.Now().UnixNano() |
|||
return fmt.Sprintf("%s%d", defaultConfig.BucketPrefix, timestamp) |
|||
} |
|||
|
|||
// createBucket creates a new bucket for testing
|
|||
func createBucket(t *testing.T, client *s3.Client, bucketName string) { |
|||
_, err := client.CreateBucket(context.TODO(), &s3.CreateBucketInput{ |
|||
Bucket: aws.String(bucketName), |
|||
}) |
|||
require.NoError(t, err) |
|||
} |
|||
|
|||
// deleteBucket deletes a bucket and all its contents
|
|||
func deleteBucket(t *testing.T, client *s3.Client, bucketName string) { |
|||
// First, delete all objects and versions
|
|||
err := deleteAllObjectVersions(t, client, bucketName) |
|||
if err != nil { |
|||
t.Logf("Warning: failed to delete all object versions: %v", err) |
|||
} |
|||
|
|||
// Then delete the bucket
|
|||
_, err = client.DeleteBucket(context.TODO(), &s3.DeleteBucketInput{ |
|||
Bucket: aws.String(bucketName), |
|||
}) |
|||
if err != nil { |
|||
t.Logf("Warning: failed to delete bucket %s: %v", bucketName, err) |
|||
} |
|||
} |
|||
|
|||
// deleteAllObjectVersions deletes all object versions in a bucket
|
|||
func deleteAllObjectVersions(t *testing.T, client *s3.Client, bucketName string) error { |
|||
// List all object versions
|
|||
paginator := s3.NewListObjectVersionsPaginator(client, &s3.ListObjectVersionsInput{ |
|||
Bucket: aws.String(bucketName), |
|||
}) |
|||
|
|||
for paginator.HasMorePages() { |
|||
page, err := paginator.NextPage(context.TODO()) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
|
|||
var objectsToDelete []types.ObjectIdentifier |
|||
|
|||
// Add versions
|
|||
for _, version := range page.Versions { |
|||
objectsToDelete = append(objectsToDelete, types.ObjectIdentifier{ |
|||
Key: version.Key, |
|||
VersionId: version.VersionId, |
|||
}) |
|||
} |
|||
|
|||
// Add delete markers
|
|||
for _, deleteMarker := range page.DeleteMarkers { |
|||
objectsToDelete = append(objectsToDelete, types.ObjectIdentifier{ |
|||
Key: deleteMarker.Key, |
|||
VersionId: deleteMarker.VersionId, |
|||
}) |
|||
} |
|||
|
|||
// Delete objects in batches
|
|||
if len(objectsToDelete) > 0 { |
|||
_, err := client.DeleteObjects(context.TODO(), &s3.DeleteObjectsInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Delete: &types.Delete{ |
|||
Objects: objectsToDelete, |
|||
Quiet: true, |
|||
}, |
|||
}) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
} |
|||
} |
|||
|
|||
return nil |
|||
} |
|||
|
|||
// enableVersioning enables versioning on a bucket
|
|||
func enableVersioning(t *testing.T, client *s3.Client, bucketName string) { |
|||
_, err := client.PutBucketVersioning(context.TODO(), &s3.PutBucketVersioningInput{ |
|||
Bucket: aws.String(bucketName), |
|||
VersioningConfiguration: &types.VersioningConfiguration{ |
|||
Status: types.BucketVersioningStatusEnabled, |
|||
}, |
|||
}) |
|||
require.NoError(t, err) |
|||
} |
|||
|
|||
// putObject puts an object into a bucket
|
|||
func putObject(t *testing.T, client *s3.Client, bucketName, key, content string) *s3.PutObjectOutput { |
|||
resp, err := client.PutObject(context.TODO(), &s3.PutObjectInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
Body: strings.NewReader(content), |
|||
}) |
|||
require.NoError(t, err) |
|||
return resp |
|||
} |
|||
|
|||
// TestBasicRetentionWorkflow tests the basic retention functionality
|
|||
func TestBasicRetentionWorkflow(t *testing.T) { |
|||
client := getS3Client(t) |
|||
bucketName := getNewBucketName() |
|||
|
|||
// Create bucket
|
|||
createBucket(t, client, bucketName) |
|||
defer deleteBucket(t, client, bucketName) |
|||
|
|||
// Enable versioning (required for retention)
|
|||
enableVersioning(t, client, bucketName) |
|||
|
|||
// Create object
|
|||
key := "test-object" |
|||
content := "test content for retention" |
|||
putResp := putObject(t, client, bucketName, key, content) |
|||
require.NotNil(t, putResp.VersionId) |
|||
|
|||
// Set retention with GOVERNANCE mode
|
|||
retentionUntil := time.Now().Add(24 * time.Hour) |
|||
_, err := client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
Retention: &types.ObjectLockRetention{ |
|||
Mode: types.ObjectLockRetentionModeGovernance, |
|||
RetainUntilDate: aws.Time(retentionUntil), |
|||
}, |
|||
}) |
|||
require.NoError(t, err) |
|||
|
|||
// Get retention and verify it was set correctly
|
|||
retentionResp, err := client.GetObjectRetention(context.TODO(), &s3.GetObjectRetentionInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
}) |
|||
require.NoError(t, err) |
|||
assert.Equal(t, types.ObjectLockRetentionModeGovernance, retentionResp.Retention.Mode) |
|||
assert.WithinDuration(t, retentionUntil, *retentionResp.Retention.RetainUntilDate, time.Second) |
|||
|
|||
// Try to delete object without bypass - should fail
|
|||
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
}) |
|||
require.Error(t, err) |
|||
|
|||
// Delete object with bypass governance - should succeed
|
|||
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
BypassGovernanceRetention: true, |
|||
}) |
|||
require.NoError(t, err) |
|||
} |
|||
|
|||
// TestRetentionModeCompliance tests COMPLIANCE mode retention
|
|||
func TestRetentionModeCompliance(t *testing.T) { |
|||
client := getS3Client(t) |
|||
bucketName := getNewBucketName() |
|||
|
|||
// Create bucket and enable versioning
|
|||
createBucket(t, client, bucketName) |
|||
defer deleteBucket(t, client, bucketName) |
|||
enableVersioning(t, client, bucketName) |
|||
|
|||
// Create object
|
|||
key := "compliance-test-object" |
|||
content := "compliance test content" |
|||
putResp := putObject(t, client, bucketName, key, content) |
|||
require.NotNil(t, putResp.VersionId) |
|||
|
|||
// Set retention with COMPLIANCE mode
|
|||
retentionUntil := time.Now().Add(1 * time.Hour) |
|||
_, err := client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
Retention: &types.ObjectLockRetention{ |
|||
Mode: types.ObjectLockRetentionModeCompliance, |
|||
RetainUntilDate: aws.Time(retentionUntil), |
|||
}, |
|||
}) |
|||
require.NoError(t, err) |
|||
|
|||
// Get retention and verify
|
|||
retentionResp, err := client.GetObjectRetention(context.TODO(), &s3.GetObjectRetentionInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
}) |
|||
require.NoError(t, err) |
|||
assert.Equal(t, types.ObjectLockRetentionModeCompliance, retentionResp.Retention.Mode) |
|||
|
|||
// Try to delete object with bypass - should still fail (compliance mode)
|
|||
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
BypassGovernanceRetention: true, |
|||
}) |
|||
require.Error(t, err) |
|||
|
|||
// Try to delete object without bypass - should also fail
|
|||
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
}) |
|||
require.Error(t, err) |
|||
} |
|||
|
|||
// TestLegalHoldWorkflow tests legal hold functionality
|
|||
func TestLegalHoldWorkflow(t *testing.T) { |
|||
client := getS3Client(t) |
|||
bucketName := getNewBucketName() |
|||
|
|||
// Create bucket and enable versioning
|
|||
createBucket(t, client, bucketName) |
|||
defer deleteBucket(t, client, bucketName) |
|||
enableVersioning(t, client, bucketName) |
|||
|
|||
// Create object
|
|||
key := "legal-hold-test-object" |
|||
content := "legal hold test content" |
|||
putResp := putObject(t, client, bucketName, key, content) |
|||
require.NotNil(t, putResp.VersionId) |
|||
|
|||
// Set legal hold ON
|
|||
_, err := client.PutObjectLegalHold(context.TODO(), &s3.PutObjectLegalHoldInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
LegalHold: &types.ObjectLockLegalHold{ |
|||
Status: types.ObjectLockLegalHoldStatusOn, |
|||
}, |
|||
}) |
|||
require.NoError(t, err) |
|||
|
|||
// Get legal hold and verify
|
|||
legalHoldResp, err := client.GetObjectLegalHold(context.TODO(), &s3.GetObjectLegalHoldInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
}) |
|||
require.NoError(t, err) |
|||
assert.Equal(t, types.ObjectLockLegalHoldStatusOn, legalHoldResp.LegalHold.Status) |
|||
|
|||
// Try to delete object - should fail due to legal hold
|
|||
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
}) |
|||
require.Error(t, err) |
|||
|
|||
// Remove legal hold
|
|||
_, err = client.PutObjectLegalHold(context.TODO(), &s3.PutObjectLegalHoldInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
LegalHold: &types.ObjectLockLegalHold{ |
|||
Status: types.ObjectLockLegalHoldStatusOff, |
|||
}, |
|||
}) |
|||
require.NoError(t, err) |
|||
|
|||
// Verify legal hold is off
|
|||
legalHoldResp, err = client.GetObjectLegalHold(context.TODO(), &s3.GetObjectLegalHoldInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
}) |
|||
require.NoError(t, err) |
|||
assert.Equal(t, types.ObjectLockLegalHoldStatusOff, legalHoldResp.LegalHold.Status) |
|||
|
|||
// Now delete should succeed
|
|||
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
}) |
|||
require.NoError(t, err) |
|||
} |
|||
|
|||
// TestObjectLockConfiguration tests object lock configuration
|
|||
func TestObjectLockConfiguration(t *testing.T) { |
|||
client := getS3Client(t) |
|||
bucketName := getNewBucketName() |
|||
|
|||
// Create bucket and enable versioning
|
|||
createBucket(t, client, bucketName) |
|||
defer deleteBucket(t, client, bucketName) |
|||
enableVersioning(t, client, bucketName) |
|||
|
|||
// Set object lock configuration
|
|||
_, err := client.PutObjectLockConfiguration(context.TODO(), &s3.PutObjectLockConfigurationInput{ |
|||
Bucket: aws.String(bucketName), |
|||
ObjectLockConfiguration: &types.ObjectLockConfiguration{ |
|||
ObjectLockEnabled: types.ObjectLockEnabledEnabled, |
|||
Rule: &types.ObjectLockRule{ |
|||
DefaultRetention: &types.DefaultRetention{ |
|||
Mode: types.ObjectLockRetentionModeGovernance, |
|||
Days: 30, |
|||
}, |
|||
}, |
|||
}, |
|||
}) |
|||
require.NoError(t, err) |
|||
|
|||
// Get object lock configuration and verify
|
|||
configResp, err := client.GetObjectLockConfiguration(context.TODO(), &s3.GetObjectLockConfigurationInput{ |
|||
Bucket: aws.String(bucketName), |
|||
}) |
|||
require.NoError(t, err) |
|||
assert.Equal(t, types.ObjectLockEnabledEnabled, configResp.ObjectLockConfiguration.ObjectLockEnabled) |
|||
assert.Equal(t, types.ObjectLockRetentionModeGovernance, configResp.ObjectLockConfiguration.Rule.DefaultRetention.Mode) |
|||
assert.Equal(t, int32(30), configResp.ObjectLockConfiguration.Rule.DefaultRetention.Days) |
|||
} |
|||
|
|||
// TestRetentionWithVersions tests retention with specific object versions
|
|||
func TestRetentionWithVersions(t *testing.T) { |
|||
client := getS3Client(t) |
|||
bucketName := getNewBucketName() |
|||
|
|||
// Create bucket and enable versioning
|
|||
createBucket(t, client, bucketName) |
|||
defer deleteBucket(t, client, bucketName) |
|||
enableVersioning(t, client, bucketName) |
|||
|
|||
// Create multiple versions of the same object
|
|||
key := "versioned-retention-test" |
|||
content1 := "version 1 content" |
|||
content2 := "version 2 content" |
|||
|
|||
putResp1 := putObject(t, client, bucketName, key, content1) |
|||
require.NotNil(t, putResp1.VersionId) |
|||
|
|||
putResp2 := putObject(t, client, bucketName, key, content2) |
|||
require.NotNil(t, putResp2.VersionId) |
|||
|
|||
// Set retention on first version only
|
|||
retentionUntil := time.Now().Add(1 * time.Hour) |
|||
_, err := client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
VersionId: putResp1.VersionId, |
|||
Retention: &types.ObjectLockRetention{ |
|||
Mode: types.ObjectLockRetentionModeGovernance, |
|||
RetainUntilDate: aws.Time(retentionUntil), |
|||
}, |
|||
}) |
|||
require.NoError(t, err) |
|||
|
|||
// Get retention for first version
|
|||
retentionResp, err := client.GetObjectRetention(context.TODO(), &s3.GetObjectRetentionInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
VersionId: putResp1.VersionId, |
|||
}) |
|||
require.NoError(t, err) |
|||
assert.Equal(t, types.ObjectLockRetentionModeGovernance, retentionResp.Retention.Mode) |
|||
|
|||
// Try to get retention for second version - should fail (no retention set)
|
|||
_, err = client.GetObjectRetention(context.TODO(), &s3.GetObjectRetentionInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
VersionId: putResp2.VersionId, |
|||
}) |
|||
require.Error(t, err) |
|||
|
|||
// Delete second version should succeed (no retention)
|
|||
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
VersionId: putResp2.VersionId, |
|||
}) |
|||
require.NoError(t, err) |
|||
|
|||
// Delete first version should fail (has retention)
|
|||
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
VersionId: putResp1.VersionId, |
|||
}) |
|||
require.Error(t, err) |
|||
|
|||
// Delete first version with bypass should succeed
|
|||
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
VersionId: putResp1.VersionId, |
|||
BypassGovernanceRetention: true, |
|||
}) |
|||
require.NoError(t, err) |
|||
} |
|||
|
|||
// TestRetentionAndLegalHoldCombination tests retention and legal hold together
|
|||
func TestRetentionAndLegalHoldCombination(t *testing.T) { |
|||
client := getS3Client(t) |
|||
bucketName := getNewBucketName() |
|||
|
|||
// Create bucket and enable versioning
|
|||
createBucket(t, client, bucketName) |
|||
defer deleteBucket(t, client, bucketName) |
|||
enableVersioning(t, client, bucketName) |
|||
|
|||
// Create object
|
|||
key := "combined-protection-test" |
|||
content := "combined protection test content" |
|||
putResp := putObject(t, client, bucketName, key, content) |
|||
require.NotNil(t, putResp.VersionId) |
|||
|
|||
// Set both retention and legal hold
|
|||
retentionUntil := time.Now().Add(1 * time.Hour) |
|||
|
|||
// Set retention
|
|||
_, err := client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
Retention: &types.ObjectLockRetention{ |
|||
Mode: types.ObjectLockRetentionModeGovernance, |
|||
RetainUntilDate: aws.Time(retentionUntil), |
|||
}, |
|||
}) |
|||
require.NoError(t, err) |
|||
|
|||
// Set legal hold
|
|||
_, err = client.PutObjectLegalHold(context.TODO(), &s3.PutObjectLegalHoldInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
LegalHold: &types.ObjectLockLegalHold{ |
|||
Status: types.ObjectLockLegalHoldStatusOn, |
|||
}, |
|||
}) |
|||
require.NoError(t, err) |
|||
|
|||
// Try to delete with bypass governance - should still fail due to legal hold
|
|||
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
BypassGovernanceRetention: true, |
|||
}) |
|||
require.Error(t, err) |
|||
|
|||
// Remove legal hold
|
|||
_, err = client.PutObjectLegalHold(context.TODO(), &s3.PutObjectLegalHoldInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
LegalHold: &types.ObjectLockLegalHold{ |
|||
Status: types.ObjectLockLegalHoldStatusOff, |
|||
}, |
|||
}) |
|||
require.NoError(t, err) |
|||
|
|||
// Now delete with bypass governance should succeed
|
|||
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
BypassGovernanceRetention: true, |
|||
}) |
|||
require.NoError(t, err) |
|||
} |
|||
|
|||
// TestExpiredRetention tests that objects can be deleted after retention expires
|
|||
func TestExpiredRetention(t *testing.T) { |
|||
client := getS3Client(t) |
|||
bucketName := getNewBucketName() |
|||
|
|||
// Create bucket and enable versioning
|
|||
createBucket(t, client, bucketName) |
|||
defer deleteBucket(t, client, bucketName) |
|||
enableVersioning(t, client, bucketName) |
|||
|
|||
// Create object
|
|||
key := "expired-retention-test" |
|||
content := "expired retention test content" |
|||
putResp := putObject(t, client, bucketName, key, content) |
|||
require.NotNil(t, putResp.VersionId) |
|||
|
|||
// Set retention for a very short time (2 seconds)
|
|||
retentionUntil := time.Now().Add(2 * time.Second) |
|||
_, err := client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
Retention: &types.ObjectLockRetention{ |
|||
Mode: types.ObjectLockRetentionModeGovernance, |
|||
RetainUntilDate: aws.Time(retentionUntil), |
|||
}, |
|||
}) |
|||
require.NoError(t, err) |
|||
|
|||
// Try to delete immediately - should fail
|
|||
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
}) |
|||
require.Error(t, err) |
|||
|
|||
// Wait for retention to expire
|
|||
time.Sleep(3 * time.Second) |
|||
|
|||
// Now delete should succeed
|
|||
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
}) |
|||
require.NoError(t, err) |
|||
} |
|||
|
|||
// TestRetentionErrorCases tests various error conditions
|
|||
func TestRetentionErrorCases(t *testing.T) { |
|||
client := getS3Client(t) |
|||
bucketName := getNewBucketName() |
|||
|
|||
// Create bucket and enable versioning
|
|||
createBucket(t, client, bucketName) |
|||
defer deleteBucket(t, client, bucketName) |
|||
enableVersioning(t, client, bucketName) |
|||
|
|||
// Test setting retention on non-existent object
|
|||
_, err := client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String("non-existent-key"), |
|||
Retention: &types.ObjectLockRetention{ |
|||
Mode: types.ObjectLockRetentionModeGovernance, |
|||
RetainUntilDate: aws.Time(time.Now().Add(1 * time.Hour)), |
|||
}, |
|||
}) |
|||
require.Error(t, err) |
|||
|
|||
// Test getting retention on non-existent object
|
|||
_, err = client.GetObjectRetention(context.TODO(), &s3.GetObjectRetentionInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String("non-existent-key"), |
|||
}) |
|||
require.Error(t, err) |
|||
|
|||
// Test setting legal hold on non-existent object
|
|||
_, err = client.PutObjectLegalHold(context.TODO(), &s3.PutObjectLegalHoldInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String("non-existent-key"), |
|||
LegalHold: &types.ObjectLockLegalHold{ |
|||
Status: types.ObjectLockLegalHoldStatusOn, |
|||
}, |
|||
}) |
|||
require.Error(t, err) |
|||
|
|||
// Test getting legal hold on non-existent object
|
|||
_, err = client.GetObjectLegalHold(context.TODO(), &s3.GetObjectLegalHoldInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String("non-existent-key"), |
|||
}) |
|||
require.Error(t, err) |
|||
|
|||
// Test setting retention with past date
|
|||
key := "retention-past-date-test" |
|||
content := "test content" |
|||
putObject(t, client, bucketName, key, content) |
|||
|
|||
pastDate := time.Now().Add(-1 * time.Hour) |
|||
_, err = client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
Retention: &types.ObjectLockRetention{ |
|||
Mode: types.ObjectLockRetentionModeGovernance, |
|||
RetainUntilDate: aws.Time(pastDate), |
|||
}, |
|||
}) |
|||
require.Error(t, err) |
|||
} |
@ -0,0 +1,455 @@ |
|||
package s3api |
|||
|
|||
import ( |
|||
"context" |
|||
"fmt" |
|||
"strings" |
|||
"testing" |
|||
"time" |
|||
|
|||
"github.com/aws/aws-sdk-go-v2/aws" |
|||
"github.com/aws/aws-sdk-go-v2/service/s3" |
|||
"github.com/aws/aws-sdk-go-v2/service/s3/types" |
|||
"github.com/stretchr/testify/assert" |
|||
"github.com/stretchr/testify/require" |
|||
) |
|||
|
|||
// TestWORMRetentionIntegration tests that both retention and legacy WORM work together
|
|||
func TestWORMRetentionIntegration(t *testing.T) { |
|||
client := getS3Client(t) |
|||
bucketName := getNewBucketName() |
|||
|
|||
// Create bucket and enable versioning
|
|||
createBucket(t, client, bucketName) |
|||
defer deleteBucket(t, client, bucketName) |
|||
enableVersioning(t, client, bucketName) |
|||
|
|||
// Create object
|
|||
key := "worm-retention-integration-test" |
|||
content := "worm retention integration test content" |
|||
putResp := putObject(t, client, bucketName, key, content) |
|||
require.NotNil(t, putResp.VersionId) |
|||
|
|||
// Set retention (new system)
|
|||
retentionUntil := time.Now().Add(1 * time.Hour) |
|||
_, err := client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
Retention: &types.ObjectLockRetention{ |
|||
Mode: types.ObjectLockRetentionModeGovernance, |
|||
RetainUntilDate: aws.Time(retentionUntil), |
|||
}, |
|||
}) |
|||
require.NoError(t, err) |
|||
|
|||
// Try to delete - should fail due to retention
|
|||
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
}) |
|||
require.Error(t, err) |
|||
|
|||
// Delete with bypass should succeed
|
|||
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
BypassGovernanceRetention: true, |
|||
}) |
|||
require.NoError(t, err) |
|||
} |
|||
|
|||
// TestWORMLegacyCompatibility tests that legacy WORM functionality still works
|
|||
func TestWORMLegacyCompatibility(t *testing.T) { |
|||
client := getS3Client(t) |
|||
bucketName := getNewBucketName() |
|||
|
|||
// Create bucket and enable versioning
|
|||
createBucket(t, client, bucketName) |
|||
defer deleteBucket(t, client, bucketName) |
|||
enableVersioning(t, client, bucketName) |
|||
|
|||
// Create object with legacy WORM headers (if supported)
|
|||
key := "legacy-worm-test" |
|||
content := "legacy worm test content" |
|||
|
|||
// Try to create object with legacy WORM TTL header
|
|||
putResp, err := client.PutObject(context.TODO(), &s3.PutObjectInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
Body: strings.NewReader(content), |
|||
// Add legacy WORM headers if supported
|
|||
Metadata: map[string]string{ |
|||
"x-amz-meta-worm-ttl": fmt.Sprintf("%d", time.Now().Add(1*time.Hour).Unix()), |
|||
}, |
|||
}) |
|||
require.NoError(t, err) |
|||
require.NotNil(t, putResp.VersionId) |
|||
|
|||
// Object should be created successfully
|
|||
resp, err := client.HeadObject(context.TODO(), &s3.HeadObjectInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
}) |
|||
require.NoError(t, err) |
|||
assert.NotNil(t, resp.Metadata) |
|||
} |
|||
|
|||
// TestRetentionOverwriteProtection tests that retention prevents overwriting
|
|||
func TestRetentionOverwriteProtection(t *testing.T) { |
|||
client := getS3Client(t) |
|||
bucketName := getNewBucketName() |
|||
|
|||
// Create bucket but DON'T enable versioning (to test overwrite protection)
|
|||
createBucket(t, client, bucketName) |
|||
defer deleteBucket(t, client, bucketName) |
|||
|
|||
// Create object
|
|||
key := "overwrite-protection-test" |
|||
content1 := "original content" |
|||
putResp1 := putObject(t, client, bucketName, key, content1) |
|||
require.NotNil(t, putResp1.ETag) |
|||
|
|||
// Enable versioning after creating object
|
|||
enableVersioning(t, client, bucketName) |
|||
|
|||
// Set retention on the object
|
|||
retentionUntil := time.Now().Add(1 * time.Hour) |
|||
_, err := client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
Retention: &types.ObjectLockRetention{ |
|||
Mode: types.ObjectLockRetentionModeGovernance, |
|||
RetainUntilDate: aws.Time(retentionUntil), |
|||
}, |
|||
}) |
|||
require.NoError(t, err) |
|||
|
|||
// Try to overwrite object - should fail in non-versioned bucket context
|
|||
content2 := "new content" |
|||
_, err = client.PutObject(context.TODO(), &s3.PutObjectInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
Body: strings.NewReader(content2), |
|||
}) |
|||
// Note: In a real scenario, this might fail or create a new version
|
|||
// The actual behavior depends on the implementation
|
|||
if err != nil { |
|||
t.Logf("Expected behavior: overwrite blocked due to retention: %v", err) |
|||
} else { |
|||
t.Logf("Overwrite allowed, likely created new version") |
|||
} |
|||
} |
|||
|
|||
// TestRetentionBulkOperations tests retention with bulk operations
|
|||
func TestRetentionBulkOperations(t *testing.T) { |
|||
client := getS3Client(t) |
|||
bucketName := getNewBucketName() |
|||
|
|||
// Create bucket and enable versioning
|
|||
createBucket(t, client, bucketName) |
|||
defer deleteBucket(t, client, bucketName) |
|||
enableVersioning(t, client, bucketName) |
|||
|
|||
// Create multiple objects with retention
|
|||
var objectsToDelete []types.ObjectIdentifier |
|||
retentionUntil := time.Now().Add(1 * time.Hour) |
|||
|
|||
for i := 0; i < 3; i++ { |
|||
key := fmt.Sprintf("bulk-test-object-%d", i) |
|||
content := fmt.Sprintf("bulk test content %d", i) |
|||
|
|||
putResp := putObject(t, client, bucketName, key, content) |
|||
require.NotNil(t, putResp.VersionId) |
|||
|
|||
// Set retention on each object
|
|||
_, err := client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
Retention: &types.ObjectLockRetention{ |
|||
Mode: types.ObjectLockRetentionModeGovernance, |
|||
RetainUntilDate: aws.Time(retentionUntil), |
|||
}, |
|||
}) |
|||
require.NoError(t, err) |
|||
|
|||
objectsToDelete = append(objectsToDelete, types.ObjectIdentifier{ |
|||
Key: aws.String(key), |
|||
VersionId: putResp.VersionId, |
|||
}) |
|||
} |
|||
|
|||
// Try bulk delete without bypass - should fail
|
|||
_, err := client.DeleteObjects(context.TODO(), &s3.DeleteObjectsInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Delete: &types.Delete{ |
|||
Objects: objectsToDelete, |
|||
Quiet: false, |
|||
}, |
|||
}) |
|||
require.Error(t, err) |
|||
|
|||
// Try bulk delete with bypass - should succeed
|
|||
_, err = client.DeleteObjects(context.TODO(), &s3.DeleteObjectsInput{ |
|||
Bucket: aws.String(bucketName), |
|||
BypassGovernanceRetention: true, |
|||
Delete: &types.Delete{ |
|||
Objects: objectsToDelete, |
|||
Quiet: false, |
|||
}, |
|||
}) |
|||
if err != nil { |
|||
t.Logf("Bulk delete with bypass failed (may not be supported): %v", err) |
|||
} else { |
|||
t.Logf("Bulk delete with bypass succeeded") |
|||
} |
|||
} |
|||
|
|||
// TestRetentionWithMultipartUpload tests retention with multipart uploads
|
|||
func TestRetentionWithMultipartUpload(t *testing.T) { |
|||
client := getS3Client(t) |
|||
bucketName := getNewBucketName() |
|||
|
|||
// Create bucket and enable versioning
|
|||
createBucket(t, client, bucketName) |
|||
defer deleteBucket(t, client, bucketName) |
|||
enableVersioning(t, client, bucketName) |
|||
|
|||
// Start multipart upload
|
|||
key := "multipart-retention-test" |
|||
createResp, err := client.CreateMultipartUpload(context.TODO(), &s3.CreateMultipartUploadInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
}) |
|||
require.NoError(t, err) |
|||
uploadId := createResp.UploadId |
|||
|
|||
// Upload a part
|
|||
partContent := "This is a test part for multipart upload" |
|||
uploadResp, err := client.UploadPart(context.TODO(), &s3.UploadPartInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
PartNumber: 1, |
|||
UploadId: uploadId, |
|||
Body: strings.NewReader(partContent), |
|||
}) |
|||
require.NoError(t, err) |
|||
|
|||
// Complete multipart upload
|
|||
_, err = client.CompleteMultipartUpload(context.TODO(), &s3.CompleteMultipartUploadInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
UploadId: uploadId, |
|||
MultipartUpload: &types.CompletedMultipartUpload{ |
|||
Parts: []types.CompletedPart{ |
|||
{ |
|||
ETag: uploadResp.ETag, |
|||
PartNumber: 1, |
|||
}, |
|||
}, |
|||
}, |
|||
}) |
|||
require.NoError(t, err) |
|||
|
|||
// Set retention on the completed multipart object
|
|||
retentionUntil := time.Now().Add(1 * time.Hour) |
|||
_, err = client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
Retention: &types.ObjectLockRetention{ |
|||
Mode: types.ObjectLockRetentionModeGovernance, |
|||
RetainUntilDate: aws.Time(retentionUntil), |
|||
}, |
|||
}) |
|||
require.NoError(t, err) |
|||
|
|||
// Try to delete - should fail
|
|||
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
}) |
|||
require.Error(t, err) |
|||
} |
|||
|
|||
// TestRetentionExtendedAttributes tests that retention uses extended attributes correctly
|
|||
func TestRetentionExtendedAttributes(t *testing.T) { |
|||
client := getS3Client(t) |
|||
bucketName := getNewBucketName() |
|||
|
|||
// Create bucket and enable versioning
|
|||
createBucket(t, client, bucketName) |
|||
defer deleteBucket(t, client, bucketName) |
|||
enableVersioning(t, client, bucketName) |
|||
|
|||
// Create object
|
|||
key := "extended-attrs-test" |
|||
content := "extended attributes test content" |
|||
putResp := putObject(t, client, bucketName, key, content) |
|||
require.NotNil(t, putResp.VersionId) |
|||
|
|||
// Set retention
|
|||
retentionUntil := time.Now().Add(1 * time.Hour) |
|||
_, err := client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
Retention: &types.ObjectLockRetention{ |
|||
Mode: types.ObjectLockRetentionModeGovernance, |
|||
RetainUntilDate: aws.Time(retentionUntil), |
|||
}, |
|||
}) |
|||
require.NoError(t, err) |
|||
|
|||
// Set legal hold
|
|||
_, err = client.PutObjectLegalHold(context.TODO(), &s3.PutObjectLegalHoldInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
LegalHold: &types.ObjectLockLegalHold{ |
|||
Status: types.ObjectLockLegalHoldStatusOn, |
|||
}, |
|||
}) |
|||
require.NoError(t, err) |
|||
|
|||
// Get object metadata to verify extended attributes are set
|
|||
resp, err := client.HeadObject(context.TODO(), &s3.HeadObjectInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
}) |
|||
require.NoError(t, err) |
|||
|
|||
// Check that the object has the expected metadata
|
|||
// Note: The actual metadata keys depend on the implementation
|
|||
assert.NotNil(t, resp.Metadata) |
|||
t.Logf("Object metadata: %+v", resp.Metadata) |
|||
|
|||
// Verify retention can be retrieved
|
|||
retentionResp, err := client.GetObjectRetention(context.TODO(), &s3.GetObjectRetentionInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
}) |
|||
require.NoError(t, err) |
|||
assert.Equal(t, types.ObjectLockRetentionModeGovernance, retentionResp.Retention.Mode) |
|||
|
|||
// Verify legal hold can be retrieved
|
|||
legalHoldResp, err := client.GetObjectLegalHold(context.TODO(), &s3.GetObjectLegalHoldInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
}) |
|||
require.NoError(t, err) |
|||
assert.Equal(t, types.ObjectLockLegalHoldStatusOn, legalHoldResp.LegalHold.Status) |
|||
} |
|||
|
|||
// TestRetentionBucketDefaults tests object lock configuration defaults
|
|||
func TestRetentionBucketDefaults(t *testing.T) { |
|||
client := getS3Client(t) |
|||
bucketName := getNewBucketName() |
|||
|
|||
// Create bucket and enable versioning
|
|||
createBucket(t, client, bucketName) |
|||
defer deleteBucket(t, client, bucketName) |
|||
enableVersioning(t, client, bucketName) |
|||
|
|||
// Set bucket object lock configuration with default retention
|
|||
_, err := client.PutObjectLockConfiguration(context.TODO(), &s3.PutObjectLockConfigurationInput{ |
|||
Bucket: aws.String(bucketName), |
|||
ObjectLockConfiguration: &types.ObjectLockConfiguration{ |
|||
ObjectLockEnabled: types.ObjectLockEnabledEnabled, |
|||
Rule: &types.ObjectLockRule{ |
|||
DefaultRetention: &types.DefaultRetention{ |
|||
Mode: types.ObjectLockRetentionModeGovernance, |
|||
Days: 1, // 1 day default
|
|||
}, |
|||
}, |
|||
}, |
|||
}) |
|||
require.NoError(t, err) |
|||
|
|||
// Create object (should inherit default retention)
|
|||
key := "bucket-defaults-test" |
|||
content := "bucket defaults test content" |
|||
putResp := putObject(t, client, bucketName, key, content) |
|||
require.NotNil(t, putResp.VersionId) |
|||
|
|||
// Check if object has default retention applied
|
|||
// Note: This depends on the implementation - some S3 services apply
|
|||
// default retention automatically, others require explicit setting
|
|||
retentionResp, err := client.GetObjectRetention(context.TODO(), &s3.GetObjectRetentionInput{ |
|||
Bucket: aws.String(bucketName), |
|||
Key: aws.String(key), |
|||
}) |
|||
if err != nil { |
|||
t.Logf("No automatic default retention applied: %v", err) |
|||
} else { |
|||
t.Logf("Default retention applied: %+v", retentionResp.Retention) |
|||
assert.Equal(t, types.ObjectLockRetentionModeGovernance, retentionResp.Retention.Mode) |
|||
} |
|||
} |
|||
|
|||
// TestRetentionConcurrentOperations tests concurrent retention operations.
//
// Two goroutines race to set GOVERNANCE retention and a legal hold on the
// same object. Failures of the concurrent writes are only logged, not
// asserted — the two updates may race on the same object entry — and each
// setting is verified afterwards only if it can be read back.
func TestRetentionConcurrentOperations(t *testing.T) {
	client := getS3Client(t)
	bucketName := getNewBucketName()

	// Create bucket and enable versioning
	createBucket(t, client, bucketName)
	defer deleteBucket(t, client, bucketName)
	enableVersioning(t, client, bucketName)

	// Create object
	key := "concurrent-ops-test"
	content := "concurrent operations test content"
	putResp := putObject(t, client, bucketName, key, content)
	require.NotNil(t, putResp.VersionId)

	// Test concurrent retention and legal hold operations
	retentionUntil := time.Now().Add(1 * time.Hour)

	// Set retention and legal hold concurrently.
	// Buffered to 2 so neither goroutine blocks on send.
	errChan := make(chan error, 2)

	go func() {
		_, err := client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{
			Bucket: aws.String(bucketName),
			Key:    aws.String(key),
			Retention: &types.ObjectLockRetention{
				Mode:            types.ObjectLockRetentionModeGovernance,
				RetainUntilDate: aws.Time(retentionUntil),
			},
		})
		errChan <- err
	}()

	go func() {
		_, err := client.PutObjectLegalHold(context.TODO(), &s3.PutObjectLegalHoldInput{
			Bucket: aws.String(bucketName),
			Key:    aws.String(key),
			LegalHold: &types.ObjectLockLegalHold{
				Status: types.ObjectLockLegalHoldStatusOn,
			},
		})
		errChan <- err
	}()

	// Wait for both operations to complete.
	// Errors are tolerated here because the writes may conflict.
	for i := 0; i < 2; i++ {
		err := <-errChan
		if err != nil {
			t.Logf("Concurrent operation failed: %v", err)
		}
	}

	// Verify both settings are applied — best effort: each check is skipped
	// when its read-back fails (e.g. the racing write lost).
	retentionResp, err := client.GetObjectRetention(context.TODO(), &s3.GetObjectRetentionInput{
		Bucket: aws.String(bucketName),
		Key:    aws.String(key),
	})
	if err == nil {
		assert.Equal(t, types.ObjectLockRetentionModeGovernance, retentionResp.Retention.Mode)
	}

	legalHoldResp, err := client.GetObjectLegalHold(context.TODO(), &s3.GetObjectLegalHoldInput{
		Bucket: aws.String(bucketName),
		Key:    aws.String(key),
	})
	if err == nil {
		assert.Equal(t, types.ObjectLockLegalHoldStatusOn, legalHoldResp.LegalHold.Status)
	}
}
@ -0,0 +1,9 @@ |
|||
{ |
|||
"endpoint": "http://localhost:8333", |
|||
"access_key": "some_access_key1", |
|||
"secret_key": "some_secret_key1", |
|||
"region": "us-east-1", |
|||
"bucket_prefix": "test-retention-", |
|||
"use_ssl": false, |
|||
"skip_verify_ssl": true |
|||
} |
@ -0,0 +1,331 @@ |
|||
package s3api |
|||
|
|||
import ( |
|||
"encoding/xml" |
|||
"net/http" |
|||
"strings" |
|||
|
|||
"github.com/seaweedfs/seaweedfs/weed/glog" |
|||
"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" |
|||
"github.com/seaweedfs/seaweedfs/weed/s3api/s3err" |
|||
stats_collect "github.com/seaweedfs/seaweedfs/weed/stats" |
|||
) |
|||
|
|||
// PutObjectRetentionHandler Put object Retention
|
|||
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectRetention.html
|
|||
func (s3a *S3ApiServer) PutObjectRetentionHandler(w http.ResponseWriter, r *http.Request) { |
|||
bucket, object := s3_constants.GetBucketAndObject(r) |
|||
glog.V(3).Infof("PutObjectRetentionHandler %s %s", bucket, object) |
|||
|
|||
// Get version ID from query parameters
|
|||
versionId := r.URL.Query().Get("versionId") |
|||
|
|||
// Check for bypass governance retention header
|
|||
bypassGovernance := r.Header.Get("x-amz-bypass-governance-retention") == "true" |
|||
|
|||
// Parse retention configuration from request body
|
|||
retention, err := parseObjectRetention(r) |
|||
if err != nil { |
|||
glog.Errorf("PutObjectRetentionHandler: failed to parse retention config: %v", err) |
|||
s3err.WriteErrorResponse(w, r, s3err.ErrMalformedXML) |
|||
return |
|||
} |
|||
|
|||
// Validate retention configuration
|
|||
if err := validateRetention(retention); err != nil { |
|||
glog.Errorf("PutObjectRetentionHandler: invalid retention config: %v", err) |
|||
s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRequest) |
|||
return |
|||
} |
|||
|
|||
// Set retention on the object
|
|||
if err := s3a.setObjectRetention(bucket, object, versionId, retention, bypassGovernance); err != nil { |
|||
glog.Errorf("PutObjectRetentionHandler: failed to set retention: %v", err) |
|||
|
|||
// Handle specific error cases
|
|||
if strings.Contains(err.Error(), "object not found") || strings.Contains(err.Error(), "version not found") { |
|||
s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey) |
|||
return |
|||
} |
|||
|
|||
if strings.Contains(err.Error(), "COMPLIANCE mode") || strings.Contains(err.Error(), "GOVERNANCE mode") { |
|||
s3err.WriteErrorResponse(w, r, s3err.ErrAccessDenied) |
|||
return |
|||
} |
|||
|
|||
s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) |
|||
return |
|||
} |
|||
|
|||
// Record metrics
|
|||
stats_collect.RecordBucketActiveTime(bucket) |
|||
|
|||
// Return success (HTTP 200 with no body)
|
|||
w.WriteHeader(http.StatusOK) |
|||
glog.V(3).Infof("PutObjectRetentionHandler: successfully set retention for %s/%s", bucket, object) |
|||
} |
|||
|
|||
// GetObjectRetentionHandler Get object Retention
|
|||
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectRetention.html
|
|||
func (s3a *S3ApiServer) GetObjectRetentionHandler(w http.ResponseWriter, r *http.Request) { |
|||
bucket, object := s3_constants.GetBucketAndObject(r) |
|||
glog.V(3).Infof("GetObjectRetentionHandler %s %s", bucket, object) |
|||
|
|||
// Get version ID from query parameters
|
|||
versionId := r.URL.Query().Get("versionId") |
|||
|
|||
// Get retention configuration for the object
|
|||
retention, err := s3a.getObjectRetention(bucket, object, versionId) |
|||
if err != nil { |
|||
glog.Errorf("GetObjectRetentionHandler: failed to get retention: %v", err) |
|||
|
|||
// Handle specific error cases
|
|||
if strings.Contains(err.Error(), "object not found") || strings.Contains(err.Error(), "version not found") { |
|||
s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey) |
|||
return |
|||
} |
|||
|
|||
if strings.Contains(err.Error(), "no retention configuration found") { |
|||
s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchObjectLockConfiguration) |
|||
return |
|||
} |
|||
|
|||
s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) |
|||
return |
|||
} |
|||
|
|||
// Marshal retention configuration to XML
|
|||
retentionXML, err := xml.Marshal(retention) |
|||
if err != nil { |
|||
glog.Errorf("GetObjectRetentionHandler: failed to marshal retention: %v", err) |
|||
s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) |
|||
return |
|||
} |
|||
|
|||
// Set response headers
|
|||
w.Header().Set("Content-Type", "application/xml") |
|||
w.WriteHeader(http.StatusOK) |
|||
|
|||
// Write XML response
|
|||
if _, err := w.Write([]byte(xml.Header)); err != nil { |
|||
glog.Errorf("GetObjectRetentionHandler: failed to write XML header: %v", err) |
|||
return |
|||
} |
|||
|
|||
if _, err := w.Write(retentionXML); err != nil { |
|||
glog.Errorf("GetObjectRetentionHandler: failed to write retention XML: %v", err) |
|||
return |
|||
} |
|||
|
|||
// Record metrics
|
|||
stats_collect.RecordBucketActiveTime(bucket) |
|||
|
|||
glog.V(3).Infof("GetObjectRetentionHandler: successfully retrieved retention for %s/%s", bucket, object) |
|||
} |
|||
|
|||
// PutObjectLegalHoldHandler Put object Legal Hold
|
|||
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectLegalHold.html
|
|||
func (s3a *S3ApiServer) PutObjectLegalHoldHandler(w http.ResponseWriter, r *http.Request) { |
|||
bucket, object := s3_constants.GetBucketAndObject(r) |
|||
glog.V(3).Infof("PutObjectLegalHoldHandler %s %s", bucket, object) |
|||
|
|||
// Get version ID from query parameters
|
|||
versionId := r.URL.Query().Get("versionId") |
|||
|
|||
// Parse legal hold configuration from request body
|
|||
legalHold, err := parseObjectLegalHold(r) |
|||
if err != nil { |
|||
glog.Errorf("PutObjectLegalHoldHandler: failed to parse legal hold config: %v", err) |
|||
s3err.WriteErrorResponse(w, r, s3err.ErrMalformedXML) |
|||
return |
|||
} |
|||
|
|||
// Validate legal hold configuration
|
|||
if err := validateLegalHold(legalHold); err != nil { |
|||
glog.Errorf("PutObjectLegalHoldHandler: invalid legal hold config: %v", err) |
|||
s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRequest) |
|||
return |
|||
} |
|||
|
|||
// Set legal hold on the object
|
|||
if err := s3a.setObjectLegalHold(bucket, object, versionId, legalHold); err != nil { |
|||
glog.Errorf("PutObjectLegalHoldHandler: failed to set legal hold: %v", err) |
|||
|
|||
// Handle specific error cases
|
|||
if strings.Contains(err.Error(), "object not found") || strings.Contains(err.Error(), "version not found") { |
|||
s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey) |
|||
return |
|||
} |
|||
|
|||
s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) |
|||
return |
|||
} |
|||
|
|||
// Record metrics
|
|||
stats_collect.RecordBucketActiveTime(bucket) |
|||
|
|||
// Return success (HTTP 200 with no body)
|
|||
w.WriteHeader(http.StatusOK) |
|||
glog.V(3).Infof("PutObjectLegalHoldHandler: successfully set legal hold for %s/%s", bucket, object) |
|||
} |
|||
|
|||
// GetObjectLegalHoldHandler Get object Legal Hold
|
|||
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLegalHold.html
|
|||
func (s3a *S3ApiServer) GetObjectLegalHoldHandler(w http.ResponseWriter, r *http.Request) { |
|||
bucket, object := s3_constants.GetBucketAndObject(r) |
|||
glog.V(3).Infof("GetObjectLegalHoldHandler %s %s", bucket, object) |
|||
|
|||
// Get version ID from query parameters
|
|||
versionId := r.URL.Query().Get("versionId") |
|||
|
|||
// Get legal hold configuration for the object
|
|||
legalHold, err := s3a.getObjectLegalHold(bucket, object, versionId) |
|||
if err != nil { |
|||
glog.Errorf("GetObjectLegalHoldHandler: failed to get legal hold: %v", err) |
|||
|
|||
// Handle specific error cases
|
|||
if strings.Contains(err.Error(), "object not found") || strings.Contains(err.Error(), "version not found") { |
|||
s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey) |
|||
return |
|||
} |
|||
|
|||
if strings.Contains(err.Error(), "no legal hold configuration found") { |
|||
s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchObjectLockConfiguration) |
|||
return |
|||
} |
|||
|
|||
s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) |
|||
return |
|||
} |
|||
|
|||
// Marshal legal hold configuration to XML
|
|||
legalHoldXML, err := xml.Marshal(legalHold) |
|||
if err != nil { |
|||
glog.Errorf("GetObjectLegalHoldHandler: failed to marshal legal hold: %v", err) |
|||
s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) |
|||
return |
|||
} |
|||
|
|||
// Set response headers
|
|||
w.Header().Set("Content-Type", "application/xml") |
|||
w.WriteHeader(http.StatusOK) |
|||
|
|||
// Write XML response
|
|||
if _, err := w.Write([]byte(xml.Header)); err != nil { |
|||
glog.Errorf("GetObjectLegalHoldHandler: failed to write XML header: %v", err) |
|||
return |
|||
} |
|||
|
|||
if _, err := w.Write(legalHoldXML); err != nil { |
|||
glog.Errorf("GetObjectLegalHoldHandler: failed to write legal hold XML: %v", err) |
|||
return |
|||
} |
|||
|
|||
// Record metrics
|
|||
stats_collect.RecordBucketActiveTime(bucket) |
|||
|
|||
glog.V(3).Infof("GetObjectLegalHoldHandler: successfully retrieved legal hold for %s/%s", bucket, object) |
|||
} |
|||
|
|||
// PutObjectLockConfigurationHandler Put object Lock configuration
|
|||
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectLockConfiguration.html
|
|||
func (s3a *S3ApiServer) PutObjectLockConfigurationHandler(w http.ResponseWriter, r *http.Request) { |
|||
bucket, _ := s3_constants.GetBucketAndObject(r) |
|||
glog.V(3).Infof("PutObjectLockConfigurationHandler %s", bucket) |
|||
|
|||
// Parse object lock configuration from request body
|
|||
config, err := parseObjectLockConfiguration(r) |
|||
if err != nil { |
|||
glog.Errorf("PutObjectLockConfigurationHandler: failed to parse object lock config: %v", err) |
|||
s3err.WriteErrorResponse(w, r, s3err.ErrMalformedXML) |
|||
return |
|||
} |
|||
|
|||
// Validate object lock configuration
|
|||
if config.ObjectLockEnabled != "" && config.ObjectLockEnabled != s3_constants.ObjectLockEnabled { |
|||
glog.Errorf("PutObjectLockConfigurationHandler: invalid object lock enabled value: %s", config.ObjectLockEnabled) |
|||
s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRequest) |
|||
return |
|||
} |
|||
|
|||
// Set object lock configuration on the bucket
|
|||
errCode := s3a.updateBucketConfig(bucket, func(bucketConfig *BucketConfig) error { |
|||
if bucketConfig.Entry.Extended == nil { |
|||
bucketConfig.Entry.Extended = make(map[string][]byte) |
|||
} |
|||
|
|||
// Store the configuration as JSON in extended attributes
|
|||
configXML, err := xml.Marshal(config) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
|
|||
bucketConfig.Entry.Extended[s3_constants.ExtObjectLockConfigKey] = configXML |
|||
|
|||
if config.ObjectLockEnabled != "" { |
|||
bucketConfig.Entry.Extended[s3_constants.ExtObjectLockEnabledKey] = []byte(config.ObjectLockEnabled) |
|||
} |
|||
|
|||
return nil |
|||
}) |
|||
|
|||
if errCode != s3err.ErrNone { |
|||
glog.Errorf("PutObjectLockConfigurationHandler: failed to set object lock config: %v", errCode) |
|||
s3err.WriteErrorResponse(w, r, errCode) |
|||
return |
|||
} |
|||
|
|||
// Record metrics
|
|||
stats_collect.RecordBucketActiveTime(bucket) |
|||
|
|||
// Return success (HTTP 200 with no body)
|
|||
w.WriteHeader(http.StatusOK) |
|||
glog.V(3).Infof("PutObjectLockConfigurationHandler: successfully set object lock config for %s", bucket) |
|||
} |
|||
|
|||
// GetObjectLockConfigurationHandler Get object Lock configuration
|
|||
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLockConfiguration.html
|
|||
func (s3a *S3ApiServer) GetObjectLockConfigurationHandler(w http.ResponseWriter, r *http.Request) { |
|||
bucket, _ := s3_constants.GetBucketAndObject(r) |
|||
glog.V(3).Infof("GetObjectLockConfigurationHandler %s", bucket) |
|||
|
|||
// Get bucket configuration
|
|||
bucketConfig, errCode := s3a.getBucketConfig(bucket) |
|||
if errCode != s3err.ErrNone { |
|||
glog.Errorf("GetObjectLockConfigurationHandler: failed to get bucket config: %v", errCode) |
|||
s3err.WriteErrorResponse(w, r, errCode) |
|||
return |
|||
} |
|||
|
|||
// Check if object lock configuration exists
|
|||
if bucketConfig.Entry.Extended == nil { |
|||
s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchObjectLockConfiguration) |
|||
return |
|||
} |
|||
|
|||
configXML, exists := bucketConfig.Entry.Extended[s3_constants.ExtObjectLockConfigKey] |
|||
if !exists { |
|||
s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchObjectLockConfiguration) |
|||
return |
|||
} |
|||
|
|||
// Set response headers
|
|||
w.Header().Set("Content-Type", "application/xml") |
|||
w.WriteHeader(http.StatusOK) |
|||
|
|||
// Write XML response
|
|||
if _, err := w.Write([]byte(xml.Header)); err != nil { |
|||
glog.Errorf("GetObjectLockConfigurationHandler: failed to write XML header: %v", err) |
|||
return |
|||
} |
|||
|
|||
if _, err := w.Write(configXML); err != nil { |
|||
glog.Errorf("GetObjectLockConfigurationHandler: failed to write config XML: %v", err) |
|||
return |
|||
} |
|||
|
|||
// Record metrics
|
|||
stats_collect.RecordBucketActiveTime(bucket) |
|||
|
|||
glog.V(3).Infof("GetObjectLockConfigurationHandler: successfully retrieved object lock config for %s", bucket) |
|||
} |
@ -0,0 +1,611 @@ |
|||
package s3api |
|||
|
|||
import ( |
|||
"encoding/xml" |
|||
"fmt" |
|||
"io" |
|||
"net/http" |
|||
"strconv" |
|||
"strings" |
|||
"time" |
|||
|
|||
"github.com/seaweedfs/seaweedfs/weed/glog" |
|||
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" |
|||
"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" |
|||
) |
|||
|
|||
// ObjectRetention represents S3 Object Retention configuration
// (the <Retention> document exchanged by Put/GetObjectRetention).
type ObjectRetention struct {
	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Retention"`
	// Mode is the retention mode string (GOVERNANCE or COMPLIANCE).
	Mode string `xml:"Mode,omitempty"`
	// RetainUntilDate is the instant the retention expires; serialized as
	// RFC3339 via the custom Marshal/UnmarshalXML methods below.
	RetainUntilDate *time.Time `xml:"RetainUntilDate,omitempty"`
}

// ObjectLegalHold represents S3 Object Legal Hold configuration
// (the <LegalHold> document exchanged by Put/GetObjectLegalHold).
type ObjectLegalHold struct {
	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LegalHold"`
	// Status is the hold status string (ON or OFF).
	Status string `xml:"Status,omitempty"`
}

// ObjectLockConfiguration represents S3 Object Lock Configuration
// (the bucket-level <ObjectLockConfiguration> document).
type ObjectLockConfiguration struct {
	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ObjectLockConfiguration"`
	// ObjectLockEnabled carries the "Enabled" marker when lock is active.
	ObjectLockEnabled string `xml:"ObjectLockEnabled,omitempty"`
	// Rule optionally carries a default retention rule for new objects.
	Rule *ObjectLockRule `xml:"Rule,omitempty"`
}

// ObjectLockRule represents an Object Lock Rule wrapping the default retention.
type ObjectLockRule struct {
	XMLName          xml.Name          `xml:"Rule"`
	DefaultRetention *DefaultRetention `xml:"DefaultRetention,omitempty"`
}

// DefaultRetention represents default retention settings applied to new
// objects in a lock-enabled bucket. Per the S3 API, Days and Years are
// mutually exclusive.
type DefaultRetention struct {
	XMLName xml.Name `xml:"DefaultRetention"`
	Mode    string   `xml:"Mode,omitempty"`
	Days    int      `xml:"Days,omitempty"`
	Years   int      `xml:"Years,omitempty"`
}
|||
|
|||
// Custom time marshalling for AWS S3 ISO8601 format.
//
// MarshalXML renders RetainUntilDate as an RFC3339 string in UTC instead of
// Go's default time encoding.
func (or *ObjectRetention) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	// Alias has the same fields but none of the methods, so encoding the
	// wrapper below does not recurse back into this MarshalXML.
	type Alias ObjectRetention
	aux := &struct {
		*Alias
		// Shadows Alias.RetainUntilDate with a pre-formatted string field.
		RetainUntilDate *string `xml:"RetainUntilDate,omitempty"`
	}{
		Alias: (*Alias)(or),
	}

	if or.RetainUntilDate != nil {
		dateStr := or.RetainUntilDate.UTC().Format(time.RFC3339)
		aux.RetainUntilDate = &dateStr
	}

	return e.EncodeElement(aux, start)
}
|||
|
|||
// Custom time unmarshalling for AWS S3 ISO8601 format.
//
// UnmarshalXML decodes RetainUntilDate from an RFC3339 string into a
// *time.Time; other fields decode through the aliased struct as usual.
func (or *ObjectRetention) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	// Alias strips the method set so DecodeElement below does not recurse
	// back into this UnmarshalXML.
	type Alias ObjectRetention
	aux := &struct {
		*Alias
		// Shadows Alias.RetainUntilDate to capture the raw date string.
		RetainUntilDate *string `xml:"RetainUntilDate,omitempty"`
	}{
		Alias: (*Alias)(or),
	}

	if err := d.DecodeElement(aux, &start); err != nil {
		return err
	}

	// Convert the captured string (if any) into the real time field.
	if aux.RetainUntilDate != nil {
		t, err := time.Parse(time.RFC3339, *aux.RetainUntilDate)
		if err != nil {
			return err
		}
		or.RetainUntilDate = &t
	}

	return nil
}
|||
|
|||
// parseObjectRetention parses XML retention configuration from request body
|
|||
func parseObjectRetention(r *http.Request) (*ObjectRetention, error) { |
|||
if r.Body == nil { |
|||
return nil, fmt.Errorf("empty request body") |
|||
} |
|||
|
|||
body, err := io.ReadAll(r.Body) |
|||
if err != nil { |
|||
return nil, fmt.Errorf("error reading request body: %v", err) |
|||
} |
|||
|
|||
var retention ObjectRetention |
|||
if err := xml.Unmarshal(body, &retention); err != nil { |
|||
return nil, fmt.Errorf("error parsing XML: %v", err) |
|||
} |
|||
|
|||
return &retention, nil |
|||
} |
|||
|
|||
// parseObjectLegalHold parses XML legal hold configuration from request body
|
|||
func parseObjectLegalHold(r *http.Request) (*ObjectLegalHold, error) { |
|||
if r.Body == nil { |
|||
return nil, fmt.Errorf("empty request body") |
|||
} |
|||
|
|||
body, err := io.ReadAll(r.Body) |
|||
if err != nil { |
|||
return nil, fmt.Errorf("error reading request body: %v", err) |
|||
} |
|||
|
|||
var legalHold ObjectLegalHold |
|||
if err := xml.Unmarshal(body, &legalHold); err != nil { |
|||
return nil, fmt.Errorf("error parsing XML: %v", err) |
|||
} |
|||
|
|||
return &legalHold, nil |
|||
} |
|||
|
|||
// parseObjectLockConfiguration parses XML object lock configuration from request body
|
|||
func parseObjectLockConfiguration(r *http.Request) (*ObjectLockConfiguration, error) { |
|||
if r.Body == nil { |
|||
return nil, fmt.Errorf("empty request body") |
|||
} |
|||
|
|||
body, err := io.ReadAll(r.Body) |
|||
if err != nil { |
|||
return nil, fmt.Errorf("error reading request body: %v", err) |
|||
} |
|||
|
|||
var config ObjectLockConfiguration |
|||
if err := xml.Unmarshal(body, &config); err != nil { |
|||
return nil, fmt.Errorf("error parsing XML: %v", err) |
|||
} |
|||
|
|||
return &config, nil |
|||
} |
|||
|
|||
// validateRetention validates retention configuration
|
|||
func validateRetention(retention *ObjectRetention) error { |
|||
if retention.Mode == "" && retention.RetainUntilDate == nil { |
|||
return fmt.Errorf("retention configuration must specify either Mode or RetainUntilDate") |
|||
} |
|||
|
|||
if retention.Mode != "" { |
|||
if retention.Mode != s3_constants.RetentionModeGovernance && retention.Mode != s3_constants.RetentionModeCompliance { |
|||
return fmt.Errorf("invalid retention mode: %s", retention.Mode) |
|||
} |
|||
} |
|||
|
|||
if retention.RetainUntilDate != nil { |
|||
if retention.RetainUntilDate.Before(time.Now()) { |
|||
return fmt.Errorf("retain until date must be in the future") |
|||
} |
|||
} |
|||
|
|||
return nil |
|||
} |
|||
|
|||
// validateLegalHold validates legal hold configuration
|
|||
func validateLegalHold(legalHold *ObjectLegalHold) error { |
|||
if legalHold.Status != s3_constants.LegalHoldOn && legalHold.Status != s3_constants.LegalHoldOff { |
|||
return fmt.Errorf("invalid legal hold status: %s", legalHold.Status) |
|||
} |
|||
|
|||
return nil |
|||
} |
|||
|
|||
// getObjectRetention retrieves retention configuration from object metadata.
//
// Entry resolution: an explicit versionId selects that version; otherwise
// the latest version is used when bucket versioning is enabled, else the
// plain object entry. Returns an error when the object cannot be found or
// when no retention metadata is stored on it.
func (s3a *S3ApiServer) getObjectRetention(bucket, object, versionId string) (*ObjectRetention, error) {
	var entry *filer_pb.Entry
	var err error

	if versionId != "" {
		entry, err = s3a.getSpecificObjectVersion(bucket, object, versionId)
	} else {
		// Check if versioning is enabled
		versioningEnabled, vErr := s3a.isVersioningEnabled(bucket)
		if vErr != nil {
			return nil, fmt.Errorf("error checking versioning: %v", vErr)
		}

		if versioningEnabled {
			entry, err = s3a.getLatestObjectVersion(bucket, object)
		} else {
			bucketDir := s3a.option.BucketsPath + "/" + bucket
			entry, err = s3a.getEntry(bucketDir, object)
		}
	}

	if err != nil {
		return nil, fmt.Errorf("object not found: %v", err)
	}

	// Retention is stored in the entry's extended attributes.
	if entry.Extended == nil {
		return nil, fmt.Errorf("no retention configuration found")
	}

	retention := &ObjectRetention{}

	if modeBytes, exists := entry.Extended[s3_constants.ExtRetentionModeKey]; exists {
		retention.Mode = string(modeBytes)
	}

	// The until-date is stored as Unix seconds; unparseable values are
	// silently skipped, leaving RetainUntilDate nil.
	if dateBytes, exists := entry.Extended[s3_constants.ExtRetentionUntilDateKey]; exists {
		if timestamp, err := strconv.ParseInt(string(dateBytes), 10, 64); err == nil {
			t := time.Unix(timestamp, 0)
			retention.RetainUntilDate = &t
		}
	}

	// Neither attribute present (or parseable) means no retention is set.
	if retention.Mode == "" && retention.RetainUntilDate == nil {
		return nil, fmt.Errorf("no retention configuration found")
	}

	return retention, nil
}
|||
|
|||
// setObjectRetention sets retention configuration on object metadata
|
|||
func (s3a *S3ApiServer) setObjectRetention(bucket, object, versionId string, retention *ObjectRetention, bypassGovernance bool) error { |
|||
var entry *filer_pb.Entry |
|||
var err error |
|||
var entryPath string |
|||
|
|||
if versionId != "" { |
|||
entry, err = s3a.getSpecificObjectVersion(bucket, object, versionId) |
|||
if err != nil { |
|||
return fmt.Errorf("version not found: %v", err) |
|||
} |
|||
// For versioned objects, we need to update the version file
|
|||
entryPath = object + ".versions/" + s3a.getVersionFileName(versionId) |
|||
} else { |
|||
// Check if versioning is enabled
|
|||
versioningEnabled, vErr := s3a.isVersioningEnabled(bucket) |
|||
if vErr != nil { |
|||
return fmt.Errorf("error checking versioning: %v", vErr) |
|||
} |
|||
|
|||
if versioningEnabled { |
|||
entry, err = s3a.getLatestObjectVersion(bucket, object) |
|||
if err != nil { |
|||
return fmt.Errorf("latest version not found: %v", err) |
|||
} |
|||
// Extract version ID from entry metadata
|
|||
if entry.Extended != nil { |
|||
if versionIdBytes, exists := entry.Extended[s3_constants.ExtVersionIdKey]; exists { |
|||
versionId = string(versionIdBytes) |
|||
entryPath = object + ".versions/" + s3a.getVersionFileName(versionId) |
|||
} |
|||
} |
|||
} else { |
|||
bucketDir := s3a.option.BucketsPath + "/" + bucket |
|||
entry, err = s3a.getEntry(bucketDir, object) |
|||
if err != nil { |
|||
return fmt.Errorf("object not found: %v", err) |
|||
} |
|||
entryPath = object |
|||
} |
|||
} |
|||
|
|||
// Check if object is already under retention
|
|||
if entry.Extended != nil { |
|||
if existingMode, exists := entry.Extended[s3_constants.ExtRetentionModeKey]; exists { |
|||
if string(existingMode) == s3_constants.RetentionModeCompliance && !bypassGovernance { |
|||
return fmt.Errorf("cannot modify retention on object under COMPLIANCE mode") |
|||
} |
|||
|
|||
if existingDateBytes, dateExists := entry.Extended[s3_constants.ExtRetentionUntilDateKey]; dateExists { |
|||
if timestamp, err := strconv.ParseInt(string(existingDateBytes), 10, 64); err == nil { |
|||
existingDate := time.Unix(timestamp, 0) |
|||
if existingDate.After(time.Now()) && string(existingMode) == s3_constants.RetentionModeGovernance && !bypassGovernance { |
|||
return fmt.Errorf("cannot modify retention on object under GOVERNANCE mode without bypass") |
|||
} |
|||
} |
|||
} |
|||
} |
|||
} |
|||
|
|||
// Update retention metadata
|
|||
if entry.Extended == nil { |
|||
entry.Extended = make(map[string][]byte) |
|||
} |
|||
|
|||
if retention.Mode != "" { |
|||
entry.Extended[s3_constants.ExtRetentionModeKey] = []byte(retention.Mode) |
|||
} |
|||
|
|||
if retention.RetainUntilDate != nil { |
|||
entry.Extended[s3_constants.ExtRetentionUntilDateKey] = []byte(strconv.FormatInt(retention.RetainUntilDate.Unix(), 10)) |
|||
|
|||
// Also update the existing WORM fields for compatibility
|
|||
entry.WormEnforcedAtTsNs = time.Now().UnixNano() |
|||
} |
|||
|
|||
// Update the entry
|
|||
bucketDir := s3a.option.BucketsPath + "/" + bucket |
|||
return s3a.mkFile(bucketDir, entryPath, entry.Chunks, func(updatedEntry *filer_pb.Entry) { |
|||
updatedEntry.Extended = entry.Extended |
|||
updatedEntry.WormEnforcedAtTsNs = entry.WormEnforcedAtTsNs |
|||
}) |
|||
} |
|||
|
|||
// getObjectLegalHold retrieves legal hold configuration from object metadata.
//
// Entry resolution: an explicit versionId selects that version; otherwise
// the latest version is used when bucket versioning is enabled, else the
// plain object entry. Returns an error when the object cannot be found or
// when no legal hold attribute is stored on it.
func (s3a *S3ApiServer) getObjectLegalHold(bucket, object, versionId string) (*ObjectLegalHold, error) {
	var entry *filer_pb.Entry
	var err error

	if versionId != "" {
		entry, err = s3a.getSpecificObjectVersion(bucket, object, versionId)
	} else {
		// Check if versioning is enabled
		versioningEnabled, vErr := s3a.isVersioningEnabled(bucket)
		if vErr != nil {
			return nil, fmt.Errorf("error checking versioning: %v", vErr)
		}

		if versioningEnabled {
			entry, err = s3a.getLatestObjectVersion(bucket, object)
		} else {
			bucketDir := s3a.option.BucketsPath + "/" + bucket
			entry, err = s3a.getEntry(bucketDir, object)
		}
	}

	if err != nil {
		return nil, fmt.Errorf("object not found: %v", err)
	}

	// The legal hold status lives in the entry's extended attributes.
	if entry.Extended == nil {
		return nil, fmt.Errorf("no legal hold configuration found")
	}

	legalHold := &ObjectLegalHold{}

	if statusBytes, exists := entry.Extended[s3_constants.ExtLegalHoldKey]; exists {
		legalHold.Status = string(statusBytes)
	} else {
		return nil, fmt.Errorf("no legal hold configuration found")
	}

	return legalHold, nil
}
|||
|
|||
// setObjectLegalHold sets legal hold configuration on object metadata
|
|||
func (s3a *S3ApiServer) setObjectLegalHold(bucket, object, versionId string, legalHold *ObjectLegalHold) error { |
|||
var entry *filer_pb.Entry |
|||
var err error |
|||
var entryPath string |
|||
|
|||
if versionId != "" { |
|||
entry, err = s3a.getSpecificObjectVersion(bucket, object, versionId) |
|||
if err != nil { |
|||
return fmt.Errorf("version not found: %v", err) |
|||
} |
|||
entryPath = object + ".versions/" + s3a.getVersionFileName(versionId) |
|||
} else { |
|||
// Check if versioning is enabled
|
|||
versioningEnabled, vErr := s3a.isVersioningEnabled(bucket) |
|||
if vErr != nil { |
|||
return fmt.Errorf("error checking versioning: %v", vErr) |
|||
} |
|||
|
|||
if versioningEnabled { |
|||
entry, err = s3a.getLatestObjectVersion(bucket, object) |
|||
if err != nil { |
|||
return fmt.Errorf("latest version not found: %v", err) |
|||
} |
|||
// Extract version ID from entry metadata
|
|||
if entry.Extended != nil { |
|||
if versionIdBytes, exists := entry.Extended[s3_constants.ExtVersionIdKey]; exists { |
|||
versionId = string(versionIdBytes) |
|||
entryPath = object + ".versions/" + s3a.getVersionFileName(versionId) |
|||
} |
|||
} |
|||
} else { |
|||
bucketDir := s3a.option.BucketsPath + "/" + bucket |
|||
entry, err = s3a.getEntry(bucketDir, object) |
|||
if err != nil { |
|||
return fmt.Errorf("object not found: %v", err) |
|||
} |
|||
entryPath = object |
|||
} |
|||
} |
|||
|
|||
// Update legal hold metadata
|
|||
if entry.Extended == nil { |
|||
entry.Extended = make(map[string][]byte) |
|||
} |
|||
|
|||
entry.Extended[s3_constants.ExtLegalHoldKey] = []byte(legalHold.Status) |
|||
|
|||
// Update the entry
|
|||
bucketDir := s3a.option.BucketsPath + "/" + bucket |
|||
return s3a.mkFile(bucketDir, entryPath, entry.Chunks, func(updatedEntry *filer_pb.Entry) { |
|||
updatedEntry.Extended = entry.Extended |
|||
}) |
|||
} |
|||
|
|||
// isObjectRetentionActive checks if an object is currently under retention
|
|||
func (s3a *S3ApiServer) isObjectRetentionActive(bucket, object, versionId string) (bool, error) { |
|||
retention, err := s3a.getObjectRetention(bucket, object, versionId) |
|||
if err != nil { |
|||
// If no retention found, object is not under retention
|
|||
if strings.Contains(err.Error(), "no retention configuration found") { |
|||
return false, nil |
|||
} |
|||
return false, err |
|||
} |
|||
|
|||
if retention.RetainUntilDate != nil && retention.RetainUntilDate.After(time.Now()) { |
|||
return true, nil |
|||
} |
|||
|
|||
return false, nil |
|||
} |
|||
|
|||
// isObjectLegalHoldActive checks if an object is currently under legal hold
|
|||
func (s3a *S3ApiServer) isObjectLegalHoldActive(bucket, object, versionId string) (bool, error) { |
|||
legalHold, err := s3a.getObjectLegalHold(bucket, object, versionId) |
|||
if err != nil { |
|||
// If no legal hold found, object is not under legal hold
|
|||
if strings.Contains(err.Error(), "no legal hold configuration found") { |
|||
return false, nil |
|||
} |
|||
return false, err |
|||
} |
|||
|
|||
return legalHold.Status == s3_constants.LegalHoldOn, nil |
|||
} |
|||
|
|||
// checkObjectLockPermissions checks if an object can be deleted or modified
|
|||
func (s3a *S3ApiServer) checkObjectLockPermissions(bucket, object, versionId string, bypassGovernance bool) error { |
|||
// Check if object is under retention
|
|||
retentionActive, err := s3a.isObjectRetentionActive(bucket, object, versionId) |
|||
if err != nil { |
|||
glog.Warningf("Error checking retention for %s/%s: %v", bucket, object, err) |
|||
} |
|||
|
|||
// Check if object is under legal hold
|
|||
legalHoldActive, err := s3a.isObjectLegalHoldActive(bucket, object, versionId) |
|||
if err != nil { |
|||
glog.Warningf("Error checking legal hold for %s/%s: %v", bucket, object, err) |
|||
} |
|||
|
|||
// If object is under legal hold, it cannot be deleted or modified
|
|||
if legalHoldActive { |
|||
return fmt.Errorf("object is under legal hold and cannot be deleted or modified") |
|||
} |
|||
|
|||
// If object is under retention, check the mode
|
|||
if retentionActive { |
|||
retention, err := s3a.getObjectRetention(bucket, object, versionId) |
|||
if err != nil { |
|||
return fmt.Errorf("error getting retention configuration: %v", err) |
|||
} |
|||
|
|||
if retention.Mode == s3_constants.RetentionModeCompliance { |
|||
return fmt.Errorf("object is under COMPLIANCE mode retention and cannot be deleted or modified") |
|||
} |
|||
|
|||
if retention.Mode == s3_constants.RetentionModeGovernance && !bypassGovernance { |
|||
return fmt.Errorf("object is under GOVERNANCE mode retention and cannot be deleted or modified without bypass") |
|||
} |
|||
} |
|||
|
|||
// Check existing WORM enforcement for compatibility
|
|||
if err := s3a.checkLegacyWormEnforcement(bucket, object, versionId, bypassGovernance); err != nil { |
|||
return err |
|||
} |
|||
|
|||
return nil |
|||
} |
|||
|
|||
// checkLegacyWormEnforcement checks the existing WORM infrastructure for compatibility
|
|||
func (s3a *S3ApiServer) checkLegacyWormEnforcement(bucket, object, versionId string, bypassGovernance bool) error { |
|||
var entry *filer_pb.Entry |
|||
var err error |
|||
|
|||
if versionId != "" { |
|||
entry, err = s3a.getSpecificObjectVersion(bucket, object, versionId) |
|||
} else { |
|||
// Check if versioning is enabled
|
|||
versioningEnabled, vErr := s3a.isVersioningEnabled(bucket) |
|||
if vErr != nil { |
|||
// If we can't check versioning, skip WORM check to avoid false positives
|
|||
return nil |
|||
} |
|||
|
|||
if versioningEnabled { |
|||
entry, err = s3a.getLatestObjectVersion(bucket, object) |
|||
} else { |
|||
bucketDir := s3a.option.BucketsPath + "/" + bucket |
|||
entry, err = s3a.getEntry(bucketDir, object) |
|||
} |
|||
} |
|||
|
|||
if err != nil { |
|||
// If object doesn't exist, no WORM enforcement applies
|
|||
return nil |
|||
} |
|||
|
|||
// Check if legacy WORM is enforced
|
|||
if entry.WormEnforcedAtTsNs == 0 { |
|||
return nil |
|||
} |
|||
|
|||
// Check if this is under legacy WORM enforcement
|
|||
// For compatibility, we treat legacy WORM similar to GOVERNANCE mode
|
|||
// (can be bypassed with appropriate permissions)
|
|||
if !bypassGovernance { |
|||
return fmt.Errorf("object is under legacy WORM enforcement and cannot be deleted or modified without bypass") |
|||
} |
|||
|
|||
return nil |
|||
} |
|||
|
|||
// integrateWithWormSystem ensures compatibility between S3 retention and legacy WORM
|
|||
func (s3a *S3ApiServer) integrateWithWormSystem(entry *filer_pb.Entry, retention *ObjectRetention) { |
|||
if retention == nil || retention.RetainUntilDate == nil { |
|||
return |
|||
} |
|||
|
|||
// Set the legacy WORM timestamp for backward compatibility
|
|||
if entry.WormEnforcedAtTsNs == 0 { |
|||
entry.WormEnforcedAtTsNs = time.Now().UnixNano() |
|||
} |
|||
|
|||
// Store additional S3 retention metadata in extended attributes
|
|||
if entry.Extended == nil { |
|||
entry.Extended = make(map[string][]byte) |
|||
} |
|||
|
|||
if retention.Mode != "" { |
|||
entry.Extended[s3_constants.ExtRetentionModeKey] = []byte(retention.Mode) |
|||
} |
|||
|
|||
if retention.RetainUntilDate != nil { |
|||
entry.Extended[s3_constants.ExtRetentionUntilDateKey] = []byte(strconv.FormatInt(retention.RetainUntilDate.Unix(), 10)) |
|||
} |
|||
} |
|||
|
|||
// isObjectWormProtected checks both S3 retention and legacy WORM for complete protection status
|
|||
func (s3a *S3ApiServer) isObjectWormProtected(bucket, object, versionId string) (bool, error) { |
|||
// Check S3 object retention
|
|||
retentionActive, err := s3a.isObjectRetentionActive(bucket, object, versionId) |
|||
if err != nil { |
|||
glog.V(4).Infof("Error checking S3 retention for %s/%s: %v", bucket, object, err) |
|||
} |
|||
|
|||
// Check S3 legal hold
|
|||
legalHoldActive, err := s3a.isObjectLegalHoldActive(bucket, object, versionId) |
|||
if err != nil { |
|||
glog.V(4).Infof("Error checking S3 legal hold for %s/%s: %v", bucket, object, err) |
|||
} |
|||
|
|||
// Check legacy WORM enforcement
|
|||
legacyWormActive, err := s3a.isLegacyWormActive(bucket, object, versionId) |
|||
if err != nil { |
|||
glog.V(4).Infof("Error checking legacy WORM for %s/%s: %v", bucket, object, err) |
|||
} |
|||
|
|||
return retentionActive || legalHoldActive || legacyWormActive, nil |
|||
} |
|||
|
|||
// isLegacyWormActive checks if an object is under legacy WORM enforcement
|
|||
func (s3a *S3ApiServer) isLegacyWormActive(bucket, object, versionId string) (bool, error) { |
|||
var entry *filer_pb.Entry |
|||
var err error |
|||
|
|||
if versionId != "" { |
|||
entry, err = s3a.getSpecificObjectVersion(bucket, object, versionId) |
|||
} else { |
|||
// Check if versioning is enabled
|
|||
versioningEnabled, vErr := s3a.isVersioningEnabled(bucket) |
|||
if vErr != nil { |
|||
return false, nil |
|||
} |
|||
|
|||
if versioningEnabled { |
|||
entry, err = s3a.getLatestObjectVersion(bucket, object) |
|||
} else { |
|||
bucketDir := s3a.option.BucketsPath + "/" + bucket |
|||
entry, err = s3a.getEntry(bucketDir, object) |
|||
} |
|||
} |
|||
|
|||
if err != nil { |
|||
return false, nil |
|||
} |
|||
|
|||
// Check if legacy WORM is enforced and still active
|
|||
if entry.WormEnforcedAtTsNs == 0 { |
|||
return false, nil |
|||
} |
|||
|
|||
// For now, we consider legacy WORM as always active when set
|
|||
// The original WORM system should handle time-based expiration
|
|||
return true, nil |
|||
} |
Write
Preview
Loading…
Cancel
Save
Reference in new issue