Tree: f096b067fd
Branches: add-admin-and-worker-to-helm-charts, add-ec-vacuum, add-foundation-db, add_fasthttp_client, add_remote_storage, adding-message-queue-integration-tests, avoid_releasing_temp_file_on_write, changing-to-zap, collect-public-metrics, create-table-snapshot-api-design, data_query_pushdown, dependabot/maven/other/java/client/com.google.protobuf-protobuf-java-3.25.5, dependabot/maven/other/java/examples/org.apache.hadoop-hadoop-common-3.4.0, detect-and-plan-ec-tasks, do-not-retry-if-error-is-NotFound, enhance-erasure-coding, fasthttp, filer1_maintenance_branch, fix-GetObjectLockConfigurationHandler, fix-versioning-listing-only, ftp, gh-pages, improve-fuse-mount, improve-fuse-mount2, logrus, master, message_send, mount2, mq-subscribe, mq2, original_weed_mount, pr-7412, random_access_file, refactor-needle-read-operations, refactor-volume-write, remote_overlay, revert-5134-patch-1, revert-5819-patch-1, revert-6434-bugfix-missing-s3-audit, s3-select, sub, tcp_read, test-reverting-lock-table, test_udp, testing, testing-sdx-generation, tikv, track-mount-e2e, volume_buffered_writes, worker-execute-ec-tasks
Tags: 0.72, 0.72.release, 0.73, 0.74, 0.75, 0.76, 0.77, 0.90, 0.91, 0.92, 0.93, 0.94, 0.95, 0.96, 0.97, 0.98, 0.99, 1.00, 1.01, 1.02, 1.03, 1.04, 1.05, 1.06, 1.07, 1.08, 1.09, 1.10, 1.11, 1.12, 1.14, 1.15, 1.16, 1.17, 1.18, 1.19, 1.20, 1.21, 1.22, 1.23, 1.24, 1.25, 1.26, 1.27, 1.28, 1.29, 1.30, 1.31, 1.32, 1.33, 1.34, 1.35, 1.36, 1.37, 1.38, 1.40, 1.41, 1.42, 1.43, 1.44, 1.45, 1.46, 1.47, 1.48, 1.49, 1.50, 1.51, 1.52, 1.53, 1.54, 1.55, 1.56, 1.57, 1.58, 1.59, 1.60, 1.61, 1.61RC, 1.62, 1.63, 1.64, 1.65, 1.66, 1.67, 1.68, 1.69, 1.70, 1.71, 1.72, 1.73, 1.74, 1.75, 1.76, 1.77, 1.78, 1.79, 1.80, 1.81, 1.82, 1.83, 1.84, 1.85, 1.86, 1.87, 1.88, 1.90, 1.91, 1.92, 1.93, 1.94, 1.95, 1.96, 1.97, 1.98, 1.99, 1;70, 2.00, 2.01, 2.02, 2.03, 2.04, 2.05, 2.06, 2.07, 2.08, 2.09, 2.10, 2.11, 2.12, 2.13, 2.14, 2.15, 2.16, 2.17, 2.18, 2.19, 2.20, 2.21, 2.22, 2.23, 2.24, 2.25, 2.26, 2.27, 2.28, 2.29, 2.30, 2.31, 2.32, 2.33, 2.34, 2.35, 2.36, 2.37, 2.38, 2.39, 2.40, 2.41, 2.42, 2.43, 2.47, 2.48, 2.49, 2.50, 2.51, 2.52, 2.53, 2.54, 2.55, 2.56, 2.57, 2.58, 2.59, 2.60, 2.61, 2.62, 2.63, 2.64, 2.65, 2.66, 2.67, 2.68, 2.69, 2.70, 2.71, 2.72, 2.73, 2.74, 2.75, 2.76, 2.77, 2.78, 2.79, 2.80, 2.81, 2.82, 2.83, 2.84, 2.85, 2.86, 2.87, 2.88, 2.89, 2.90, 2.91, 2.92, 2.93, 2.94, 2.95, 2.96, 2.97, 2.98, 2.99, 3.00, 3.01, 3.02, 3.03, 3.04, 3.05, 3.06, 3.07, 3.08, 3.09, 3.10, 3.11, 3.12, 3.13, 3.14, 3.15, 3.16, 3.18, 3.19, 3.20, 3.21, 3.22, 3.23, 3.24, 3.25, 3.26, 3.27, 3.28, 3.29, 3.30, 3.31, 3.32, 3.33, 3.34, 3.35, 3.36, 3.37, 3.38, 3.39, 3.40, 3.41, 3.42, 3.43, 3.44, 3.45, 3.46, 3.47, 3.48, 3.50, 3.51, 3.52, 3.53, 3.54, 3.55, 3.56, 3.57, 3.58, 3.59, 3.60, 3.61, 3.62, 3.63, 3.64, 3.65, 3.66, 3.67, 3.68, 3.69, 3.71, 3.72, 3.73, 3.74, 3.75, 3.76, 3.77, 3.78, 3.79, 3.80, 3.81, 3.82, 3.83, 3.84, 3.85, 3.86, 3.87, 3.88, 3.89, 3.90, 3.91, 3.92, 3.93, 3.94, 3.95, 3.96, 3.97, 3.98, 3.99, 4.00, dev, helm-3.65.1, v0.69, v0.70beta, v3.33
		
	
		
7658 Commits (f096b067fd2171919810d6e9536e9068b899f7db)

f096b067fd (3 days ago): weed master add peers=none option for faster startup (#7419)
* weed master -peers=none * single master mode only when peers is none * refactoring * revert duplicated code * revert * Update weed/command/master.go Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> * preventing "none" passed to other components if master is not started --------- Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>  | 

5ab49e2971 (3 days ago): Adjust cli option (#7418)
* adjust "weed benchmark" CLI to use readOnly/writeOnly * consistently use "-master" CLI option * If both -readOnly and -writeOnly are specified, the current logic silently allows it with -writeOnly taking precedence. This is confusing and could lead to unexpected behavior.  | 

58acc14d2c (3 days ago): avoid unnecessary fail fast
fix https://github.com/seaweedfs/seaweedfs/issues/7417  | 

f00ae727b7 (4 days ago): detect ipv6

d745e6e41d (4 days ago): Fix masterclient vidmap race condition (#7412)
* fallback to check master
* clean up
* parsing
* refactor
* handle parse error
* return error
* avoid dup lookup
* use batch key
* dedup lookup logic
* address comments
* errors.Join(lookupErrors...)
* add a comment
* Fix: Critical data race in MasterClient vidMap
Fixes a critical data race where resetVidMap() was writing to the vidMap
pointer while other methods were reading it concurrently without synchronization.
Changes:
- Removed embedded *vidMap from MasterClient
- Added vidMapLock (sync.RWMutex) to protect vidMap pointer access
- Created safe accessor methods (GetLocations, GetDataCenter, etc.)
- Updated all direct vidMap accesses to use thread-safe methods
- Updated resetVidMap() to acquire write lock during pointer swap
The vidMap already has internal locking for its operations, but this fix
protects the vidMap pointer itself from concurrent read/write races.
Verified with: go test -race ./weed/wdclient/...
Impact:
- Prevents potential panics from concurrent pointer access
- No performance impact - uses RWMutex for read-heavy workloads
- Maintains backward compatibility through wrapper methods
* fmt
* Fix: Critical data race in MasterClient vidMap
Fixes a critical data race where resetVidMap() was writing to the vidMap
pointer while other methods were reading it concurrently without synchronization.
Changes:
- Removed embedded *vidMap from MasterClient struct
- Added vidMapLock (sync.RWMutex) to protect vidMap pointer access
- Created minimal public accessor methods for external packages:
  * GetLocations, GetLocationsClone, GetVidLocations
  * LookupFileId, LookupVolumeServerUrl
  * GetDataCenter
- Internal code directly locks and accesses vidMap (no extra indirection)
- Updated resetVidMap() to acquire write lock during pointer swap
- Updated shell/commands.go to use GetDataCenter() method
Design Philosophy:
- vidMap already has internal locking for its map operations
- This fix specifically protects the vidMap *pointer* from concurrent access
- Public methods for external callers, direct locking for internal use
- Minimizes wrapper overhead while maintaining thread safety
Verified with: go test -race ./weed/wdclient/... (passes)
Impact:
- Prevents potential panics/crashes from data races
- Minimal performance impact (RWMutex for read-heavy workload)
- Maintains full backward compatibility
* fix more concurrent access
* reduce lock scope
* Optimize vidMap locking for better concurrency
Improved locking strategy based on the understanding that:
- vidMapLock protects the vidMap pointer from concurrent swaps
- vidMap has internal locks that protect its data structures
Changes:
1. Read operations: Grab pointer with RLock, release immediately, then operate
   - Reduces lock hold time
   - Allows resetVidMap to proceed sooner
   - Methods: GetLocations, GetLocationsClone, GetVidLocations,
     LookupVolumeServerUrl, GetDataCenter
2. Write operations: Changed from Lock() to RLock()
   - RLock prevents pointer swap during operation
   - Allows concurrent readers and other writers (serialized by vidMap's lock)
   - Methods: addLocation, deleteLocation, addEcLocation, deleteEcLocation
Benefits:
- Significantly reduced lock contention
- Better concurrent performance under load
- Still prevents all race conditions
Verified with: go test -race ./weed/wdclient/... (passes)
* Further reduce lock contention in LookupVolumeIdsWithFallback
Optimized two loops that were holding RLock for extended periods:
Before:
- Held RLock during entire loop iteration
- Included string parsing and cache lookups
- Could block resetVidMap for significant time with large batches
After:
- Grab vidMap pointer with brief RLock
- Release lock immediately
- Perform all loop operations on local pointer
Impact:
- First loop: Cache check on initial volumeIds
- Second loop: Double-check after singleflight wait
Benefits:
- Minimal lock hold time (just pointer copy)
- resetVidMap no longer blocked by long loops
- Better concurrent performance with large volume ID lists
- Still thread-safe (vidMap methods have internal locks)
Verified with: go test -race ./weed/wdclient/... (passes)
* Add clarifying comments to vidMap helper functions
Added inline documentation to each helper function (addLocation, deleteLocation,
addEcLocation, deleteEcLocation) explaining the two-level locking strategy:
- RLock on vidMapLock prevents resetVidMap from swapping the pointer
- vidMap has internal locks that protect the actual map mutations
- This design provides optimal concurrency
The comments make it clear why RLock (not Lock) is correct and intentional,
preventing future confusion about the locking strategy.
* Improve encapsulation: Add shallowClone() method to vidMap
Added a shallowClone() method to vidMap to improve encapsulation and prevent
MasterClient from directly accessing vidMap's internal fields.
Changes:
1. Added vidMap.shallowClone() in vid_map.go
   - Encapsulates the shallow copy logic within vidMap
   - Makes vidMap responsible for its own state representation
   - Documented that caller is responsible for thread safety
2. Simplified resetVidMap() in masterclient.go
   - Uses tail := mc.vidMap.shallowClone() instead of manual field access
   - Cleaner, more maintainable code
   - Better adherence to encapsulation principles
Benefits:
- Improved code organization and maintainability
- vidMap internals are now properly encapsulated
- Easier to modify vidMap structure in the future
- More self-documenting code
Verified with: go test -race ./weed/wdclient/... (passes)
* Optimize locking: Reduce lock acquisitions and use helper methods
Two optimizations to further reduce lock contention and improve code consistency:
1. LookupFileIdWithFallback: Eliminated redundant lock acquisition
   - Before: Two separate locks to get vidMap and dataCenter
   - After: Single lock gets both values together
   - Benefit: 50% reduction in lock/unlock overhead for this hot path
2. KeepConnected: Use GetDataCenter() helper for consistency
   - Before: Manual lock/unlock to access DataCenter field
   - After: Use existing GetDataCenter() helper method
   - Benefit: Better encapsulation and code consistency
Impact:
- Reduced lock contention in high-traffic lookup path
- More consistent use of accessor methods throughout codebase
- Cleaner, more maintainable code
Verified with: go test -race ./weed/wdclient/... (passes)
* Refactor: Extract common locking patterns into helper methods
Eliminated code duplication by introducing two helper methods that encapsulate
the common locking patterns used throughout MasterClient:
1. getStableVidMap() - For read operations
   - Acquires lock, gets pointer, releases immediately
   - Returns stable snapshot for thread-safe reads
   - Used by: GetLocations, GetLocationsClone, GetVidLocations,
     LookupFileId, LookupVolumeServerUrl, GetDataCenter
2. withCurrentVidMap(f func(vm *vidMap)) - For write operations
   - Holds RLock during callback execution
   - Prevents pointer swap while allowing concurrent operations
   - Used by: addLocation, deleteLocation, addEcLocation, deleteEcLocation
Benefits:
- Reduced code duplication (eliminated 48 lines of repetitive locking code)
- Centralized locking logic makes it easier to understand and maintain
- Self-documenting pattern through named helper methods
- Easier to modify locking strategy in the future (single point of change)
- Improved readability - accessor methods are now one-liners
Code size reduction: ~40% fewer lines for accessor/helper methods
Verified with: go test -race ./weed/wdclient/... (passes)
* consistent
* Fix cache pointer race condition with atomic.Pointer
Use atomic.Pointer for vidMap cache field to prevent data races
during cache trimming in resetVidMap. This addresses the race condition
where concurrent GetLocations calls could read the cache pointer while
resetVidMap is modifying it during cache chain trimming.
Changes:
- Changed cache field from *vidMap to atomic.Pointer[vidMap]
- Updated all cache access to use Load() and Store() atomic operations
- Updated shallowClone, GetLocations, deleteLocation, deleteEcLocation
- Updated resetVidMap to use atomic operations for cache trimming
* Merge: Resolve conflict in deleteEcLocation - keep atomic.Pointer and fix bug
Resolved merge conflict by combining:
1. Atomic pointer access pattern (from HEAD): cache.Load()
2. Correct method call (from fix): deleteEcLocation (not deleteLocation)
Resolution:
- Before (HEAD): cachedMap.deleteLocation() - WRONG, reintroduced bug
- Before (fix): vc.cache.deleteEcLocation() - RIGHT method, old pattern
- After (merged): cachedMap.deleteEcLocation() - RIGHT method, new pattern
This preserves both improvements:
✓ Thread-safe atomic.Pointer access pattern
✓ Correct recursive call to deleteEcLocation
Verified with: go test -race ./weed/wdclient/... (passes)
* Update vid_map.go
* remove shallow clone
* simplify
							
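
To make the two-level locking scheme described above concrete, here is a minimal Go sketch of the pattern: an RWMutex guards a swappable pointer, the pointed-to structure keeps its own internal locks, and the helper methods mirror the getStableVidMap/withCurrentVidMap names from the commit message. The types and fields are illustrative stand-ins, not the actual SeaweedFS wdclient code.

```go
package wdclientsketch

import "sync"

// vidMap stands in for a lookup table that already protects its own maps.
type vidMap struct {
	mu        sync.RWMutex
	locations map[uint32][]string
}

func (vm *vidMap) getLocations(vid uint32) []string {
	vm.mu.RLock()
	defer vm.mu.RUnlock()
	return vm.locations[vid]
}

func (vm *vidMap) addLocation(vid uint32, url string) {
	vm.mu.Lock()
	defer vm.mu.Unlock()
	vm.locations[vid] = append(vm.locations[vid], url)
}

// MasterClient holds a swappable *vidMap; vidMapLock protects only the pointer.
type MasterClient struct {
	vidMapLock sync.RWMutex
	vidMap     *vidMap
}

// getStableVidMap grabs the current pointer under RLock and releases the lock
// immediately; reads then run against a stable snapshot.
func (mc *MasterClient) getStableVidMap() *vidMap {
	mc.vidMapLock.RLock()
	defer mc.vidMapLock.RUnlock()
	return mc.vidMap
}

// withCurrentVidMap holds RLock for the duration of f, so the pointer cannot
// be swapped mid-operation while still allowing concurrent callers.
func (mc *MasterClient) withCurrentVidMap(f func(vm *vidMap)) {
	mc.vidMapLock.RLock()
	defer mc.vidMapLock.RUnlock()
	f(mc.vidMap)
}

// resetVidMap swaps in a fresh map under the write lock.
func (mc *MasterClient) resetVidMap() {
	mc.vidMapLock.Lock()
	defer mc.vidMapLock.Unlock()
	mc.vidMap = &vidMap{locations: make(map[uint32][]string)}
}

func (mc *MasterClient) GetLocations(vid uint32) []string {
	return mc.getStableVidMap().getLocations(vid)
}

func (mc *MasterClient) AddLocation(vid uint32, url string) {
	mc.withCurrentVidMap(func(vm *vidMap) { vm.addLocation(vid, url) })
}
```

The point of the split is that `resetVidMap` only contends on the brief pointer swap, while readers and writers mostly serialize on the inner map's own lock.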

9f07bca9cc (4 days ago): Fix IPv6 host header formatting to match AWS SDK behavior (#7414)
* Add nginx reverse proxy documentation for S3 API Fixes #7407 Add comprehensive documentation and example configuration for using nginx as a reverse proxy with SeaweedFS S3 API while maintaining AWS Signature V4 authentication compatibility. Changes: - Add docker/nginx/README.md with detailed setup guide - Add docker/nginx/s3-example.conf with working configuration - Update docker/nginx/proxy.conf with important S3 notes The documentation covers: - Critical requirements for AWS Signature V4 authentication - Common mistakes and why they break S3 authentication - Complete working nginx configurations - Debugging tips and troubleshooting - Performance tuning recommendations * Fix IPv6 host header formatting to match AWS SDK behavior Follow-up to PR #7403 When a default port (80 for HTTP, 443 for HTTPS) is stripped from an IPv6 address, the square brackets should also be removed to match AWS SDK behavior for S3 signature calculation. Reference: https://github.com/aws/aws-sdk-go-v2/blob/main/aws/signer/internal/v4/host.go The AWS SDK's stripPort function explicitly removes brackets when returning an IPv6 address without a port. Changes: - Update extractHostHeader to strip brackets from IPv6 addresses when no port or default port is used - Update test expectations to match AWS SDK behavior - Add detailed comments explaining the AWS SDK compatibility requirement This ensures S3 signature validation works correctly with IPv6 addresses behind reverse proxies, matching AWS S3 canonical request format. Fixes the issue raised in PR #7403 comment: https://github.com/seaweedfs/seaweedfs/pull/7403#issuecomment-3471105438 * Update docker/nginx/README.md Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> * Add nginx reverse proxy documentation for S3 API Fixes #7407 Add comprehensive documentation and example configuration for using nginx as a reverse proxy with SeaweedFS S3 API while maintaining AWS Signature V4 authentication compatibility. Changes: - Add docker/nginx/README.md with detailed setup guide - Add docker/nginx/s3-example.conf with working configuration - Update docker/nginx/proxy.conf with important S3 notes The documentation covers: - Critical requirements for AWS Signature V4 authentication - Common mistakes and why they break S3 authentication - Complete working nginx configurations - Debugging tips and troubleshooting - Performance tuning recommendations Fix IPv6 host header formatting to match AWS SDK behavior Follow-up to PR #7403 When a default port (80 for HTTP, 443 for HTTPS) is stripped from an IPv6 address, the square brackets should also be removed to match AWS SDK behavior for S3 signature calculation. Reference: https://github.com/aws/aws-sdk-go-v2/blob/main/aws/signer/internal/v4/host.go The AWS SDK's stripPort function explicitly removes brackets when returning an IPv6 address without a port. Changes: - Update extractHostHeader to strip brackets from IPv6 addresses when no port or default port is used - Update test expectations to match AWS SDK behavior - Add detailed comments explaining the AWS SDK compatibility requirement This ensures S3 signature validation works correctly with IPv6 addresses behind reverse proxies, matching AWS S3 canonical request format. 
Fixes the issue raised in PR #7403 comment: https://github.com/seaweedfs/seaweedfs/pull/7403#issuecomment-3471105438 * Revert "Merge branch 'fix-ipv6-brackets-default-port' of https://github.com/seaweedfs/seaweedfs into fix-ipv6-brackets-default-port" This reverts commit  | 
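
For reference, the rule stated above (strip a default port, and drop the square brackets around an IPv6 literal once no port remains) might look roughly like the helper below. This is a hedged sketch of the behavior the commit describes, not the actual handler from the PR.

```go
package s3sketch

import "strings"

// stripDefaultPort removes :80 (http) or :443 (https) from a Host header and,
// when no port is left, also removes the brackets around an IPv6 literal,
// mirroring the AWS SDK stripPort behavior referenced in the commit message.
func stripDefaultPort(host string, useTLS bool) string {
	defaultPort := ":80"
	if useTLS {
		defaultPort = ":443"
	}
	// Bracketed IPv6, e.g. "[2001:db8::1]:443" or "[2001:db8::1]".
	if strings.HasPrefix(host, "[") {
		end := strings.Index(host, "]")
		if end < 0 {
			return host // malformed; leave untouched
		}
		addr, rest := host[1:end], host[end+1:]
		if rest == "" || rest == defaultPort {
			return addr // no port or default port: return without brackets
		}
		return host // non-default port: keep brackets and port
	}
	// Hostname or IPv4 literal.
	return strings.TrimSuffix(host, defaultPort)
}
```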

5810aba763 (4 days ago): Filer: fallback to check master (#7411)
* fallback to check master * clean up * parsing * refactor * handle parse error * return error * avoid dup lookup * use batch key * dedup lookup logic * address comments * errors.Join(lookupErrors...) * add a comment  | 

ba07b3e4c6 (4 days ago): network: Adaptive timeout (#7410)
* server can start when no network for local dev * fixed superfluous response.WriteHeader call" warning * adaptive based on last write time * more doc * refactoring  | 

f5a57a6463 (5 days ago): fixed superfluous response.WriteHeader call" warning

a6da3eb770 (5 days ago): server can start when no network for local dev

d00a2a8707 (5 days ago): Fix S3 bucket policy ARN validation to accept AWS ARNs and simplified formats (#7409)
* Fix S3 bucket policy ARN validation to accept AWS ARNs and simplified formats Fixes #7252 The bucket policy validation was rejecting valid AWS-style ARNs and simplified resource formats, causing validation failures with the error 'resource X does not match bucket X' even when they were identical strings. Changes: - Updated validateResourceForBucket() to accept three formats: 1. AWS-style ARNs: arn:aws:s3:::bucket-name[/*|/path] 2. SeaweedFS ARNs: arn:seaweed:s3:::bucket-name[/*|/path] 3. Simplified formats: bucket-name[/*|/path] - Added comprehensive test coverage for all three formats - Added specific test cases from issue #7252 to prevent regression This ensures compatibility with standard AWS S3 bucket policies while maintaining support for SeaweedFS-specific ARN format. * Refactor validateResourceForBucket to reduce code duplication Simplified the validation logic by stripping ARN prefixes first, then performing validation on the remaining resource path. This reduces code duplication and improves maintainability while maintaining identical functionality. Addresses review feedback from Gemini Code Assist. * Use strings.CutPrefix for cleaner ARN prefix stripping Replace strings.HasPrefix checks with strings.CutPrefix for more idiomatic Go code. This function is available in Go 1.20+ and provides cleaner conditional logic with the combined check and extraction. Addresses review feedback from Gemini Code Assist.  | 
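
A rough sketch of the three accepted resource formats described above, with strings.CutPrefix (Go 1.20+) doing the prefix stripping. Function and package names here are illustrative, not the exact code from the PR.

```go
package policysketch

import "strings"

// validateResourceForBucket reports whether a policy resource refers to the
// given bucket, accepting AWS-style ARNs, SeaweedFS ARNs, and simplified names.
func validateResourceForBucket(resource, bucket string) bool {
	// Strip either ARN prefix if present; a bare "bucket[/path]" passes through.
	if after, ok := strings.CutPrefix(resource, "arn:aws:s3:::"); ok {
		resource = after
	} else if after, ok := strings.CutPrefix(resource, "arn:seaweed:s3:::"); ok {
		resource = after
	}
	// Exact bucket, whole-bucket wildcard, or a path inside the bucket.
	return resource == bucket ||
		resource == bucket+"/*" ||
		strings.HasPrefix(resource, bucket+"/")
}
```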

9b6b564235 (5 days ago): Filer: Add retry mechanism for failed file deletions (#7402)
* Filer: Add retry mechanism for failed file deletions Implement a retry queue with exponential backoff for handling transient deletion failures, particularly when volumes are temporarily read-only. Key features: - Automatic retry for retryable errors (read-only volumes, network issues) - Exponential backoff: 5min → 10min → 20min → ... (max 6 hours) - Maximum 10 retry attempts per file before giving up - Separate goroutine processing retry queue every minute - Enhanced logging with retry/permanent error classification This addresses the issue where file deletions fail when volumes are temporarily read-only (tiered volumes, maintenance, etc.) and these deletions were previously lost. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude <noreply@anthropic.com> * Update weed/filer/filer_deletion.go Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> * Filer: Add retry mechanism for failed file deletions Implement a retry queue with exponential backoff for handling transient deletion failures, particularly when volumes are temporarily read-only. Key features: - Automatic retry for retryable errors (read-only volumes, network issues) - Exponential backoff: 5min → 10min → 20min → ... (max 6 hours) - Maximum 10 retry attempts per file before giving up - Separate goroutine processing retry queue every minute - Map-based retry queue for O(1) lookups and deletions - Enhanced logging with retry/permanent error classification - Consistent error detail limiting (max 10 total errors logged) - Graceful shutdown support with quit channel for both processors This addresses the issue where file deletions fail when volumes are temporarily read-only (tiered volumes, maintenance, etc.) and these deletions were previously lost. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude <noreply@anthropic.com> * Filer: Replace magic numbers with named constants in retry processor Replace hardcoded values with package-level constants for better maintainability: - DeletionRetryPollInterval (1 minute): interval for checking retry queue - DeletionRetryBatchSize (1000): max items to process per iteration This improves code readability and makes configuration changes easier. * Filer: Optimize retry queue with min-heap data structure Replace map-based retry queue with a min-heap for better scalability and deterministic ordering. Performance improvements: - GetReadyItems: O(N) → O(K log N) where K is items retrieved - AddOrUpdate: O(1) → O(log N) (acceptable trade-off) - Early exit when checking ready items (heap top is earliest) - No full iteration over all items while holding lock Benefits: - Deterministic processing order (earliest NextRetryAt first) - Better scalability for large retry queues (thousands of items) - Reduced lock contention duration - Memory efficient (no separate slice reconstruction) Implementation: - Min-heap ordered by NextRetryAt using container/heap - Dual index: heap for ordering + map for O(1) FileId lookups - heap.Fix() used when updating existing items - Comprehensive complexity documentation in comments This addresses the performance bottleneck identified in GetReadyItems where iterating over the entire map with a write lock could block other goroutines in high-failure scenarios. * Filer: Modernize heap interface and improve error handling docs 1. Replace interface{} with any in heap methods - Addresses modern Go style (Go 1.18+) - Improves code readability 2. 
Enhance isRetryableError documentation - Acknowledge string matching brittleness - Add comprehensive TODO for future improvements: * Use HTTP status codes (503, 429, etc.) * Implement structured error types with errors.Is/As * Extract gRPC status codes * Add error wrapping for better context - Document each error pattern with context - Add defensive check for empty error strings Current implementation remains pragmatic for initial release while documenting a clear path for future robustness improvements. String matching is acceptable for now but should be replaced with structured error checking when refactoring the deletion pipeline. * Filer: Refactor deletion processors for better readability Extract large callback functions into dedicated private methods to improve code organization and maintainability. Changes: 1. Extract processDeletionBatch method - Handles deletion of a batch of file IDs - Classifies errors (success, not found, retryable, permanent) - Manages retry queue additions - Consolidates logging logic 2. Extract processRetryBatch method - Handles retry attempts for previously failed deletions - Processes retry results and updates queue - Symmetric to processDeletionBatch for consistency Benefits: - Main loop functions (loopProcessingDeletion, loopProcessingDeletionRetry) are now concise and focused on orchestration - Business logic is separated into testable methods - Reduced nesting depth improves readability - Easier to understand control flow at a glance - Better separation of concerns The refactored methods follow the single responsibility principle, making the codebase more maintainable and easier to extend. * Update weed/filer/filer_deletion.go Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> * Filer: Fix critical retry count bug and add comprehensive error patterns Critical bug fixes from PR review: 1. Fix RetryCount reset bug (CRITICAL) - Problem: When items are re-queued via AddOrUpdate, RetryCount resets to 1, breaking exponential backoff - Solution: Add RequeueForRetry() method that preserves retry state - Impact: Ensures proper exponential backoff progression 2. Add overflow protection in backoff calculation - Check shift amount > 63 to prevent bit-shift overflow - Additional safety: check if delay <= 0 or > MaxRetryDelay - Protects against arithmetic overflow in extreme cases 3. Expand retryable error patterns - Added: timeout, deadline exceeded, context canceled - Added: lookup error/failed (volume discovery issues) - Added: connection refused, broken pipe (network errors) - Added: too many requests, service unavailable (backpressure) - Added: temporarily unavailable, try again (transient errors) - Added: i/o timeout (network timeouts) Benefits: - Retry mechanism now works correctly across restarts - More robust against edge cases and overflow - Better coverage of transient failure scenarios - Improved resilience in high-failure environments Addresses feedback from CodeRabbit and Gemini Code Assist in PR #7402. * Filer: Add persistence docs and comprehensive unit tests Documentation improvements: 1. Document in-memory queue limitation - Acknowledge that retry queue is volatile (lost on restart) - Document trade-offs and future persistence options - Provide clear path for production hardening - Note eventual consistency through main deletion queue Unit test coverage: 1. TestDeletionRetryQueue_AddAndRetrieve - Basic add/retrieve operations - Verify items not ready before delay elapsed 2. 
TestDeletionRetryQueue_ExponentialBackoff - Verify exponential backoff progression (5m→10m→20m→40m→80m) - Validate delay calculations with timing tolerance 3. TestDeletionRetryQueue_OverflowProtection - Test high retry counts (60+) that could cause overflow - Verify capping at MaxRetryDelay 4. TestDeletionRetryQueue_MaxAttemptsReached - Verify items discarded after MaxRetryAttempts - Confirm proper queue cleanup 5. TestIsRetryableError - Comprehensive error pattern coverage - Test all retryable error types (timeout, connection, lookup, etc.) - Verify non-retryable errors correctly identified 6. TestDeletionRetryQueue_HeapOrdering - Verify min-heap property maintained - Test items processed in NextRetryAt order - Validate heap.Init() integration All tests passing. Addresses PR feedback on testing requirements. * Filer: Add code quality improvements for deletion retry Address PR feedback with minor optimizations: - Add MaxLoggedErrorDetails constant (replaces magic number 10) - Pre-allocate slices and maps in processRetryBatch for efficiency - Improve log message formatting to use constant These changes improve code maintainability and runtime performance without altering functionality. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude <noreply@anthropic.com> * refactoring retrying * use constant * assert * address comment * refactor * address comments * dedup * process retried deletions * address comment * check in-flight items also; dedup code * refactoring * refactoring * simplify * reset heap * more efficient * add DeletionBatchSize as a constant;Permanent > Retryable > Success > Not Found --------- Co-authored-by: Claude <noreply@anthropic.com> Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> Co-authored-by: chrislu <chris.lu@gmail.com> Co-authored-by: Chris Lu <chrislusf@users.noreply.github.com>  | 
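
The retry-queue design above (a container/heap min-heap ordered by NextRetryAt, exponential backoff from 5 minutes capped at 6 hours, overflow protection, at most 10 attempts) could be sketched as follows. Struct and constant names are illustrative rather than copied from the PR; the real design also pairs the heap with a map keyed by file id for O(1) lookups, as the message notes.

```go
package retrysketch

import (
	"container/heap"
	"time"
)

const (
	baseRetryDelay   = 5 * time.Minute
	maxRetryDelay    = 6 * time.Hour
	maxRetryAttempts = 10
)

type retryItem struct {
	FileID      string
	RetryCount  int
	NextRetryAt time.Time
}

// retryHeap is a min-heap ordered by NextRetryAt, so the earliest-due item
// is always at the top.
type retryHeap []*retryItem

func (h retryHeap) Len() int           { return len(h) }
func (h retryHeap) Less(i, j int) bool { return h[i].NextRetryAt.Before(h[j].NextRetryAt) }
func (h retryHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }
func (h *retryHeap) Push(x any)        { *h = append(*h, x.(*retryItem)) }
func (h *retryHeap) Pop() any {
	old := *h
	n := len(old)
	item := old[n-1]
	*h = old[:n-1]
	return item
}

// nextDelay doubles the delay per attempt, guarding against shift overflow.
func nextDelay(retryCount int) time.Duration {
	if retryCount >= 63 {
		return maxRetryDelay
	}
	d := baseRetryDelay << uint(retryCount)
	if d <= 0 || d > maxRetryDelay {
		return maxRetryDelay
	}
	return d
}

// getReadyItems pops up to limit items whose NextRetryAt has passed,
// earliest first, without scanning the whole queue.
func getReadyItems(h *retryHeap, now time.Time, limit int) []*retryItem {
	var ready []*retryItem
	for h.Len() > 0 && len(ready) < limit && !(*h)[0].NextRetryAt.After(now) {
		ready = append(ready, heap.Pop(h).(*retryItem))
	}
	return ready
}
```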

7e624d5355 (5 days ago): * Fix s3 auth with proxy request (#7403)
* * Fix s3 auth with proxy request * * 6649 Add unit test for signature v4 * address comments * fix for tests * ipv6 * address comments * setting scheme Works for both cases (direct HTTPS and behind proxy) * trim for ipv6 * Corrected Scheme Precedence Order * trim * accurate --------- Co-authored-by: chrislu <chris.lu@gmail.com> Co-authored-by: Chris Lu <chrislusf@users.noreply.github.com>  | 

eaecd8328b (5 days ago): S3: add fallback for CORS (#7404)
* add fallback for cors * refactor * expose aws headers * add fallback to test * refactor * Only falls back to global config when there's explicitly no bucket-level config. * fmt * Update s3_cors_http_test.go * refactoring  | 
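
The fallback rule spelled out above ("only falls back to global config when there's explicitly no bucket-level config") boils down to a small selector; a hedged sketch with invented type names:

```go
package corssketch

// CORSConfig stands in for a bucket-level or global CORS configuration.
type CORSConfig struct {
	AllowedOrigins []string
	AllowedMethods []string
}

// effectiveCORS returns the bucket-level configuration when one exists and
// only falls back to the global configuration when the bucket has none.
func effectiveCORS(bucketCfg, globalCfg *CORSConfig) *CORSConfig {
	if bucketCfg != nil {
		return bucketCfg
	}
	return globalCfg
}
```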

d4790cb8e6 (6 days ago): s3: fix if-match error (#7277)
* s3: fix if-match error * add more checks * minor * minor --------- Co-authored-by: chrislu <chris.lu@gmail.com> Co-authored-by: Chris Lu <chrislusf@users.noreply.github.com>  | 

ed023f4a7d (6 days ago): purge emojis

ec4f7cf33c (6 days ago): Filer: Fixed critical bugs in the Azure SDK migration (PR #7310) (#7401)
* Fixed critical bugs in the Azure SDK migration (PR #7310) fix https://github.com/seaweedfs/seaweedfs/issues/5044 * purge emojis * conditional delete * Update azure_sink_test.go * refactoring * refactor * add context to each call * refactor * address comments * refactor * defer * DeleteSnapshots The conditional delete in handleExistingBlob was missing DeleteSnapshots, which would cause the delete operation to fail on Azure storage accounts that have blob snapshots enabled. * ensure the expected size * adjust comment  | 

85bd593936 (6 days ago): S3: adjust for loading credentials (#7400)
* adjust for loading credentials * Update weed/s3api/auth_credentials_test.go Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> * simplify --------- Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>  | 

d19eca71eb (6 days ago): [master] vaccum fix warn (#7312)

20e0d91037 (6 days ago): IAM: add support for advanced IAM config file to server command (#7317)
* IAM: add support for advanced IAM config file to server command * Add support for advanced IAM config file in S3 options * Fix S3 IAM config handling to simplify checks for configuration presence * simplify * simplify again * copy the value * const --------- Co-authored-by: chrislu <chris.lu@gmail.com> Co-authored-by: Chris Lu <chrislusf@users.noreply.github.com>  | 

7d26c8838f (6 days ago): S3: auth supports X-Forwarded-Host and X-Forwarded-Port (#7398)
* add fix and tests * address comments * idiomatic * ipv6  | 

b7ba6785a2 (1 week ago): go fmt

208d7f24f4 (1 week ago): Erasure Coding: Ec refactoring (#7396)
* refactor: add ECContext structure to encapsulate EC parameters
- Create ec_context.go with ECContext struct
- NewDefaultECContext() creates context with default 10+4 configuration
- Helper methods: CreateEncoder(), ToExt(), String()
- Foundation for cleaner function signatures
- No behavior change, still uses hardcoded 10+4
* refactor: update ec_encoder.go to use ECContext
- Add WriteEcFilesWithContext() and RebuildEcFilesWithContext() functions
- Keep old functions for backward compatibility (call new versions)
- Update all internal functions to accept ECContext parameter
- Use ctx.DataShards, ctx.ParityShards, ctx.TotalShards consistently
- Use ctx.CreateEncoder() instead of hardcoded reedsolomon.New()
- Use ctx.ToExt() for shard file extensions
- No behavior change, still uses default 10+4 configuration
* refactor: update ec_volume.go to use ECContext
- Add ECContext field to EcVolume struct
- Initialize ECContext with default configuration in NewEcVolume()
- Update LocateEcShardNeedleInterval() to use ECContext.DataShards
- Phase 1: Always uses default 10+4 configuration
- No behavior change
* refactor: add EC shard count fields to VolumeInfo protobuf
- Add data_shards_count field (field 8) to VolumeInfo message
- Add parity_shards_count field (field 9) to VolumeInfo message
- Fields are optional, 0 means use default (10+4)
- Backward compatible: fields added at end
- Phase 1: Foundation for future customization
* refactor: regenerate protobuf Go files with EC shard count fields
- Regenerated volume_server_pb/*.go with new EC fields
- DataShardsCount and ParityShardsCount accessors added to VolumeInfo
- No behavior change, fields not yet used
* refactor: update VolumeEcShardsGenerate to use ECContext
- Create ECContext with default configuration in VolumeEcShardsGenerate
- Use ecCtx.TotalShards and ecCtx.ToExt() in cleanup
- Call WriteEcFilesWithContext() instead of WriteEcFiles()
- Save EC configuration (DataShardsCount, ParityShardsCount) to VolumeInfo
- Log EC context being used
- Phase 1: Always uses default 10+4 configuration
- No behavior change
* fmt
* refactor: update ec_test.go to use ECContext
- Update TestEncodingDecoding to create and use ECContext
- Update validateFiles() to accept ECContext parameter
- Update removeGeneratedFiles() to use ctx.TotalShards and ctx.ToExt()
- Test passes with default 10+4 configuration
* refactor: use EcShardConfig message instead of separate fields
* optimize: pre-calculate row sizes in EC encoding loop
* refactor: replace TotalShards field with Total() method
- Remove TotalShards field from ECContext to avoid field drift
- Add Total() method that computes DataShards + ParityShards
- Update all references to use ctx.Total() instead of ctx.TotalShards
- Read EC config from VolumeInfo when loading EC volumes
- Read data shard count from .vif in VolumeEcShardsToVolume
- Use >= instead of > for exact boundary handling in encoding loops
* optimize: simplify VolumeEcShardsToVolume to use existing EC context
- Remove redundant CollectEcShards call
- Remove redundant .vif file loading
- Use v.ECContext.DataShards directly (already loaded by NewEcVolume)
- Slice tempShards instead of collecting again
* refactor: rename MaxShardId to MaxShardCount for clarity
- Change from MaxShardId=31 to MaxShardCount=32
- Eliminates confusing +1 arithmetic (MaxShardId+1)
- More intuitive: MaxShardCount directly represents the limit
fix: support custom EC ratios beyond 14 shards in VolumeEcShardsToVolume
- Add MaxShardId constant (31, since ShardBits is uint32)
- Use MaxShardId+1 (32) instead of TotalShardsCount (14) for tempShards buffer
- Prevents panic when slicing for volumes with >14 total shards
- Critical fix for custom EC configurations like 20+10
* fix: add validation for EC shard counts from VolumeInfo
- Validate DataShards/ParityShards are positive and within MaxShardCount
- Prevent zero or invalid values that could cause divide-by-zero
- Fallback to defaults if validation fails, with warning log
- VolumeEcShardsGenerate now preserves existing EC config when regenerating
- Critical safety fix for corrupted or legacy .vif files
* fix: RebuildEcFiles now loads EC config from .vif file
- Critical: RebuildEcFiles was always using default 10+4 config
- Now loads actual EC config from .vif file when rebuilding shards
- Validates config before use (positive shards, within MaxShardCount)
- Falls back to default if .vif missing or invalid
- Prevents data corruption when rebuilding custom EC volumes
* add: defensive validation for dataShards in VolumeEcShardsToVolume
- Validate dataShards > 0 and <= MaxShardCount before use
- Prevents panic from corrupted or uninitialized ECContext
- Returns clear error message instead of panic
- Defense-in-depth: validates even though upstream should catch issues
* fix: replace TotalShardsCount with MaxShardCount for custom EC ratio support
Critical fixes to support custom EC ratios > 14 shards:
disk_location_ec.go:
- validateEcVolume: Check shards 0-31 instead of 0-13 during validation
- removeEcVolumeFiles: Remove shards 0-31 instead of 0-13 during cleanup
ec_volume_info.go ShardBits methods:
- ShardIds(): Iterate up to MaxShardCount (32) instead of TotalShardsCount (14)
- ToUint32Slice(): Iterate up to MaxShardCount (32)
- IndexToShardId(): Iterate up to MaxShardCount (32)
- MinusParityShards(): Remove shards 10-31 instead of 10-13 (added note about Phase 2)
- Minus() shard size copy: Iterate up to MaxShardCount (32)
- resizeShardSizes(): Iterate up to MaxShardCount (32)
Without these changes:
- Custom EC ratios > 14 total shards would fail validation on startup
- Shards 14-31 would never be discovered or cleaned up
- ShardBits operations would miss shards >= 14
These changes are backward compatible - MaxShardCount (32) includes
the default TotalShardsCount (14), so existing 10+4 volumes work as before.
* fix: replace TotalShardsCount with MaxShardCount in critical data structures
Critical fixes for buffer allocations and loops that must support
custom EC ratios up to 32 shards:
Data Structures:
- store_ec.go:354: Buffer allocation for shard recovery (bufs array)
- topology_ec.go:14: EcShardLocations.Locations fixed array size
- command_ec_rebuild.go:268: EC shard map allocation
- command_ec_common.go:626: Shard-to-locations map allocation
Shard Discovery Loops:
- ec_task.go:378: Loop to find generated shard files
- ec_shard_management.go: All 8 loops that check/count EC shards
These changes are critical because:
1. Buffer allocations sized to 14 would cause index-out-of-bounds panics
   when accessing shards 14-31
2. Fixed arrays sized to 14 would truncate shard location data
3. Loops limited to 0-13 would never discover/manage shards 14-31
Note: command_ec_encode.go:208 intentionally NOT changed - it creates
shard IDs to mount after encoding. In Phase 1 we always generate 14
shards, so this remains TotalShardsCount and will be made dynamic in
Phase 2 based on actual EC context.
Without these fixes, custom EC ratios > 14 total shards would cause:
- Runtime panics (array index out of bounds)
- Data loss (shards 14-31 never discovered/tracked)
- Incomplete shard management (missing shards not detected)
* refactor: move MaxShardCount constant to ec_encoder.go
Moved MaxShardCount from ec_volume_info.go to ec_encoder.go to group it
with other shard count constants (DataShardsCount, ParityShardsCount,
TotalShardsCount). This improves code organization and makes it easier
to understand the relationship between these constants.
Location: ec_encoder.go line 22, between TotalShardsCount and MinTotalDisks
* improve: add defensive programming and better error messages for EC
Code review improvements from CodeRabbit:
1. ShardBits Guardrails (ec_volume_info.go):
   - AddShardId, RemoveShardId: Reject shard IDs >= MaxShardCount
   - HasShardId: Return false for out-of-range shard IDs
   - Prevents silent no-ops from bit shifts with invalid IDs
2. Future-Proof Regex (disk_location_ec.go):
   - Updated regex from \.ec[0-9][0-9] to \.ec\d{2,3}
   - Now matches .ec00 through .ec999 (currently .ec00-.ec31 used)
   - Supports future increases to MaxShardCount beyond 99
3. Better Error Messages (volume_grpc_erasure_coding.go):
   - Include valid range (1..32) in dataShards validation error
   - Helps operators quickly identify the problem
4. Validation Before Save (volume_grpc_erasure_coding.go):
   - Validate ECContext (DataShards > 0, ParityShards > 0, Total <= MaxShardCount)
   - Log EC config being saved to .vif for debugging
   - Prevents writing invalid configs to disk
These changes improve robustness and debuggability without changing
core functionality.
* fmt
* fix: critical bugs from code review + clean up comments
Critical bug fixes:
1. command_ec_rebuild.go: Fixed indentation causing compilation error
   - Properly nested if/for blocks in registerEcNode
2. ec_shard_management.go: Fixed isComplete logic incorrectly using MaxShardCount
   - Changed from MaxShardCount (32) back to TotalShardsCount (14)
   - Default 10+4 volumes were being incorrectly reported as incomplete
   - Missing shards 14-31 were being incorrectly reported as missing
   - Fixed in 4 locations: volume completeness checks and getMissingShards
3. ec_volume_info.go: Fixed MinusParityShards removing too many shards
   - Changed from MaxShardCount (32) back to TotalShardsCount (14)
   - Was incorrectly removing shard IDs 10-31 instead of just 10-13
Comment cleanup:
- Removed Phase 1/Phase 2 references (development plan context)
- Replaced with clear statements about default 10+4 configuration
- SeaweedFS repo uses fixed 10+4 EC ratio, no phases needed
Root cause: Over-aggressive replacement of TotalShardsCount with MaxShardCount.
MaxShardCount (32) is the limit for buffer allocations and shard ID loops,
but TotalShardsCount (14) must be used for default EC configuration logic.
* fix: add defensive bounds checks and compute actual shard counts
Critical fixes from code review:
1. topology_ec.go: Add defensive bounds checks to AddShard/DeleteShard
   - Prevent panic when shardId >= MaxShardCount (32)
   - Return false instead of crashing on out-of-range shard IDs
2. command_ec_common.go: Fix doBalanceEcShardsAcrossRacks
   - Was using hardcoded TotalShardsCount (14) for all volumes
   - Now computes actual totalShardsForVolume from rackToShardCount
   - Fixes incorrect rebalancing for volumes with custom EC ratios
   - Example: 5+2=7 shards would incorrectly use 14 as average
These fixes improve robustness and prepare for future custom EC ratios
without changing current behavior for default 10+4 volumes.
Note: MinusParityShards and ec_task.go intentionally NOT changed for
seaweedfs repo - these will be enhanced in seaweed-enterprise repo
where custom EC ratio configuration is added.
* fmt
* style: make MaxShardCount type casting explicit in loops
Improved code clarity by explicitly casting MaxShardCount to the
appropriate type when used in loop comparisons:
- ShardId comparisons: Cast to ShardId(MaxShardCount)
- uint32 comparisons: Cast to uint32(MaxShardCount)
Changed in 5 locations:
- Minus() loop (line 90)
- ShardIds() loop (line 143)
- ToUint32Slice() loop (line 152)
- IndexToShardId() loop (line 219)
- resizeShardSizes() loop (line 248)
This makes the intent explicit and improves type safety readability.
No functional changes - purely a style improvement.
							
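
The ECContext idea above (carry the data/parity split around instead of hardcoding 10+4, compute the total, derive shard file extensions, validate against MaxShardCount) reduces to something like the following. This is a hedged sketch with assumed field names, not the file added in the PR.

```go
package ecsketch

import "fmt"

const (
	DefaultDataShards   = 10
	DefaultParityShards = 4
	MaxShardCount       = 32 // ShardBits is a uint32, so shard ids 0..31
)

// ECContext carries the erasure-coding split for one volume.
type ECContext struct {
	DataShards   int
	ParityShards int
}

// NewDefaultECContext returns the default 10+4 configuration.
func NewDefaultECContext() *ECContext {
	return &ECContext{DataShards: DefaultDataShards, ParityShards: DefaultParityShards}
}

// Total is computed rather than stored, so the fields cannot drift apart.
func (c *ECContext) Total() int { return c.DataShards + c.ParityShards }

// ToExt returns the shard file extension, e.g. ".ec00" .. ".ec31".
func (c *ECContext) ToExt(shardID int) string { return fmt.Sprintf(".ec%02d", shardID) }

// Valid mirrors the defensive checks described in the commit message.
func (c *ECContext) Valid() bool {
	return c.DataShards > 0 && c.ParityShards > 0 && c.Total() <= MaxShardCount
}
```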

decfb07eea (1 week ago): fix nil

a80b5eea5e (1 week ago): 3.99

2ad2ffcdff (1 week ago): fix comment

0813138d57 (1 week ago): Volume Server: handle incomplete ec encoding (#7384)
* handle incomplete ec encoding * unit tests * simplify, and better logs * Update disk_location_ec.go When loadEcShards() fails partway through, some EC shards may already be loaded into the l.ecVolumes map in memory. The previous code only cleaned up filesystem files but left orphaned in-memory state, which could cause memory leaks and inconsistent state. * address comments * Performance: Avoid Double os.Stat() Call * Platform Compatibility: Use filepath.Join * in memory cleanup * Update disk_location_ec.go * refactor * Added Shard Size Validation * check ec shard sizes * validate shard size * calculate expected shard size * refactoring * minor * fix shard directory * 10GB sparse files can be slow or fail on non-sparse FS. Use 10MB to hit SmallBlockSize math (1MB shards) deterministically. * grouping logic should be updated to use both collection and volumeId to ensure correctness * unexpected error * handle exceptions in tests; use constants * The check for orphaned shards should be performed for the previous volume before resetting sameVolumeShards for the new volume. * address comments * Eliminated Redundant Parsing in checkOrphanedShards * minor * Avoid misclassifying local EC as distributed when .dat stat errors occur; also standardize unload-before-remove. * fmt * refactor * refactor * adjust to warning  | 

824dcac3bf (1 week ago): s3: combine all signature verification checks into a single function (#7330)

6a8c53bc44 (1 week ago): Filer: batch deletion operations to return individual error results (#7382)
* batch deletion operations to return individual error results Modify batch deletion operations to return individual error results instead of one aggregated error, enabling better tracking of which specific files failed to delete (helping reduce orphan file issues). * Simplified logging logic * Optimized nested loop * handles the edge case where the RPC succeeds but connection cleanup fails * simplify * simplify * ignore 'not found' errors here  | 
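
Returning one result per file id instead of a single aggregated error, as described above, usually takes a shape like this (names are illustrative, not the filer's actual API):

```go
package deletesketch

// DeleteResult reports the outcome for a single file id in a batch.
type DeleteResult struct {
	FileID string
	Err    error
}

// deleteBatch tries every id and records each failure individually, so callers
// can tell exactly which files were left behind instead of receiving one
// aggregated error for the whole batch.
func deleteBatch(fileIDs []string, deleteOne func(string) error) []DeleteResult {
	results := make([]DeleteResult, 0, len(fileIDs))
	for _, id := range fileIDs {
		results = append(results, DeleteResult{FileID: id, Err: deleteOne(id)})
	}
	return results
}
```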

37af41fbfe (1 week ago): Shell: Added a helper function `isHelpRequest()` (#7380)
* Added a helper function `isHelpRequest()` * also handles combined short flags like -lh or -hl * Created handleHelpRequest() helper function encapsulates both: Checking for help flags Printing the help message * Limit to reasonable length (2-4 chars total) to avoid matching long options like -verbose  | 
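
A plausible shape for the `isHelpRequest()` helper described above, including the combined-short-flag case (-lh, -hl) with the 2-4 character length limit that keeps long options like -verbose from matching; this is a sketch, not the shell code itself:

```go
package shellsketch

import "strings"

// isHelpRequest reports whether any argument asks for help: -h, -help, --help,
// or a combined short flag such as -lh or -hl. Combined flags are limited to
// 2-4 characters total so long options like -verbose are not matched.
func isHelpRequest(args []string) bool {
	for _, arg := range args {
		switch arg {
		case "-h", "-help", "--help":
			return true
		}
		if strings.HasPrefix(arg, "-") && !strings.HasPrefix(arg, "--") &&
			len(arg) >= 2 && len(arg) <= 4 && strings.Contains(arg[1:], "h") {
			return true
		}
	}
	return false
}
```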

922bb17194 (1 week ago): Store full shell command in shell history (#7378)
Store shell command in history before parsing Store the shell command in history before parsing it. This will allow users to press the 'Up' arrow and see the entire command.  | 

fa35efc076 (1 week ago): Volume Server: Unexpected Deletion of Remote Tier Data (#7377)
* [Admin UI] Login not possible due to securecookie error * avoid 404 favicon * Update weed/admin/dash/auth_middleware.go Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> * address comments * avoid variable over shadowing * log session save error * When jwt.signing.read.key is enabled in security.toml, the volume server requires JWT tokens for all read operations. * reuse fileId * refactor * fix deleting remote tier * simplify the fix * Update weed/storage/volume_loading.go Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * Update weed/storage/volume_loading.go Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * Update weed/storage/volume_loading.go Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * fix --------- Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>  | 

263e891da0 (1 week ago): Clients to volume server requires JWT tokens for all read operations (#7376)
* [Admin UI] Login not possible due to securecookie error * avoid 404 favicon * Update weed/admin/dash/auth_middleware.go Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> * address comments * avoid variable over shadowing * log session save error * When jwt.signing.read.key is enabled in security.toml, the volume server requires JWT tokens for all read operations. * reuse fileId * refactor --------- Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>  | 

9f4075441c (1 week ago): [Admin UI] Login not possible due to securecookie error (#7374)
* [Admin UI] Login not possible due to securecookie error * avoid 404 favicon * Update weed/admin/dash/auth_middleware.go Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> * address comments * avoid variable over shadowing * log session save error --------- Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>  | 
						1 week ago | 
| 
							
							
bf58c5a688 - fix: Use a mixed of virtual and path styles within a single subdomain (#7357) - 2 weeks ago
    * Fix: allow a mix of virtual-hosted-style and path-style requests within a single subdomain
    * Address comments; add tests
    Co-authored-by: chrislu, Chris Lu
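As a rough illustration of what mixing virtual-hosted-style and path-style addressing means, a simplified Go sketch of resolving the bucket and key from either the Host header or the path; the function and domain handling are assumptions, not the S3 gateway's actual router.

```go
package main

import (
	"fmt"
	"strings"
)

// resolveBucketAndKey is illustrative: if the request host is a subdomain of
// the configured S3 domain, the first label is treated as the bucket
// (virtual-hosted style); otherwise the bucket is the first path segment
// (path style). A real gateway must also handle ports, IPs, and clients that
// mix both styles against the same subdomain.
func resolveBucketAndKey(host, path, s3Domain string) (bucket, key string) {
	host = strings.Split(host, ":")[0] // drop any port
	path = strings.TrimPrefix(path, "/")
	if host != s3Domain && strings.HasSuffix(host, "."+s3Domain) {
		// virtual-hosted style: mybucket.s3.example.com/photos/cat.jpg
		return strings.TrimSuffix(host, "."+s3Domain), path
	}
	// path style: s3.example.com/mybucket/photos/cat.jpg
	parts := strings.SplitN(path, "/", 2)
	if len(parts) == 2 {
		return parts[0], parts[1]
	}
	return parts[0], ""
}

func main() {
	fmt.Println(resolveBucketAndKey("mybucket.s3.example.com", "/photos/cat.jpg", "s3.example.com"))
	fmt.Println(resolveBucketAndKey("s3.example.com", "/mybucket/photos/cat.jpg", "s3.example.com"))
}
```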
							
							
1d0471aebb - Improve admin urls (#7370) - 2 weeks ago
    * Improve master and volume URLs in the admin dashboard: add a clickable URL for the master node, and make the volume server URL use PublicURL when set, falling back to 'address'
    * Show volume servers in a consistent order by sorting them by name, so the order is predictable after each refresh
    * Address comment
    Co-authored-by: chrislu
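A small Go sketch of the two behaviours described above: preferring PublicURL with 'address' as fallback, and sorting servers by name for a stable display order. The struct and field names are hypothetical, not the admin dashboard's actual types.

```go
package main

import (
	"fmt"
	"sort"
)

// VolumeServerInfo is a hypothetical stand-in for the dashboard's view of a
// volume server; the real struct in weed/admin differs.
type VolumeServerInfo struct {
	Name      string
	Address   string
	PublicURL string
}

// displayURL prefers PublicURL and falls back to Address, mirroring the
// behaviour described in the commit message.
func displayURL(s VolumeServerInfo) string {
	if s.PublicURL != "" {
		return s.PublicURL
	}
	return s.Address
}

func main() {
	servers := []VolumeServerInfo{
		{Name: "vol-2", Address: "10.0.0.2:8080"},
		{Name: "vol-1", Address: "10.0.0.1:8080", PublicURL: "vol-1.example.com:8080"},
	}
	// Sort by name so the dashboard shows servers in a stable order on every refresh.
	sort.Slice(servers, func(i, j int) bool { return servers[i].Name < servers[j].Name })
	for _, s := range servers {
		fmt.Println(s.Name, "->", displayURL(s))
	}
}
```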
							
							
7d147f238c - avoid repeated reading disk (#7369) - 2 weeks ago
    * Avoid repeatedly re-reading from disk
    * Check both the flush time AND read position advancement
    * Wait on a condition variable instead of polling
    * Fix reading: gap detection and skipping to the earliest in-memory time; time-based reads that include events at boundary times for first reads (offset ≤ 0); aggregated subscriber wake-up via ListenersWaits signaling
    * Address comments
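The "wait on cond" point can be illustrated with a schematic Go sketch that blocks on a sync.Cond until either the flush time or the read position advances, instead of re-reading the disk in a loop; all type and field names here are assumptions, not the SeaweedFS log-buffer code.

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// diskBuffer is a schematic stand-in for a buffer that is periodically
// flushed to disk and read by subscribers.
type diskBuffer struct {
	mu            sync.Mutex
	cond          *sync.Cond
	lastFlushTime time.Time
	readPosition  int64
}

func newDiskBuffer() *diskBuffer {
	b := &diskBuffer{}
	b.cond = sync.NewCond(&b.mu)
	return b
}

// waitForProgress blocks until either the flush time or the read position has
// advanced past what the caller last saw, avoiding a tight re-read loop.
func (b *diskBuffer) waitForProgress(lastFlush time.Time, lastPos int64) {
	b.mu.Lock()
	defer b.mu.Unlock()
	for !b.lastFlushTime.After(lastFlush) && b.readPosition <= lastPos {
		b.cond.Wait() // releases the lock while waiting, reacquires on wake-up
	}
}

// advance simulates a flush (or a read-position move) and wakes all waiters.
func (b *diskBuffer) advance(pos int64) {
	b.mu.Lock()
	b.lastFlushTime = time.Now()
	b.readPosition = pos
	b.mu.Unlock()
	b.cond.Broadcast()
}

func main() {
	b := newDiskBuffer()
	go func() {
		time.Sleep(100 * time.Millisecond)
		b.advance(42)
	}()
	b.waitForProgress(time.Time{}, 0)
	fmt.Println("progress observed, safe to read again")
}
```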
							
							
d220875ef4 - Revert "fix reading" - 2 weeks ago
    This reverts commit
							
							
64a4ce9358 - fix reading - 2 weeks ago
    * Gap detection and skipping to the earliest in-memory time
    * Time-based reads that include events at boundary times for first reads (offset ≤ 0)
    * Aggregated subscriber wake-up via ListenersWaits signaling
							
							
832df5265f - Fix 'NaN%' issue when running volume.fsck (#7368) - 2 weeks ago
    * Fix the 'NaN%' issue: running `volume.fsck` on an empty cluster would display 'NaN%'
    * Refactor: extract the count of orphan chunks in the summary into a new variable; restore the 'NaN' handling for individual volumes, even though it is not strictly necessary since the check is already done
    * Make the code more idiomatic
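A tiny Go sketch of the kind of guard that avoids 'NaN%' on an empty cluster, where the total chunk count is zero; this is illustrative, not the exact volume.fsck shell code.

```go
package main

import "fmt"

// orphanPercentage guards the empty-cluster case: with zero total chunks a
// plain division yields NaN, which is what used to be printed as 'NaN%'.
func orphanPercentage(orphanChunkCount, totalChunkCount uint64) float64 {
	if totalChunkCount == 0 {
		return 0
	}
	return float64(orphanChunkCount) * 100 / float64(totalChunkCount)
}

func main() {
	fmt.Printf("%.2f%%\n", orphanPercentage(0, 0))  // empty cluster: 0.00% instead of NaN%
	fmt.Printf("%.2f%%\n", orphanPercentage(3, 12)) // 25.00%
}
```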
							
							
0abf70061b - S3 API: Fix SSE-S3 decryption on object download (#7366) - 2 weeks ago
    Fixes #7363. Adds the missing SSE-S3 decryption support when downloading objects from SSE-S3 encrypted buckets; previously such objects were returned in encrypted form, causing data corruption and hash mismatches.
    * Update detectPrimarySSEType() to detect SSE-S3 encrypted objects from chunk metadata and distinguish SSE-S3 from SSE-KMS
    * Add SSE-S3 handling in handleSSEResponse(), implement handleSSES3Response() for single-part and multipart objects, and add createMultipartSSES3DecryptedReader() for per-chunk decryption using stored IVs
    * Update addSSEHeadersToResponse() to include SSE-S3 response headers; the fix follows the existing SSE-C and SSE-KMS patterns, using envelope encryption where each object's DEK is encrypted with the KEK stored in the filer
    * Add comprehensive unit tests covering encryption/decryption, request detection, metadata serialization, type detection, response headers, key generation and validation, various data sizes, and wrong-key handling
    * Address PR review comments: fix a resource leak in createMultipartSSES3DecryptedReader, handle mixed-encryption objects by checking each chunk's encryption type, and improve SSE type detection (explicit aws:kms case, documented SSE-C > SSE-KMS > SSE-S3 precedence)
    * Fix IV retrieval for small/inline SSE-S3 encrypted files: check object-level metadata (sseS3Key.IV) first and fall back to the first chunk's metadata, so inline files stored in entry.Content can be decrypted
    * Clean up unused SSE metadata helpers and constants; SSE-C keeps its IV helpers, while SSE-KMS and SSE-S3 serialize their key structures (including the IV) as JSON
    * Rename the SSE-C specific helpers for clarity: MetaSSEIV → MetaSSECIV, StoreIVInMetadata() → StoreSSECIVInMetadata(), GetIVFromMetadata() → GetSSECIVFromMetadata()
    * Add end-to-end integration tests for inline and chunked SSE-S3 files, which would have caught the inline-IV bug
    * Improve ValidateSSES3Key to validate the key bytes, key size (32 bytes), algorithm (AES256), key ID, and optional IV length (16 bytes)
    * Replace custom string helpers with strings.Contains; fix filer reading/writing of SSE-S3 headers; filter out seaweedfs internal headers; correct the version entry for SSE routing; proper error handling for SSE entry fetching; eliminate redundant lookups and extra object fetching; handle the object-not-found case; fix broken fallback logic; better error for encryption type mismatch
    * Replace brittle 'exactly 5 successes/failures' test assertions with invariant checks: total recorded attempts equal the request count, successes never exceed capacity, failures cover the remaining attempts, and the final AvailableSpace matches capacity minus successes
    Co-authored-by: Copilot
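The envelope-encryption pattern mentioned above (each object's DEK wrapped by a KEK) can be sketched in Go with AES-GCM; the key sizes and function names are illustrative, not the SeaweedFS implementation.

```go
package main

import (
	"bytes"
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"fmt"
)

// wrapDEK encrypts a per-object data encryption key (DEK) with the key
// encryption key (KEK) using AES-GCM, so only the wrapped DEK needs to be
// stored alongside the object's metadata.
func wrapDEK(kek, dek []byte) (nonce, wrapped []byte, err error) {
	block, err := aes.NewCipher(kek)
	if err != nil {
		return nil, nil, err
	}
	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return nil, nil, err
	}
	nonce = make([]byte, gcm.NonceSize())
	if _, err := rand.Read(nonce); err != nil {
		return nil, nil, err
	}
	return nonce, gcm.Seal(nil, nonce, dek, nil), nil
}

// unwrapDEK recovers the DEK from its wrapped form using the same KEK.
func unwrapDEK(kek, nonce, wrapped []byte) ([]byte, error) {
	block, err := aes.NewCipher(kek)
	if err != nil {
		return nil, err
	}
	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return nil, err
	}
	return gcm.Open(nil, nonce, wrapped, nil)
}

func main() {
	kek := make([]byte, 32) // 256-bit KEK, e.g. loaded from central storage
	dek := make([]byte, 32) // per-object DEK
	rand.Read(kek)
	rand.Read(dek)

	nonce, wrapped, _ := wrapDEK(kek, dek)
	recovered, err := unwrapDEK(kek, nonce, wrapped)
	fmt.Println(err == nil, bytes.Equal(recovered, dek))
}
```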
							
							
f06ddd05cc - Improve-worker (#7367) - 2 weeks ago
    * ♻️ refactor(worker): remove goto
    * ♻️ refactor(worker): let the manager loop exit by itself
    * ♻️ refactor(worker): fix a race condition when closing the worker; CloseSend is not safe to call while another goroutine concurrently calls Send, and streamCancel already handles proper stream closure; the streamExit signal is now sent AFTER the shutdownMsg, so the worker has no race condition if stopped at any moment (tested with -race)
    * 🐛 fix(task_logger): deadlock in log closure
    * 🐛 fix(balance): remove the outdated "UnloadVolume" step, since it is handled by "DeleteVolume" (#7346)
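One common way to avoid the Send/CloseSend race described above is to give a single goroutine exclusive ownership of the stream and funnel all messages, including the final shutdown message, through a channel. The following Go sketch shows that pattern with hypothetical types; it is not the actual worker client code.

```go
package main

import (
	"fmt"
	"sync"
)

// sendStream captures the part of a gRPC client stream that matters here:
// Send and CloseSend must not be called from different goroutines at once.
type sendStream interface {
	Send(msg string) error
	CloseSend() error
}

// sender owns the stream exclusively: every message, including the final
// shutdown message, flows through one goroutine, so Send/CloseSend never race.
type sender struct {
	out  chan string
	done sync.WaitGroup
}

func startSender(stream sendStream) *sender {
	s := &sender{out: make(chan string)}
	s.done.Add(1)
	go func() {
		defer s.done.Done()
		for msg := range s.out {
			if err := stream.Send(msg); err != nil {
				return // stream errors are handled by the caller via cancellation
			}
		}
		stream.CloseSend() // only reached after the last queued message was sent
	}()
	return s
}

func (s *sender) Enqueue(msg string) { s.out <- msg }

func (s *sender) Shutdown(last string) {
	s.out <- last // e.g. the worker's shutdown message
	close(s.out)  // then let the sender goroutine drain and CloseSend
	s.done.Wait()
}

// fakeStream is a stand-in for a real gRPC stream, just for the demo.
type fakeStream struct{}

func (fakeStream) Send(msg string) error { fmt.Println("send:", msg); return nil }
func (fakeStream) CloseSend() error      { fmt.Println("close send"); return nil }

func main() {
	s := startSender(fakeStream{})
	s.Enqueue("heartbeat")
	s.Shutdown("shutdown")
}
```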
							
							
557aa4ec09 - fixing auto complete (#7365) - 2 weeks ago
    * Fix auto complete
    * Apply review suggestions to weed/command/autocomplete.go
    Co-authored-by: Copilot, gemini-code-assist[bot]
							
							
cb9a662e20 - Update volume_growth_reservation_test.go - 2 weeks ago
							
							
30e8133524 - Update volume_growth_reservation_test.go - 2 weeks ago
							
							
fa025dc96f - ♻️ refactor(worker): decouple state management using command-query pattern (#7354) - 2 weeks ago
    * Eliminate all uses of sync.Mutex across the worker.go and client.go components, changing how mutable state is accessed and modified; the Single Owner Principle is now enforced, so only one goroutine ever modifies or reads state, which guarantees thread safety, prevents data races, and improves concurrency, reliability, and maintainability by isolating state concerns
    * 🐛 fix(worker): fix a race condition when closing; select/default is wrong for mandatory shutdown signals
    * 🐛 fix(worker): do not create tickers in every iteration
    * 🐛 fix(worker): fix a second race condition when closing by refactoring handleOutgoing to mirror the non-blocking logic of handleIncoming
    * Address comments; make the send blocking so that stream errors are always processed; avoid blocking the manager loop while waiting for tasks to complete
    Co-authored-by: Chris Lu
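A minimal Go sketch of the command-query, single-owner pattern described above: one goroutine owns the state, and everything else sends commands (writes) or queries (reads) over channels, so no mutex is needed. The types are illustrative, not the actual weed/worker code.

```go
package main

import "fmt"

// workerState is owned by exactly one goroutine; other goroutines interact
// with it only through commands and queries sent over channels.
type workerState struct {
	currentTasks int
}

type command func(*workerState)          // mutates state inside the owner goroutine
type query func(workerState) interface{} // reads a snapshot of state

type queryRequest struct {
	q     query
	reply chan interface{}
}

type stateOwner struct {
	commands chan command
	queries  chan queryRequest
}

func startOwner() *stateOwner {
	o := &stateOwner{
		commands: make(chan command),
		queries:  make(chan queryRequest),
	}
	go func() {
		state := workerState{} // single owner: only this goroutine touches state
		for {
			select {
			case cmd := <-o.commands:
				cmd(&state)
			case req := <-o.queries:
				req.reply <- req.q(state)
			}
		}
	}()
	return o
}

func (o *stateOwner) Do(cmd command) { o.commands <- cmd }

func (o *stateOwner) Ask(q query) interface{} {
	reply := make(chan interface{})
	o.queries <- queryRequest{q: q, reply: reply}
	return <-reply
}

func main() {
	owner := startOwner()
	owner.Do(func(s *workerState) { s.currentTasks++ })
	fmt.Println("tasks:", owner.Ask(func(s workerState) interface{} { return s.currentTasks }))
}
```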
							
							
f7bd75ef3b - S3: Avoid in-memory map concurrent writes in SSE-S3 key manager (#7358) - 2 weeks ago
    * Fix concurrent map writes in the SSE-S3 key manager (fixes #7352): parallel uploads to SSE-S3 enabled buckets crashed with 'fatal error: concurrent map writes' because SSES3KeyManager held an unsynchronized map accessed from multiple goroutines; add a sync.RWMutex, protect StoreKey() with a write lock and GetKey() with a read lock, and use a read/write locking pattern with a double-check in GetOrCreateKey()
    * Improve the key manager with envelope encryption: replace in-memory DEK storage with a randomly generated 256-bit super key (KEK); each DEK is encrypted with the KEK using AES-GCM, stored in the object metadata, and decrypted on demand when reading, which eliminates unbounded memory growth, avoids mutex-protected map lookups on reads, and follows envelope encryption best practices (similar to AWS KMS)
    * Persist the KEK in the filer so that all S3 API servers share the same KEK, it survives restarts, and it is initialized automatically on first startup; it is stored hex-encoded with file mode 0600 inside a 0700 directory
    * Settle the KEK location at /etc/s3/sse_kek, introduce named constants (SSES3KEKDirectory, SSES3KEKParentDir, SSES3KEKDirName, SSES3KEKFileName), and use the global SSE-S3 key manager when copying; update volume_growth_reservation_test.go
    * Address PR review: distinguish 'not found' from other errors when loading the KEK (only generate a new KEK on ErrNotFound, to avoid making existing data undecryptable), make SSE-S3 initialization failures fatal, and only ignore 'file exists' errors when creating the directory
    * Fix directory creation by setting FileMode to uint32(0755 | os.ModeDir) so /etc/s3 is created as a directory rather than as a file
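The first part of this change (the sync.RWMutex with a double-check in GetOrCreateKey) can be sketched in Go as follows; the struct is a simplified stand-in, not the real SSES3KeyManager, and the commit ultimately moved away from the in-memory map in favour of envelope encryption.

```go
package main

import (
	"crypto/rand"
	"fmt"
	"sync"
)

// sseKey is a simplified stand-in for an SSE-S3 data key.
type sseKey struct {
	ID  string
	Key []byte
}

type keyManager struct {
	mu   sync.RWMutex
	keys map[string]*sseKey
}

func newKeyManager() *keyManager {
	return &keyManager{keys: make(map[string]*sseKey)}
}

// GetKey takes only a read lock, so concurrent readers do not block each other.
func (m *keyManager) GetKey(id string) (*sseKey, bool) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	k, ok := m.keys[id]
	return k, ok
}

// GetOrCreateKey uses the read-lock fast path first, then re-checks under the
// write lock (double-check) so two goroutines racing on the same id do not
// both create and store a key.
func (m *keyManager) GetOrCreateKey(id string) (*sseKey, error) {
	if k, ok := m.GetKey(id); ok {
		return k, nil
	}
	m.mu.Lock()
	defer m.mu.Unlock()
	if k, ok := m.keys[id]; ok { // another goroutine won the race while we waited
		return k, nil
	}
	key := make([]byte, 32)
	if _, err := rand.Read(key); err != nil {
		return nil, err
	}
	k := &sseKey{ID: id, Key: key}
	m.keys[id] = k
	return k, nil
}

func main() {
	m := newKeyManager()
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ { // concurrent callers no longer hit "concurrent map writes"
		wg.Add(1)
		go func() { defer wg.Done(); m.GetOrCreateKey("bucket-1") }()
	}
	wg.Wait()
	k, _ := m.GetKey("bucket-1")
	fmt.Println("key bytes:", len(k.Key))
}
```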
							
							
aed91baa2e - [weed] update volume.fix.replication description (#7340) - 2 weeks ago
    * Update the volume.fix.replication description
    * Update master-cloud.toml and master.toml
							
							
9b7ed67311 - Switch to seaweedfs/cockroachdb-parser directly - 2 weeks ago
    Removed the replace directive and updated all references to use github.com/seaweedfs/cockroachdb-parser directly instead of redirecting from github.com/cockroachdb/cockroachdb-parser.
    * Updated the import in weed/query/engine/cockroach_parser.go
    * Removed the replace directive from go.mod
    * Now using seaweedfs/cockroachdb-parser@909763b17138, which includes all 32-bit architecture fixes and uses the seaweedfs namespace throughout
    Build verified successfully for GOOS=openbsd GOARCH=arm.
							
							
d3095f0c2a - 3.98 - 2 weeks ago