From 51ef39fc76dd2e8f53682f060ef57871912f1bc4 Mon Sep 17 00:00:00 2001
From: Emanuele Leopardi <28568180+emanuele-leopardi@users.noreply.github.com>
Date: Wed, 28 Jan 2026 22:08:20 +0100
Subject: [PATCH 1/2] Update Helm hook annotations for post-install and
 upgrade (#8150)

* Update Helm hook annotations for post-install and upgrade

I believe it makes sense to allow this job to also run after upgrades, not
only after the initial installation. Assuming weed shell is idempotent, and
assuming someone wants to add a new bucket after the initial installation,
it makes sense to trigger the job again.

* Add check for existing buckets before creation

* Enhances S3 bucket existence check

Improves the reliability of checking for existing S3 buckets in the
post-install hook. The previous `grep -w` command could lead to imprecise
matches. This update extracts only the bucket name and performs an exact,
whole-line match to ensure accurate detection of existing buckets. This
prevents potential issues with redundant creation attempts or false
negatives.

* Currently, bucket creation is ignored if filer.s3.enabled is disabled

This commit enables bucket creation in both scenarios, i.e. if either
filer.s3.enabled or s3.enabled is used.

---------

Co-authored-by: Emanuele
---
 .../shared/post-install-bucket-hook.yaml | 25 ++++++++++++-------
 k8s/charts/seaweedfs/values.yaml         |  9 ++++++-
 2 files changed, 24 insertions(+), 10 deletions(-)

diff --git a/k8s/charts/seaweedfs/templates/shared/post-install-bucket-hook.yaml b/k8s/charts/seaweedfs/templates/shared/post-install-bucket-hook.yaml
index 269919d19..930c474e9 100644
--- a/k8s/charts/seaweedfs/templates/shared/post-install-bucket-hook.yaml
+++ b/k8s/charts/seaweedfs/templates/shared/post-install-bucket-hook.yaml
@@ -15,14 +15,18 @@
     {{- $existingConfigSecret = or .Values.allInOne.s3.existingConfigSecret .Values.s3.existingConfigSecret .Values.filer.s3.existingConfigSecret }}
   {{- end }}
 {{- else if .Values.master.enabled }}
-  {{- /* Check standalone filer.s3 mode */}}
-  {{- if .Values.filer.s3.enabled }}
+  {{- /* Check if embedded (in filer) or standalone S3 gateway is enabled */}}
+  {{- if or .Values.filer.s3.enabled .Values.s3.enabled }}
     {{- $s3Enabled = true }}
-    {{- if .Values.filer.s3.createBuckets }}
+    {{- if .Values.s3.createBuckets }}
+      {{- $createBuckets = .Values.s3.createBuckets }}
+      {{- $enableAuth = .Values.s3.enableAuth }}
+      {{- $existingConfigSecret = .Values.s3.existingConfigSecret }}
+    {{- else if .Values.filer.s3.createBuckets }}
       {{- $createBuckets = .Values.filer.s3.createBuckets }}
+      {{- $enableAuth = .Values.filer.s3.enableAuth }}
+      {{- $existingConfigSecret = .Values.filer.s3.existingConfigSecret }}
     {{- end }}
-    {{- $enableAuth = .Values.filer.s3.enableAuth }}
-    {{- $existingConfigSecret = .Values.filer.s3.existingConfigSecret }}
   {{- end }}
 {{- end }}
 
@@ -36,7 +40,7 @@ metadata:
     app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
     app.kubernetes.io/instance: {{ .Release.Name | quote }}
   annotations:
-    "helm.sh/hook": post-install
+    "helm.sh/hook": post-install,post-upgrade
     "helm.sh/hook-weight": "-5"
     "helm.sh/hook-delete-policy": hook-succeeded
 spec:
@@ -105,9 +109,12 @@
           wait_for_service "http://$WEED_CLUSTER_SW_FILER{{ .Values.filer.readinessProbe.httpGet.path }}"
           {{- end }}
           {{- range $createBuckets }}
-          /bin/echo \
-            "s3.bucket.create --name {{ .name }}" |\
-          /usr/bin/weed shell
+          if /bin/echo "s3.bucket.list" | /usr/bin/weed shell | awk '{print $1}' | grep -Fxq "{{ .name }}"; then
+            echo "Bucket '{{ .name }}' already exists, skipping creation."
+          else
+            echo "Creating bucket '{{ .name }}'..."
+            /bin/echo "s3.bucket.create --name {{ .name }}" | /usr/bin/weed shell
+          fi
           {{- end }}
           {{- range $createBuckets }}
           {{- if .anonymousRead }}
diff --git a/k8s/charts/seaweedfs/values.yaml b/k8s/charts/seaweedfs/values.yaml
index 35befc3d1..f9d93dab6 100644
--- a/k8s/charts/seaweedfs/values.yaml
+++ b/k8s/charts/seaweedfs/values.yaml
@@ -891,7 +891,7 @@ filer:
   # should have a secret key called seaweedfs_s3_config with an inline json configure
   existingConfigSecret: null
   auditLogConfig: {}
-  # You may specify buckets to be created during the install process.
+  # You may specify buckets to be created during the install or upgrade process.
   # Buckets may be exposed publicly by setting `anonymousRead` to `true`
   # createBuckets:
   #   - name: bucket-a
@@ -916,6 +916,13 @@ s3:
   # should have a secret key called seaweedfs_s3_config with an inline json config
   existingConfigSecret: null
   auditLogConfig: {}
+  # You may specify buckets to be created during the install or upgrade process.
+  # Buckets may be exposed publicly by setting `anonymousRead` to `true`
+  # createBuckets:
+  #   - name: bucket-a
+  #     anonymousRead: true
+  #   - name: bucket-b
+  #     anonymousRead: false
   # Suffix of the host name, {bucket}.{domainName}
   domainName: ""
 

From 5c8de5e282961fcb68007b68e1a27bbdc6154cfe Mon Sep 17 00:00:00 2001
From: Ping Qiu
Date: Wed, 28 Jan 2026 17:21:14 -0800
Subject: [PATCH 2/2] fix: close volumes and EC shards in tests for Windows
 compatibility (#8152)

* fix: close volumes and EC shards in tests to prevent Windows cleanup failures

On Windows, t.TempDir() cleanup fails when test files are still open because
Windows enforces mandatory file locking. Add defer v.Close(), defer
store.Close(), and EC volume cleanup to ensure all file handles are released
before temp directory removal.

Co-Authored-By: Claude Opus 4.5

* refactor: extract closeEcVolumes helper to reduce duplication

Address code review feedback by extracting the repeated EC volume cleanup
loop into a closeEcVolumes() helper function.

Co-Authored-By: Claude Opus 4.5

---------

Co-authored-by: Claude Opus 4.5
---
 weed/storage/disk_location_ec_test.go     | 20 ++++++++++++++++++++
 weed/storage/idx_binary_search_test.go    |  1 +
 weed/storage/store_load_balancing_test.go |  5 ++++-
 weed/storage/volume_read_test.go          |  2 ++
 weed/storage/volume_vacuum_test.go        |  1 +
 weed/storage/volume_write_test.go         |  2 ++
 6 files changed, 30 insertions(+), 1 deletion(-)

diff --git a/weed/storage/disk_location_ec_test.go b/weed/storage/disk_location_ec_test.go
index 23b7d8c40..e40870b7f 100644
--- a/weed/storage/disk_location_ec_test.go
+++ b/weed/storage/disk_location_ec_test.go
@@ -11,6 +11,13 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/util"
 )
 
+// closeEcVolumes closes all EC volumes in the given DiskLocation to release file handles.
+func closeEcVolumes(dl *DiskLocation) {
+	for _, ecVol := range dl.ecVolumes {
+		ecVol.Close()
+	}
+}
+
 // TestIncompleteEcEncodingCleanup tests the cleanup logic for incomplete EC encoding scenarios
 func TestIncompleteEcEncodingCleanup(t *testing.T) {
 	tests := []struct {
@@ -182,11 +189,18 @@
 			t.Logf("loadAllEcShards returned error (expected in some cases): %v", loadErr)
 			}
 
+			// Close EC volumes before idempotency test to avoid leaking file handles
+			closeEcVolumes(diskLocation)
+			diskLocation.ecVolumes = make(map[needle.VolumeId]*erasure_coding.EcVolume)
+
 			// Test idempotency - running again should not cause issues
 			loadErr2 := diskLocation.loadAllEcShards(nil)
 			if loadErr2 != nil {
 				t.Logf("Second loadAllEcShards returned error: %v", loadErr2)
 			}
+			t.Cleanup(func() {
+				closeEcVolumes(diskLocation)
+			})
 
 			// Verify cleanup expectations
 			if tt.expectCleanup {
@@ -554,6 +568,9 @@
 	if loadErr != nil {
 		t.Logf("loadAllEcShards error: %v", loadErr)
 	}
+	t.Cleanup(func() {
+		closeEcVolumes(diskLocation)
+	})
 
 	// Verify cleanup occurred in data directory (shards)
 	for i := 0; i < erasure_coding.TotalShardsCount; i++ {
@@ -625,6 +642,9 @@
 	if loadErr != nil {
 		t.Logf("loadAllEcShards returned error (expected): %v", loadErr)
 	}
+	t.Cleanup(func() {
+		closeEcVolumes(diskLocation)
+	})
 
 	// CRITICAL CHECK: Verify shard files still exist (should NOT be deleted)
 	for i := 0; i < 5; i++ {
diff --git a/weed/storage/idx_binary_search_test.go b/weed/storage/idx_binary_search_test.go
index e04185bcd..77d38e562 100644
--- a/weed/storage/idx_binary_search_test.go
+++ b/weed/storage/idx_binary_search_test.go
@@ -18,6 +18,7 @@ func TestFirstInvalidIndex(t *testing.T) {
 	if err != nil {
 		t.Fatalf("volume creation: %v", err)
 	}
+	defer v.Close()
 	type WriteInfo struct {
 		offset int64
 		size   int32
diff --git a/weed/storage/store_load_balancing_test.go b/weed/storage/store_load_balancing_test.go
index 35475a6ae..a16d1474d 100644
--- a/weed/storage/store_load_balancing_test.go
+++ b/weed/storage/store_load_balancing_test.go
@@ -45,7 +45,10 @@ func newTestStore(t *testing.T, numDirs int) *Store {
 			}
 		}
 	}()
-	t.Cleanup(func() { close(done) })
+	t.Cleanup(func() {
+		store.Close()
+		close(done)
+	})
 	return store
 }
 
diff --git a/weed/storage/volume_read_test.go b/weed/storage/volume_read_test.go
index efedadc31..d3fb31c64 100644
--- a/weed/storage/volume_read_test.go
+++ b/weed/storage/volume_read_test.go
@@ -16,6 +16,7 @@ func TestReadNeedMetaWithWritesAndUpdates(t *testing.T) {
 	if err != nil {
 		t.Fatalf("volume creation: %v", err)
 	}
+	defer v.Close()
 	type WriteInfo struct {
 		offset int64
 		size   int32
@@ -55,6 +56,7 @@ func TestReadNeedMetaWithDeletesThenWrites(t *testing.T) {
 	if err != nil {
 		t.Fatalf("volume creation: %v", err)
 	}
+	defer v.Close()
 	type WriteInfo struct {
 		offset int64
 		size   int32
diff --git a/weed/storage/volume_vacuum_test.go b/weed/storage/volume_vacuum_test.go
index 29b990f70..9022fc5c7 100644
--- a/weed/storage/volume_vacuum_test.go
+++ b/weed/storage/volume_vacuum_test.go
@@ -119,6 +119,7 @@ func testCompaction(t *testing.T, needleMapKind NeedleMapKind) {
 	if err != nil {
 		t.Fatalf("volume reloading: %v", err)
 	}
+	defer v.Close()
 
 	for i := 1; i <= beforeCommitFileCount+afterCommitFileCount; i++ {
diff --git a/weed/storage/volume_write_test.go b/weed/storage/volume_write_test.go
index ce7c184bd..f3e8ebe01 100644
--- a/weed/storage/volume_write_test.go
+++ b/weed/storage/volume_write_test.go
@@ -21,6 +21,7 @@ func TestSearchVolumesWithDeletedNeedles(t *testing.T) {
 	if err != nil {
 		t.Fatalf("volume creation: %v", err)
 	}
+	defer v.Close()
 
 	count := 20
 
@@ -119,6 +120,7 @@ func TestDestroyNonemptyVolumeWithOnlyEmpty(t *testing.T) {
 	if err != nil {
 		t.Fatalf("volume creation: %v", err)
 	}
+	defer v.Close()
 	path := v.DataBackend.Name()
 
 	// should return "volume not empty" error and do not delete file when Destroy non-empty volume
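
Illustrative sketch, not part of either patch above: the second patch relies on the fact that t.TempDir() registers a cleanup that removes the directory when the test finishes, and that on Windows this removal fails while any file inside is still open. A minimal, self-contained example of the same close-before-cleanup pattern, using only the Go standard library (the package, test, and file names here are made up for illustration):

// Hypothetical example of the pattern applied by the second patch: every
// file handle created inside t.TempDir() must be closed before the test
// returns, otherwise the directory removal registered by t.TempDir() fails
// on Windows.
package example

import (
	"os"
	"path/filepath"
	"testing"
)

func TestTempDirCleanupPattern(t *testing.T) {
	dir := t.TempDir() // removed automatically when the test and its cleanups finish

	f, err := os.Create(filepath.Join(dir, "volume.dat"))
	if err != nil {
		t.Fatalf("create: %v", err)
	}
	// Without this Close, Windows refuses to delete the still-open file,
	// and the TempDir cleanup reports a failure at the end of the test.
	defer f.Close()

	if _, err := f.WriteString("needle data"); err != nil {
		t.Fatalf("write: %v", err)
	}
}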