
Merge branch 'refs/heads/master' into fix_rclone_tests

# Conflicts:
#	.github/workflows/s3tests.yml
#	Makefile
pull/5443/head
Konstantin Lebedev 8 months ago
commit 2cdbbc6c5e
  1. 8 .github/workflows/binaries_dev.yml
  2. 4 .github/workflows/binaries_release0.yml
  3. 4 .github/workflows/binaries_release1.yml
  4. 4 .github/workflows/binaries_release2.yml
  5. 4 .github/workflows/binaries_release3.yml
  6. 4 .github/workflows/binaries_release4.yml
  7. 10 .github/workflows/container_dev.yml
  8. 10 .github/workflows/container_latest.yml
  9. 8 .github/workflows/container_release1.yml
  10. 8 .github/workflows/container_release2.yml
  11. 8 .github/workflows/container_release3.yml
  12. 10 .github/workflows/container_release4.yml
  13. 10 .github/workflows/container_release5.yml
  14. 2 .github/workflows/depsreview.yml
  15. 2 .github/workflows/helm_ci.yml
  16. 133 .github/workflows/s3tests.yml
  17. 7 .gitignore
  18. 2 Makefile
  19. 1 README.md
  20. 4 docker/Dockerfile.local
  21. 2 docker/Dockerfile.s3tests
  22. 6 docker/Makefile
  23. 7 docker/compose/local-brokers-compose.yml
  24. 184 go.mod
  25. 1354 go.sum
  26. 5 k8s/charts/seaweedfs/Chart.yaml
  27. 4 k8s/charts/seaweedfs/README.md
  28. 3402 k8s/charts/seaweedfs/dashboards/seaweedfs-grafana-dashboard.json
  29. 45 k8s/charts/seaweedfs/templates/_helpers.tpl
  30. 35 k8s/charts/seaweedfs/templates/cluster-role.yaml
  31. 4 k8s/charts/seaweedfs/templates/filer-ingress.yaml
  32. 3 k8s/charts/seaweedfs/templates/filer-servicemonitor.yaml
  33. 34 k8s/charts/seaweedfs/templates/filer-statefulset.yaml
  34. 4 k8s/charts/seaweedfs/templates/master-ingress.yaml
  35. 3 k8s/charts/seaweedfs/templates/master-servicemonitor.yaml
  36. 34 k8s/charts/seaweedfs/templates/master-statefulset.yaml
  37. 6 k8s/charts/seaweedfs/templates/post-install-bucket-hook.yaml
  38. 24 k8s/charts/seaweedfs/templates/s3-deployment.yaml
  39. 3 k8s/charts/seaweedfs/templates/s3-servicemonitor.yaml
  40. 37 k8s/charts/seaweedfs/templates/service-account.yaml
  41. 3 k8s/charts/seaweedfs/templates/volume-servicemonitor.yaml
  42. 28 k8s/charts/seaweedfs/templates/volume-statefulset.yaml
  43. 168 k8s/charts/seaweedfs/values.yaml
  44. BIN note/keepsec.png
  45. BIN note/piknik.png
  46. 13 other/java/client/src/main/proto/filer.proto
  47. 3 unmaintained/repeated_vacuum/repeated_vacuum.go
  48. 3 unmaintained/stress_filer_upload/bench_filer_upload/bench_filer_upload.go
  49. 3 unmaintained/stress_filer_upload/stress_filer_upload_actual/stress_filer_upload.go
  50. 3 unmaintained/volume_tailer/volume_tailer.go
  51. 3 weed/cluster/lock_manager/distributed_lock_manager.go
  52. 11 weed/cluster/lock_manager/lock_ring.go
  53. 3 weed/command/backup.go
  54. 8 weed/command/benchmark.go
  55. 3 weed/command/download.go
  56. 2 weed/command/filer_backup.go
  57. 2 weed/command/filer_copy.go
  58. 6 weed/command/filer_meta_backup.go
  59. 2 weed/command/filer_remote_gateway_buckets.go
  60. 2 weed/command/filer_remote_sync_dir.go
  61. 58 weed/command/fix.go
  62. 9 weed/command/master.go
  63. 2 weed/command/master_follower.go
  64. 7 weed/command/scaffold/filer.toml
  65. 2 weed/command/server.go
  66. 4 weed/command/upload.go
  67. 6 weed/filer/abstract_sql/abstract_sql_store.go
  68. 6 weed/filer/filechunk_manifest.go
  69. 36 weed/filer/filer.go
  70. 25 weed/filer/filer_conf.go
  71. 6 weed/filer/filer_delete_entry.go
  72. 59 weed/filer/filer_deletion.go
  73. 2 weed/filer/filer_notify.go
  74. 12 weed/filer/filerstore_wrapper.go
  75. 6 weed/filer/leveldb2/leveldb2_store.go
  76. 9 weed/filer/leveldb3/leveldb3_store.go
  77. 2 weed/filer/meta_aggregator.go
  78. 38 weed/filer/meta_replay.go
  79. 72 weed/filer/mongodb/mongodb_store.go
  80. 12 weed/filer/remote_storage.go
  81. 17 weed/filer/stream.go
  82. 2 weed/filer/ydb/ydb_store.go
  83. 2 weed/ftpd/ftp_server.go
  84. 5 weed/iamapi/iamapi_management_handlers.go
  85. 2 weed/iamapi/iamapi_server.go
  86. 2 weed/iamapi/iamapi_test.go
  87. 2 weed/images/cropping.go
  88. 2 weed/images/resizing.go
  89. 6 weed/mount/inode_to_path.go
  90. 8 weed/mount/page_writer/page_chunk_mem.go
  91. 16 weed/mount/page_writer/page_chunk_swapfile.go
  92. 23 weed/mount/weedfs.go
  93. 6 weed/mount/weedfs_file_sync.go
  94. 9 weed/mq/broker/broker_grpc_assign.go
  95. 9 weed/mq/broker/broker_grpc_configure.go
  96. 134 weed/mq/broker/broker_grpc_pub.go
  97. 198 weed/mq/broker/broker_grpc_pub_follow.go
  98. 212 weed/mq/broker/broker_grpc_sub.go
  99. 9 weed/mq/broker/broker_grpc_sub_coordinator.go
  100. 5 weed/mq/broker/broker_server.go

8
.github/workflows/binaries_dev.yml

@@ -44,7 +44,7 @@ jobs:
run: echo BUILD_TIME=$(date -u +%Y%m%d-%H%M) >> ${GITHUB_ENV}
- name: Go Release Binaries Large Disk
-uses: wangyoucao577/go-release-action@b173bce0484dc9f34c585181e5db16bd8756a21e # v1.22
+uses: wangyoucao577/go-release-action@6ac7dba1f9e61850053324549cb6bc88e4b473d2 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
@@ -60,7 +60,7 @@ jobs:
asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"
- name: Go Release Binaries Normal Volume Size
-uses: wangyoucao577/go-release-action@b173bce0484dc9f34c585181e5db16bd8756a21e # v1.22
+uses: wangyoucao577/go-release-action@6ac7dba1f9e61850053324549cb6bc88e4b473d2 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
@@ -93,7 +93,7 @@ jobs:
run: echo BUILD_TIME=$(date -u +%Y%m%d-%H%M) >> ${GITHUB_ENV}
- name: Go Release Binaries Large Disk
-uses: wangyoucao577/go-release-action@b173bce0484dc9f34c585181e5db16bd8756a21e # v1.22
+uses: wangyoucao577/go-release-action@6ac7dba1f9e61850053324549cb6bc88e4b473d2 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
@@ -109,7 +109,7 @@ jobs:
asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"
- name: Go Release Binaries Normal Volume Size
-uses: wangyoucao577/go-release-action@b173bce0484dc9f34c585181e5db16bd8756a21e # v1.22
+uses: wangyoucao577/go-release-action@6ac7dba1f9e61850053324549cb6bc88e4b473d2 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}

4
.github/workflows/binaries_release0.yml

@@ -30,7 +30,7 @@ jobs:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
- name: Go Release Binaries Normal Volume Size
-uses: wangyoucao577/go-release-action@b173bce0484dc9f34c585181e5db16bd8756a21e # v1.22
+uses: wangyoucao577/go-release-action@6ac7dba1f9e61850053324549cb6bc88e4b473d2 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
@@ -44,7 +44,7 @@ jobs:
binary_name: weed
asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
- name: Go Release Large Disk Binaries
-uses: wangyoucao577/go-release-action@b173bce0484dc9f34c585181e5db16bd8756a21e # v1.22
+uses: wangyoucao577/go-release-action@6ac7dba1f9e61850053324549cb6bc88e4b473d2 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}

4
.github/workflows/binaries_release1.yml

@@ -30,7 +30,7 @@ jobs:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
- name: Go Release Binaries Normal Volume Size
-uses: wangyoucao577/go-release-action@b173bce0484dc9f34c585181e5db16bd8756a21e # v1.22
+uses: wangyoucao577/go-release-action@6ac7dba1f9e61850053324549cb6bc88e4b473d2 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
@@ -44,7 +44,7 @@ jobs:
binary_name: weed
asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
- name: Go Release Large Disk Binaries
-uses: wangyoucao577/go-release-action@b173bce0484dc9f34c585181e5db16bd8756a21e # v1.22
+uses: wangyoucao577/go-release-action@6ac7dba1f9e61850053324549cb6bc88e4b473d2 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}

4
.github/workflows/binaries_release2.yml

@@ -30,7 +30,7 @@ jobs:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
- name: Go Release Binaries Normal Volume Size
-uses: wangyoucao577/go-release-action@b173bce0484dc9f34c585181e5db16bd8756a21e # v1.22
+uses: wangyoucao577/go-release-action@6ac7dba1f9e61850053324549cb6bc88e4b473d2 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
@@ -44,7 +44,7 @@ jobs:
binary_name: weed
asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
- name: Go Release Large Disk Binaries
-uses: wangyoucao577/go-release-action@b173bce0484dc9f34c585181e5db16bd8756a21e # v1.22
+uses: wangyoucao577/go-release-action@6ac7dba1f9e61850053324549cb6bc88e4b473d2 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}

4
.github/workflows/binaries_release3.yml

@@ -30,7 +30,7 @@ jobs:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
- name: Go Release Binaries Normal Volume Size
-uses: wangyoucao577/go-release-action@b173bce0484dc9f34c585181e5db16bd8756a21e # v1.22
+uses: wangyoucao577/go-release-action@6ac7dba1f9e61850053324549cb6bc88e4b473d2 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
@@ -44,7 +44,7 @@ jobs:
binary_name: weed
asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
- name: Go Release Large Disk Binaries
-uses: wangyoucao577/go-release-action@b173bce0484dc9f34c585181e5db16bd8756a21e # v1.22
+uses: wangyoucao577/go-release-action@6ac7dba1f9e61850053324549cb6bc88e4b473d2 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}

4
.github/workflows/binaries_release4.yml

@@ -30,7 +30,7 @@ jobs:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
- name: Go Release Binaries Normal Volume Size
-uses: wangyoucao577/go-release-action@b173bce0484dc9f34c585181e5db16bd8756a21e # v1.22
+uses: wangyoucao577/go-release-action@6ac7dba1f9e61850053324549cb6bc88e4b473d2 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
@@ -45,7 +45,7 @@ jobs:
binary_name: weed
asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}_full"
- name: Go Release Large Disk Binaries
-uses: wangyoucao577/go-release-action@b173bce0484dc9f34c585181e5db16bd8756a21e # v1.22
+uses: wangyoucao577/go-release-action@6ac7dba1f9e61850053324549cb6bc88e4b473d2 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}

10
.github/workflows/container_dev.yml

@@ -33,30 +33,30 @@ jobs:
org.opencontainers.image.vendor=Chris Lu
-
name: Set up QEMU
-uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v1
+uses: docker/setup-qemu-action@5927c834f5b4fdf503fca6f4c7eccda82949e1ee # v1
-
name: Set up Docker Buildx
-uses: docker/setup-buildx-action@2b51285047da1547ffb1b2203d8be4c0af6b1f20 # v1
+uses: docker/setup-buildx-action@4fd812986e6c8c2a69e18311145f9371337f27d4 # v1
with:
buildkitd-flags: "--debug"
-
name: Login to Docker Hub
if: github.event_name != 'pull_request'
-uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v1
+uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v1
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
-
name: Login to GHCR
if: github.event_name != 'pull_request'
-uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v1
+uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v1
with:
registry: ghcr.io
username: ${{ secrets.GHCR_USERNAME }}
password: ${{ secrets.GHCR_TOKEN }}
-
name: Build
-uses: docker/build-push-action@2cdde995de11925a030ce8070c3d77a52ffcf1c0 # v2
+uses: docker/build-push-action@1a162644f9a7e87d8f4b053101d1d9a712edc18c # v2
with:
context: ./docker
push: ${{ github.event_name != 'pull_request' }}

10
.github/workflows/container_latest.yml

@@ -34,30 +34,30 @@ jobs:
org.opencontainers.image.vendor=Chris Lu
-
name: Set up QEMU
-uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v1
+uses: docker/setup-qemu-action@5927c834f5b4fdf503fca6f4c7eccda82949e1ee # v1
-
name: Set up Docker Buildx
-uses: docker/setup-buildx-action@2b51285047da1547ffb1b2203d8be4c0af6b1f20 # v1
+uses: docker/setup-buildx-action@4fd812986e6c8c2a69e18311145f9371337f27d4 # v1
with:
buildkitd-flags: "--debug"
-
name: Login to Docker Hub
if: github.event_name != 'pull_request'
-uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v1
+uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v1
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
-
name: Login to GHCR
if: github.event_name != 'pull_request'
-uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v1
+uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v1
with:
registry: ghcr.io
username: ${{ secrets.GHCR_USERNAME }}
password: ${{ secrets.GHCR_TOKEN }}
-
name: Build
-uses: docker/build-push-action@2cdde995de11925a030ce8070c3d77a52ffcf1c0 # v2
+uses: docker/build-push-action@1a162644f9a7e87d8f4b053101d1d9a712edc18c # v2
with:
context: ./docker
push: ${{ github.event_name != 'pull_request' }}

8
.github/workflows/container_release1.yml

@@ -34,20 +34,20 @@ jobs:
org.opencontainers.image.vendor=Chris Lu
-
name: Set up QEMU
-uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v1
+uses: docker/setup-qemu-action@5927c834f5b4fdf503fca6f4c7eccda82949e1ee # v1
-
name: Set up Docker Buildx
-uses: docker/setup-buildx-action@2b51285047da1547ffb1b2203d8be4c0af6b1f20 # v1
+uses: docker/setup-buildx-action@4fd812986e6c8c2a69e18311145f9371337f27d4 # v1
-
name: Login to Docker Hub
if: github.event_name != 'pull_request'
-uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v1
+uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v1
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
-
name: Build
-uses: docker/build-push-action@2cdde995de11925a030ce8070c3d77a52ffcf1c0 # v2
+uses: docker/build-push-action@1a162644f9a7e87d8f4b053101d1d9a712edc18c # v2
with:
context: ./docker
push: ${{ github.event_name != 'pull_request' }}

8
.github/workflows/container_release2.yml

@@ -35,20 +35,20 @@ jobs:
org.opencontainers.image.vendor=Chris Lu
-
name: Set up QEMU
-uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v1
+uses: docker/setup-qemu-action@5927c834f5b4fdf503fca6f4c7eccda82949e1ee # v1
-
name: Set up Docker Buildx
-uses: docker/setup-buildx-action@2b51285047da1547ffb1b2203d8be4c0af6b1f20 # v1
+uses: docker/setup-buildx-action@4fd812986e6c8c2a69e18311145f9371337f27d4 # v1
-
name: Login to Docker Hub
if: github.event_name != 'pull_request'
-uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v1
+uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v1
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
-
name: Build
-uses: docker/build-push-action@2cdde995de11925a030ce8070c3d77a52ffcf1c0 # v2
+uses: docker/build-push-action@1a162644f9a7e87d8f4b053101d1d9a712edc18c # v2
with:
context: ./docker
push: ${{ github.event_name != 'pull_request' }}

8
.github/workflows/container_release3.yml

@@ -35,20 +35,20 @@ jobs:
org.opencontainers.image.vendor=Chris Lu
-
name: Set up QEMU
-uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v1
+uses: docker/setup-qemu-action@5927c834f5b4fdf503fca6f4c7eccda82949e1ee # v1
-
name: Set up Docker Buildx
-uses: docker/setup-buildx-action@2b51285047da1547ffb1b2203d8be4c0af6b1f20 # v1
+uses: docker/setup-buildx-action@4fd812986e6c8c2a69e18311145f9371337f27d4 # v1
-
name: Login to Docker Hub
if: github.event_name != 'pull_request'
-uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v1
+uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v1
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
-
name: Build
-uses: docker/build-push-action@2cdde995de11925a030ce8070c3d77a52ffcf1c0 # v2
+uses: docker/build-push-action@1a162644f9a7e87d8f4b053101d1d9a712edc18c # v2
with:
context: ./docker
push: ${{ github.event_name != 'pull_request' }}

10
.github/workflows/container_release4.yml

@@ -34,25 +34,25 @@ jobs:
org.opencontainers.image.vendor=Chris Lu
-
name: Set up QEMU
-uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v1
+uses: docker/setup-qemu-action@5927c834f5b4fdf503fca6f4c7eccda82949e1ee # v1
-
name: Set up Docker Buildx
-uses: docker/setup-buildx-action@2b51285047da1547ffb1b2203d8be4c0af6b1f20 # v1
+uses: docker/setup-buildx-action@4fd812986e6c8c2a69e18311145f9371337f27d4 # v1
-
name: Login to Docker Hub
if: github.event_name != 'pull_request'
-uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v1
+uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v1
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
-
name: Build
-uses: docker/build-push-action@2cdde995de11925a030ce8070c3d77a52ffcf1c0 # v2
+uses: docker/build-push-action@1a162644f9a7e87d8f4b053101d1d9a712edc18c # v2
with:
context: ./docker
push: ${{ github.event_name != 'pull_request' }}
file: ./docker/Dockerfile.go_build
-build-args: TAGS=elastic,ydb,gocdk,tikv,rclone
+build-args: TAGS=elastic,gocdk,sqlite,ydb,tikv,rclone
platforms: linux/amd64
tags: ${{ steps.docker_meta.outputs.tags }}
labels: ${{ steps.docker_meta.outputs.labels }}

10
.github/workflows/container_release5.yml

@@ -34,25 +34,25 @@ jobs:
org.opencontainers.image.vendor=Chris Lu
-
name: Set up QEMU
-uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v1
+uses: docker/setup-qemu-action@5927c834f5b4fdf503fca6f4c7eccda82949e1ee # v1
-
name: Set up Docker Buildx
-uses: docker/setup-buildx-action@2b51285047da1547ffb1b2203d8be4c0af6b1f20 # v1
+uses: docker/setup-buildx-action@4fd812986e6c8c2a69e18311145f9371337f27d4 # v1
-
name: Login to Docker Hub
if: github.event_name != 'pull_request'
-uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v1
+uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v1
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
-
name: Build
-uses: docker/build-push-action@2cdde995de11925a030ce8070c3d77a52ffcf1c0 # v2
+uses: docker/build-push-action@1a162644f9a7e87d8f4b053101d1d9a712edc18c # v2
with:
context: ./docker
push: ${{ github.event_name != 'pull_request' }}
file: ./docker/Dockerfile.go_build
-build-args: TAGS=5BytesOffset,elastic,ydb,gocdk,tikv,rclone
+build-args: TAGS=5BytesOffset,elastic,gocdk,sqlite,ydb,tikv,rclone
platforms: linux/amd64
tags: ${{ steps.docker_meta.outputs.tags }}
labels: ${{ steps.docker_meta.outputs.labels }}

2
.github/workflows/depsreview.yml

@@ -11,4 +11,4 @@ jobs:
- name: 'Checkout Repository'
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
- name: 'Dependency Review'
-uses: actions/dependency-review-action@5bbc3ba658137598168acb2ab73b21c432dd411b
+uses: actions/dependency-review-action@0659a74c94536054bfa5aeb92241f70d680cc78e

2
.github/workflows/helm_ci.yml

@@ -45,7 +45,7 @@ jobs:
run: ct lint --target-branch ${{ github.event.repository.default_branch }} --all --validate-maintainers=false --chart-dirs k8s/charts
- name: Create kind cluster
-uses: helm/kind-action@v1.9.0
+uses: helm/kind-action@v1.10.0
- name: Run chart-testing (install)
run: ct install --target-branch ${{ github.event.repository.default_branch }} --all --chart-dirs k8s/charts

133
.github/workflows/s3tests.yml

@@ -19,10 +19,10 @@ defaults:
jobs:
s3tests:
-name: S3 tests
+name: Ceph S3 tests
runs-on: ubuntu-22.04
container:
-image: docker.io/kmlebedev/ceph-s3-tests:0.0.3
+image: docker.io/kmlebedev/ceph-s3-tests:0.0.2
timeout-minutes: 30
steps:
- name: Check out code into the Go module directory
@@ -34,30 +34,30 @@ jobs:
go-version-file: 'go.mod'
id: go
- name: Build weed
+shell: bash
run: |
cd /__w/seaweedfs/seaweedfs/weed
go install -buildvcs=false
-- name: Run Ceph S3 tests
+- name: Run Ceph S3 tests with KV store
timeout-minutes: 15
env:
S3TEST_CONF: /__w/seaweedfs/seaweedfs/docker/compose/s3tests.conf
shell: bash
run: |
-cd /__w/seaweedfs/seaweedfs/weed
-go install -buildvcs=false
set -x
-(nohup weed -v 0 server -filer -s3 -ip.bind 0.0.0.0 \
+weed -v 0 server -filer -filer.maxMB=64 -s3 -ip.bind 0.0.0.0 \
-master.raftHashicorp -master.electionTimeout 1s -master.volumeSizeLimitMB=1024 \
-volume.max=100 -volume.preStopSeconds=1 -s3.port=8000 -metricsPort=9324 \
--s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=true -s3.config=../docker/compose/s3.json || true) &
+-s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=true -s3.config=../docker/compose/s3.json &
+pid=$!
sleep 10
cd /s3-tests
sed -i "s/assert prefixes == \['foo%2B1\/', 'foo\/', 'quux%20ab\/'\]/assert prefixes == \['foo\/', 'foo%2B1\/', 'quux%20ab\/'\]/" s3tests_boto3/functional/test_s3.py
tox -- \
s3tests_boto3/functional/test_s3.py::test_bucket_list_empty \
s3tests_boto3/functional/test_s3.py::test_bucket_list_distinct \
s3tests_boto3/functional/test_s3.py::test_bucket_list_many \
s3tests_boto3/functional/test_s3.py::test_bucket_listv2_many \
s3tests_boto3/functional/test_s3.py::test_bucket_listv2_delimiter_basic \
s3tests_boto3/functional/test_s3.py::test_bucket_listv2_encoding_basic \
s3tests_boto3/functional/test_s3.py::test_bucket_listv2_delimiter_prefix \
s3tests_boto3/functional/test_s3.py::test_bucket_listv2_delimiter_prefix_ends_with_delimiter \
s3tests_boto3/functional/test_s3.py::test_bucket_listv2_delimiter_alt \
@@ -98,6 +98,7 @@ jobs:
s3tests_boto3/functional/test_s3.py::test_bucket_listv2_maxkeys_zero \
s3tests_boto3/functional/test_s3.py::test_bucket_list_marker_none \
s3tests_boto3/functional/test_s3.py::test_bucket_list_marker_empty \
+s3tests_boto3/functional/test_s3.py::test_bucket_listv2_continuationtoken_empty \
s3tests_boto3/functional/test_s3.py::test_bucket_listv2_continuationtoken \
s3tests_boto3/functional/test_s3.py::test_bucket_listv2_both_continuationtoken_startafter \
s3tests_boto3/functional/test_s3.py::test_bucket_list_marker_unreadable \
@@ -168,10 +169,19 @@ jobs:
s3tests_boto3/functional/test_s3.py::test_object_copy_key_not_found \
s3tests_boto3/functional/test_s3.py::test_multipart_copy_small \
s3tests_boto3/functional/test_s3.py::test_multipart_copy_without_range \
-s3tests_boto3/functional/test_s3.py::test_multipart_upload_multiple_sizes \
s3tests_boto3/functional/test_s3.py::test_multipart_copy_special_names \
s3tests_boto3/functional/test_s3.py::test_multipart_copy_multiple_sizes \
+s3tests_boto3/functional/test_s3.py::test_multipart_get_part \
s3tests_boto3/functional/test_s3.py::test_multipart_upload \
s3tests_boto3/functional/test_s3.py::test_multipart_upload_empty \
+s3tests_boto3/functional/test_s3.py::test_multipart_upload_multiple_sizes \
+s3tests_boto3/functional/test_s3.py::test_multipart_upload_contents \
+s3tests_boto3/functional/test_s3.py::test_multipart_upload_overwrite_existing_object \
+s3tests_boto3/functional/test_s3.py::test_multipart_upload_size_too_small \
+s3tests_boto3/functional/test_s3.py::test_multipart_resend_first_finishes_last \
+s3tests_boto3/functional/test_s3.py::test_multipart_upload_resend_part \
+s3tests_boto3/functional/test_s3.py::test_multipart_upload_missing_part \
+s3tests_boto3/functional/test_s3.py::test_multipart_upload_incorrect_etag \
s3tests_boto3/functional/test_s3.py::test_abort_multipart_upload \
s3tests_boto3/functional/test_s3.py::test_list_multipart_upload \
s3tests_boto3/functional/test_s3.py::test_atomic_read_1mb \
@@ -184,32 +194,93 @@ jobs:
s3tests_boto3/functional/test_s3.py::test_atomic_dual_write_4mb \
s3tests_boto3/functional/test_s3.py::test_atomic_dual_write_8mb \
s3tests_boto3/functional/test_s3.py::test_atomic_multipart_upload_write \
-s3tests_boto3/functional/test_s3.py::test_multipart_resend_first_finishes_last \
s3tests_boto3/functional/test_s3.py::test_ranged_request_response_code \
s3tests_boto3/functional/test_s3.py::test_ranged_big_request_response_code \
s3tests_boto3/functional/test_s3.py::test_ranged_request_skip_leading_bytes_response_code \
s3tests_boto3/functional/test_s3.py::test_ranged_request_return_trailing_bytes_response_code \
s3tests_boto3/functional/test_s3.py::test_copy_object_ifmatch_good \
-s3tests_boto3/functional/test_s3.py::test_copy_object_ifnonematch_failed
-pkill -9 weed
-- uses: actions/checkout@v4
-with:
-repository: "rclone/rclone"
-path: "/__w/seaweedfs/seaweedfs/rclone"
+s3tests_boto3/functional/test_s3.py::test_copy_object_ifnonematch_failed \
+s3tests_boto3/functional/test_s3.py::test_lifecycle_set \
+s3tests_boto3/functional/test_s3.py::test_lifecycle_get \
+s3tests_boto3/functional/test_s3.py::test_lifecycle_set_filter
+kill -9 $pid || true
-- name: Run Rclone S3 tests
+- name: Run Ceph S3 tests with SQL store
timeout-minutes: 15
+env:
+S3TEST_CONF: /__w/seaweedfs/seaweedfs/docker/compose/s3tests.conf
shell: bash
run: |
cd /__w/seaweedfs/seaweedfs/weed
+go install -tags "sqlite" -buildvcs=false
+export WEED_LEVELDB2_ENABLED="false" WEED_SQLITE_ENABLED="true" WEED_SQLITE_DBFILE="./filer.db"
set -x
-(nohup weed -v 0 server -filer -s3 -ip.bind 0.0.0.0 \
--master.raftHashicorp -master.electionTimeout 1s -master.volumeSizeLimitMB=1024 \
--volume.max=100 -volume.preStopSeconds=1 -s3.port=8000 -metricsPort=9324 \
--s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=true -s3.config=../docker/compose/s3.json || true) &
-sleep 10
-mkdir -p ~/.config/rclone
-cp /__w/seaweedfs/seaweedfs/docker/compose/rclone.conf ~/.config/rclone/rclone.conf
-cd /__w/seaweedfs/seaweedfs/rclone/backend/s3
-go test -v -remote swfs:
-pkill weed
+weed -v 0 server -filer -filer.maxMB=64 -s3 -ip.bind 0.0.0.0 \
+-master.raftHashicorp -master.electionTimeout 1s -master.volumeSizeLimitMB=1024 \
+-volume.max=100 -volume.preStopSeconds=1 -s3.port=8000 -metricsPort=9324 \
+-s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=true -s3.config=../docker/compose/s3.json &
+pid=$!
+sleep 10
+cd /s3-tests
+sed -i "s/assert prefixes == \['foo%2B1\/', 'foo\/', 'quux%20ab\/'\]/assert prefixes == \['foo\/', 'foo%2B1\/', 'quux%20ab\/'\]/" s3tests_boto3/functional/test_s3.py
+tox -- \
+s3tests_boto3/functional/test_s3.py::test_bucket_list_empty \
+s3tests_boto3/functional/test_s3.py::test_bucket_list_distinct \
+s3tests_boto3/functional/test_s3.py::test_bucket_list_many \
+s3tests_boto3/functional/test_s3.py::test_bucket_listv2_many \
+s3tests_boto3/functional/test_s3.py::test_bucket_listv2_delimiter_basic \
+s3tests_boto3/functional/test_s3.py::test_bucket_listv2_encoding_basic \
+s3tests_boto3/functional/test_s3.py::test_bucket_listv2_delimiter_prefix \
+s3tests_boto3/functional/test_s3.py::test_bucket_listv2_delimiter_prefix_ends_with_delimiter \
+s3tests_boto3/functional/test_s3.py::test_bucket_listv2_delimiter_alt \
+s3tests_boto3/functional/test_s3.py::test_bucket_listv2_delimiter_prefix_underscore \
+s3tests_boto3/functional/test_s3.py::test_bucket_listv2_delimiter_percentage \
+s3tests_boto3/functional/test_s3.py::test_bucket_listv2_delimiter_whitespace \
+s3tests_boto3/functional/test_s3.py::test_bucket_listv2_delimiter_dot \
+s3tests_boto3/functional/test_s3.py::test_bucket_listv2_delimiter_unreadable \
+s3tests_boto3/functional/test_s3.py::test_bucket_listv2_delimiter_empty \
+s3tests_boto3/functional/test_s3.py::test_bucket_listv2_delimiter_none \
+s3tests_boto3/functional/test_s3.py::test_bucket_listv2_delimiter_not_exist \
+s3tests_boto3/functional/test_s3.py::test_bucket_list_prefix_delimiter_basic \
+s3tests_boto3/functional/test_s3.py::test_bucket_listv2_prefix_delimiter_basic \
+s3tests_boto3/functional/test_s3.py::test_bucket_list_prefix_delimiter_alt \
+s3tests_boto3/functional/test_s3.py::test_bucket_listv2_prefix_delimiter_alt \
+s3tests_boto3/functional/test_s3.py::test_bucket_list_prefix_delimiter_prefix_not_exist \
+s3tests_boto3/functional/test_s3.py::test_bucket_listv2_prefix_delimiter_prefix_not_exist \
+s3tests_boto3/functional/test_s3.py::test_bucket_list_prefix_delimiter_delimiter_not_exist \
+s3tests_boto3/functional/test_s3.py::test_bucket_listv2_prefix_delimiter_delimiter_not_exist \
+s3tests_boto3/functional/test_s3.py::test_bucket_list_prefix_delimiter_prefix_delimiter_not_exist \
+s3tests_boto3/functional/test_s3.py::test_bucket_listv2_prefix_delimiter_prefix_delimiter_not_exist \
+s3tests_boto3/functional/test_s3.py::test_bucket_listv2_fetchowner_notempty \
+s3tests_boto3/functional/test_s3.py::test_bucket_list_prefix_basic \
+s3tests_boto3/functional/test_s3.py::test_bucket_listv2_prefix_basic \
+s3tests_boto3/functional/test_s3.py::test_bucket_list_prefix_alt \
+s3tests_boto3/functional/test_s3.py::test_bucket_listv2_prefix_alt \
+s3tests_boto3/functional/test_s3.py::test_bucket_list_prefix_empty \
+s3tests_boto3/functional/test_s3.py::test_bucket_listv2_prefix_empty \
+s3tests_boto3/functional/test_s3.py::test_bucket_list_prefix_none \
+s3tests_boto3/functional/test_s3.py::test_bucket_listv2_prefix_none \
+s3tests_boto3/functional/test_s3.py::test_bucket_list_prefix_not_exist \
+s3tests_boto3/functional/test_s3.py::test_bucket_listv2_prefix_not_exist \
+s3tests_boto3/functional/test_s3.py::test_bucket_list_prefix_unreadable \
+s3tests_boto3/functional/test_s3.py::test_bucket_listv2_prefix_unreadable \
+s3tests_boto3/functional/test_s3.py::test_bucket_list_maxkeys_one \
+s3tests_boto3/functional/test_s3.py::test_bucket_listv2_maxkeys_one \
+s3tests_boto3/functional/test_s3.py::test_bucket_list_maxkeys_zero \
+s3tests_boto3/functional/test_s3.py::test_bucket_listv2_maxkeys_zero \
+s3tests_boto3/functional/test_s3.py::test_bucket_list_marker_none \
+s3tests_boto3/functional/test_s3.py::test_bucket_list_marker_empty \
+s3tests_boto3/functional/test_s3.py::test_bucket_listv2_continuationtoken_empty \
+s3tests_boto3/functional/test_s3.py::test_bucket_listv2_continuationtoken \
+s3tests_boto3/functional/test_s3.py::test_bucket_listv2_both_continuationtoken_startafter \
+s3tests_boto3/functional/test_s3.py::test_bucket_list_marker_unreadable \
+s3tests_boto3/functional/test_s3.py::test_bucket_listv2_startafter_unreadable \
+s3tests_boto3/functional/test_s3.py::test_bucket_list_marker_not_in_list \
+s3tests_boto3/functional/test_s3.py::test_bucket_listv2_startafter_not_in_list \
+s3tests_boto3/functional/test_s3.py::test_bucket_list_marker_after_list \
+s3tests_boto3/functional/test_s3.py::test_bucket_listv2_startafter_after_list \
+s3tests_boto3/functional/test_s3.py::test_bucket_list_objects_anonymous_fail \
+s3tests_boto3/functional/test_s3.py::test_bucket_listv2_objects_anonymous_fail \
+s3tests_boto3/functional/test_s3.py::test_bucket_list_long_name \
+s3tests_boto3/functional/test_s3.py::test_bucket_list_special_prefix
+kill -9 $pid || true

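For local debugging, the KV-store step above can be reproduced outside CI. A minimal sketch, assuming `weed` was installed with `cd weed && go install`, the ceph/s3-tests suite lives at /s3-tests, and S3TEST_CONF points at docker/compose/s3tests.conf (all paths mirror the workflow container):

```bash
#!/usr/bin/env bash
# Start a single-node server with the same flags the workflow uses.
set -x
weed -v 0 server -filer -filer.maxMB=64 -s3 -ip.bind 0.0.0.0 \
  -master.raftHashicorp -master.electionTimeout 1s -master.volumeSizeLimitMB=1024 \
  -volume.max=100 -volume.preStopSeconds=1 -s3.port=8000 -metricsPort=9324 \
  -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=true \
  -s3.config=./docker/compose/s3.json &
pid=$!
sleep 10
# Run one test as a smoke check instead of the workflow's full list.
cd /s3-tests
tox -- s3tests_boto3/functional/test_s3.py::test_bucket_list_empty
kill -9 $pid || true
```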
7
.gitignore

@@ -87,10 +87,15 @@ other/java/hdfs/dependency-reduced-pom.xml
# binary file
weed/weed
-weed/mq/client/cmd/weed_pub/weed_pub
+weed/mq/client/cmd/weed_pub_kv/weed_pub
docker/weed
+# test generated files
+weed/*/*.jpg
-docker/weed_sub
-docker/weed_pub
+weed/mq/schema/example.parquet
+docker/weed_pub_kv
+docker/weed_pub_record
+docker/weed_sub_kv
+docker/weed_sub_record

2
Makefile

@@ -15,7 +15,7 @@ full_install:
cd weed; go install -tags "elastic gocdk sqlite ydb tikv rclone"
server: install
-weed -v 0 server -s3 -filer -volume.max=0 -master.volumeSizeLimitMB=1024 -volume.preStopSeconds=1 -s3.port=8000 -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=false -s3.config=./docker/compose/s3.json -metricsPort=9324
+weed -v 0 server -s3 -filer -filer.maxMB=64 -volume.max=0 -master.volumeSizeLimitMB=1024 -volume.preStopSeconds=1 -s3.port=8000 -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=true -s3.config=./docker/compose/s3.json -metricsPort=9324
benchmark: install warp_install
pkill weed || true

1
README.md

@@ -34,6 +34,7 @@ Your support will be really appreciated by me and other supporters!
### Gold Sponsors
[![nodion](https://raw.githubusercontent.com/seaweedfs/seaweedfs/master/note/sponsor_nodion.png)](https://www.nodion.com)
[![piknik](https://raw.githubusercontent.com/seaweedfs/seaweedfs/master/note/piknik.png)](https://www.piknik.com)
+[![keepsec](https://raw.githubusercontent.com/seaweedfs/seaweedfs/master/note/keepsec.png)](https://www.keepsec.ca)
---

4
docker/Dockerfile.local

@@ -1,8 +1,8 @@
FROM alpine AS final
LABEL author="Chris Lu"
COPY ./weed /usr/bin/
-COPY ./weed_pub /usr/bin/
-COPY ./weed_sub /usr/bin/
+COPY ./weed_pub* /usr/bin/
+COPY ./weed_sub* /usr/bin/
RUN mkdir -p /etc/seaweedfs
COPY ./filer.toml /etc/seaweedfs/filer.toml
COPY ./entrypoint.sh /entrypoint.sh

2
docker/Dockerfile.s3tests

@@ -25,7 +25,7 @@ ENV \
NOSETESTS_EXCLUDE="" \
NOSETESTS_ATTR="" \
NOSETESTS_OPTIONS="" \
-S3TEST_CONF="/s3test.conf"
+S3TEST_CONF="/s3tests.conf"
ENTRYPOINT ["/bin/bash", "-c"]
CMD ["sleep 30 && exec ./virtualenv/bin/nosetests ${NOSETESTS_OPTIONS-} ${NOSETESTS_ATTR:+-a $NOSETESTS_ATTR} ${NOSETESTS_EXCLUDE:+-e $NOSETESTS_EXCLUDE}"]

6
docker/Makefile

@@ -9,8 +9,10 @@ binary:
export SWCOMMIT=$(shell git rev-parse --short HEAD)
export SWLDFLAGS="-X github.com/seaweedfs/seaweedfs/weed/util.COMMIT=$(SWCOMMIT)"
cd ../weed && CGO_ENABLED=$(cgo) GOOS=linux go build $(options) -tags "$(tags)" -ldflags "-s -w -extldflags -static $(SWLDFLAGS)" && mv weed ../docker/
-cd ../weed/mq/client/cmd/weed_pub && CGO_ENABLED=$(cgo) GOOS=linux go build && mv weed_pub ../../../../../docker/
-cd ../weed/mq/client/cmd/weed_sub && CGO_ENABLED=$(cgo) GOOS=linux go build && mv weed_sub ../../../../../docker/
+cd ../weed/mq/client/cmd/weed_pub_kv && CGO_ENABLED=$(cgo) GOOS=linux go build && mv weed_pub_kv ../../../../../docker/
+cd ../weed/mq/client/cmd/weed_pub_record && CGO_ENABLED=$(cgo) GOOS=linux go build && mv weed_pub_record ../../../../../docker/
+cd ../weed/mq/client/cmd/weed_sub_kv && CGO_ENABLED=$(cgo) GOOS=linux go build && mv weed_sub_kv ../../../../../docker/
+cd ../weed/mq/client/cmd/weed_sub_record && CGO_ENABLED=$(cgo) GOOS=linux go build && mv weed_sub_record ../../../../../docker/
binary_race: options = -race
binary_race: cgo = 1

7
docker/compose/local-brokers-compose.yml

@@ -6,7 +6,7 @@ services:
ports:
- 9333:9333
- 19333:19333
-command: "-v=1 master -volumeSizeLimitMB 100 -resumeState=false -ip=master0 -port=9333 -peers=master0:9333,master1:9334,master2:9335 -mdir=/tmp"
+command: "-v=0 master -volumeSizeLimitMB 100 -resumeState=false -ip=master0 -port=9333 -peers=master0:9333,master1:9334,master2:9335 -mdir=/tmp"
environment:
WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
WEED_MASTER_VOLUME_GROWTH_COPY_2: 2
@@ -16,7 +16,7 @@ services:
ports:
- 9334:9334
- 19334:19334
-command: "-v=1 master -volumeSizeLimitMB 100 -resumeState=false -ip=master1 -port=9334 -peers=master0:9333,master1:9334,master2:9335 -mdir=/tmp"
+command: "-v=0 master -volumeSizeLimitMB 100 -resumeState=false -ip=master1 -port=9334 -peers=master0:9333,master1:9334,master2:9335 -mdir=/tmp"
environment:
WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
WEED_MASTER_VOLUME_GROWTH_COPY_2: 2
@@ -26,7 +26,7 @@ services:
ports:
- 9335:9335
- 19335:19335
-command: "-v=1 master -volumeSizeLimitMB 100 -resumeState=false -ip=master2 -port=9335 -peers=master0:9333,master1:9334,master2:9335 -mdir=/tmp"
+command: "-v=0 master -volumeSizeLimitMB 100 -resumeState=false -ip=master2 -port=9335 -peers=master0:9333,master1:9334,master2:9335 -mdir=/tmp"
environment:
WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
WEED_MASTER_VOLUME_GROWTH_COPY_2: 2
@@ -85,6 +85,7 @@ services:
- master2
- volume1
- volume2
+- filer1
broker1:
image: chrislusf/seaweedfs:local
ports:

184
go.mod

@@ -3,13 +3,13 @@ module github.com/seaweedfs/seaweedfs
go 1.22.1
require (
-cloud.google.com/go v0.112.1 // indirect
-cloud.google.com/go/pubsub v1.37.0
-cloud.google.com/go/storage v1.39.1
+cloud.google.com/go v0.115.0 // indirect
+cloud.google.com/go/pubsub v1.39.0
+cloud.google.com/go/storage v1.42.0
github.com/Azure/azure-pipeline-go v0.2.3
github.com/Azure/azure-storage-blob-go v0.15.0
github.com/Shopify/sarama v1.38.1
-github.com/aws/aws-sdk-go v1.50.36
+github.com/aws/aws-sdk-go v1.54.13
github.com/beorn7/perks v1.0.1 // indirect
github.com/bwmarrin/snowflake v0.3.0
github.com/cenkalti/backoff/v4 v4.3.0
@@ -18,7 +18,6 @@ require (
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
-github.com/disintegration/imaging v1.6.2
github.com/dustin/go-humanize v1.0.1
github.com/eapache/go-resiliency v1.3.0 // indirect
github.com/eapache/go-xerial-snappy v0.0.0-20230111030713-bf00bc1b83b6 // indirect
@@ -28,22 +27,20 @@ require (
github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect
github.com/facebookgo/stats v0.0.0-20151006221625-1b76add642e4
github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 // indirect
-github.com/fclairamb/ftpserverlib v0.24.0
+github.com/fclairamb/ftpserverlib v0.24.1
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/go-errors/errors v1.5.1 // indirect
github.com/go-redis/redis/v8 v8.11.5
-github.com/go-redsync/redsync/v4 v4.11.0
+github.com/go-redsync/redsync/v4 v4.13.0
github.com/go-sql-driver/mysql v1.8.1
github.com/go-zookeeper/zk v1.0.3 // indirect
github.com/gocql/gocql v1.6.0
github.com/golang-jwt/jwt v3.2.2+incompatible // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4
github.com/golang/snappy v0.0.4 // indirect
github.com/google/btree v1.1.2
github.com/google/uuid v1.6.0
github.com/google/wire v0.6.0 // indirect
-github.com/googleapis/gax-go/v2 v2.12.3 // indirect
+github.com/googleapis/gax-go/v2 v2.12.5 // indirect
github.com/gorilla/mux v1.8.1
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
@@ -56,12 +53,11 @@ require (
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/json-iterator/go v1.1.12
github.com/karlseguin/ccache/v2 v2.0.8
-github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
-github.com/klauspost/compress v1.17.4 // indirect
+github.com/klauspost/compress v1.17.8 // indirect
github.com/klauspost/reedsolomon v1.12.1
github.com/kurin/blazer v0.5.3
github.com/lib/pq v1.10.9
-github.com/linxGnu/grocksdb v1.8.14
+github.com/linxGnu/grocksdb v1.9.2
github.com/magiconair/properties v1.8.7 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-ieproxy v0.0.11 // indirect
@@ -75,10 +71,10 @@ require (
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/posener/complete v1.2.3
github.com/pquerna/cachecontrol v0.2.0
-github.com/prometheus/client_golang v1.19.0
+github.com/prometheus/client_golang v1.19.1
github.com/prometheus/client_model v0.5.0 // indirect
github.com/prometheus/common v0.48.0 // indirect
-github.com/prometheus/procfs v0.13.0
+github.com/prometheus/procfs v0.15.1
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/seaweedfs/goexif v1.0.3
@@ -86,7 +82,7 @@ require (
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/spf13/afero v1.11.0 // indirect
github.com/spf13/cast v1.6.0 // indirect
-github.com/spf13/viper v1.18.2
+github.com/spf13/viper v1.19.0
github.com/stretchr/testify v1.9.0
github.com/stvp/tempredis v0.0.0-20181119212430-b82af8480203
github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965
@@ -96,82 +92,81 @@ require (
github.com/tsuna/gohbase v0.0.0-20201125011725-348991136365
github.com/tylertreat/BoomFilters v0.0.0-20210315201527-1a82519a3e43
github.com/valyala/bytebufferpool v1.0.0
-github.com/viant/ptrie v0.3.1
-github.com/viant/toolbox v0.34.5 // indirect
+github.com/viant/ptrie v1.0.1
github.com/xdg-go/pbkdf2 v1.0.0 // indirect
github.com/xdg-go/scram v1.1.2 // indirect
github.com/xdg-go/stringprep v1.0.4 // indirect
github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a // indirect
-go.etcd.io/etcd/client/v3 v3.5.12
-go.mongodb.org/mongo-driver v1.14.0
+go.etcd.io/etcd/client/v3 v3.5.13
+go.mongodb.org/mongo-driver v1.15.0
go.opencensus.io v0.24.0 // indirect
gocloud.dev v0.37.0
-gocloud.dev/pubsub/natspubsub v0.36.0
-gocloud.dev/pubsub/rabbitpubsub v0.36.0
-golang.org/x/crypto v0.21.0 // indirect
+gocloud.dev/pubsub/natspubsub v0.37.0
+gocloud.dev/pubsub/rabbitpubsub v0.37.0
+golang.org/x/crypto v0.25.0 // indirect
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3
-golang.org/x/image v0.15.0
-golang.org/x/net v0.22.0
-golang.org/x/oauth2 v0.18.0 // indirect
-golang.org/x/sys v0.18.0
-golang.org/x/text v0.14.0 // indirect
-golang.org/x/tools v0.17.0
+golang.org/x/image v0.18.0
+golang.org/x/net v0.27.0
+golang.org/x/oauth2 v0.21.0 // indirect
+golang.org/x/sys v0.22.0
+golang.org/x/text v0.16.0 // indirect
+golang.org/x/tools v0.23.0
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect
-google.golang.org/api v0.172.0
-google.golang.org/appengine v1.6.8 // indirect
-google.golang.org/genproto v0.0.0-20240311173647-c811ad7063a7 // indirect
-google.golang.org/grpc v1.62.1
-google.golang.org/protobuf v1.33.0
+google.golang.org/api v0.187.0
+google.golang.org/genproto v0.0.0-20240624140628-dc46fd24d27d // indirect
+google.golang.org/grpc v1.64.0
+google.golang.org/protobuf v1.34.2
gopkg.in/inf.v0 v0.9.1 // indirect
modernc.org/b v1.0.0 // indirect
-modernc.org/cc/v3 v3.40.0 // indirect
-modernc.org/ccgo/v3 v3.16.13 // indirect
-modernc.org/libc v1.29.0 // indirect
+modernc.org/libc v1.52.1 // indirect
modernc.org/mathutil v1.6.0
-modernc.org/memory v1.7.2 // indirect
-modernc.org/opt v0.1.3 // indirect
-modernc.org/sqlite v1.28.0
+modernc.org/memory v1.8.0 // indirect
+modernc.org/sqlite v1.30.1
modernc.org/strutil v1.2.0
-modernc.org/token v1.0.1 // indirect
+modernc.org/token v1.1.0 // indirect
)
require (
github.com/Jille/raft-grpc-transport v1.5.0
-github.com/arangodb/go-driver v1.6.1
+github.com/arangodb/go-driver v1.6.2
github.com/armon/go-metrics v0.4.1
-github.com/aws/aws-sdk-go-v2 v1.26.1
-github.com/aws/aws-sdk-go-v2/config v1.27.7
-github.com/aws/aws-sdk-go-v2/credentials v1.17.7
-github.com/aws/aws-sdk-go-v2/service/s3 v1.51.4
+github.com/aws/aws-sdk-go-v2 v1.30.1
+github.com/aws/aws-sdk-go-v2/config v1.27.16
+github.com/aws/aws-sdk-go-v2/credentials v1.17.18
+github.com/aws/aws-sdk-go-v2/service/s3 v1.58.0
+github.com/cognusion/imaging v1.0.1
github.com/fluent/fluent-logger-golang v1.9.0
-github.com/getsentry/sentry-go v0.27.0
+github.com/getsentry/sentry-go v0.28.1
github.com/golang-jwt/jwt/v5 v5.2.1
github.com/google/flatbuffers/go v0.0.0-20230108230133-3b8644d32c50
-github.com/hanwen/go-fuse/v2 v2.5.0
-github.com/hashicorp/raft v1.6.1
+github.com/hanwen/go-fuse/v2 v2.5.1
+github.com/hashicorp/raft v1.7.0
github.com/hashicorp/raft-boltdb/v2 v2.3.0
github.com/orcaman/concurrent-map/v2 v2.0.1
-github.com/rabbitmq/amqp091-go v1.9.0
+github.com/parquet-go/parquet-go v0.22.0
+github.com/rabbitmq/amqp091-go v1.10.0
github.com/rclone/rclone v1.66.0
-github.com/rdleal/intervalst v1.3.0
-github.com/schollz/progressbar/v3 v3.14.2
-github.com/shirou/gopsutil/v3 v3.24.1
+github.com/rdleal/intervalst v1.4.0
+github.com/schollz/progressbar/v3 v3.14.4
+github.com/shirou/gopsutil/v3 v3.24.5
github.com/tikv/client-go/v2 v2.0.7
-github.com/ydb-platform/ydb-go-sdk-auth-environ v0.2.0
-github.com/ydb-platform/ydb-go-sdk/v3 v3.56.1
-go.etcd.io/etcd/client/pkg/v3 v3.5.13
+github.com/ydb-platform/ydb-go-sdk-auth-environ v0.4.2
+github.com/ydb-platform/ydb-go-sdk/v3 v3.68.1
+go.etcd.io/etcd/client/pkg/v3 v3.5.14
go.uber.org/atomic v1.11.0
-google.golang.org/grpc/security/advancedtls v0.0.0-20220622233350-5cdb09fa29c1
+golang.org/x/sync v0.7.0
+google.golang.org/grpc/security/advancedtls v1.0.0
)
require (
-cloud.google.com/go/compute v1.25.0 // indirect
-cloud.google.com/go/compute/metadata v0.2.3 // indirect
-cloud.google.com/go/iam v1.1.6 // indirect
+cloud.google.com/go/auth v0.6.1 // indirect
+cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect
+cloud.google.com/go/compute/metadata v0.3.0 // indirect
+cloud.google.com/go/iam v1.1.8 // indirect
filippo.io/edwards25519 v1.1.0 // indirect
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 // indirect
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 // indirect
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 // indirect
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.1.1 // indirect
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
@@ -187,24 +182,25 @@ require (
github.com/PuerkitoBio/goquery v1.8.1 // indirect
github.com/Unknwon/goconfig v1.0.0 // indirect
github.com/abbot/go-http-auth v0.4.0 // indirect
+github.com/andybalholm/brotli v1.1.0 // indirect
github.com/andybalholm/cascadia v1.3.2 // indirect
github.com/arangodb/go-velocypack v0.0.0-20200318135517-5af53c29c67e // indirect
-github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.1 // indirect
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.3 // indirect
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.3 // indirect
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.3 // indirect
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3 // indirect
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.5 // indirect
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.13 // indirect
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.13 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.3 // indirect
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1 // indirect
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.5 // indirect
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.5 // indirect
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.3 // indirect
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.13 // indirect
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3 // indirect
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.15 // indirect
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.15 // indirect
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.13 // indirect
github.com/aws/aws-sdk-go-v2/service/sns v1.29.2 // indirect
github.com/aws/aws-sdk-go-v2/service/sqs v1.31.2 // indirect
-github.com/aws/aws-sdk-go-v2/service/sso v1.20.2 // indirect
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.2 // indirect
-github.com/aws/aws-sdk-go-v2/service/sts v1.28.4 // indirect
-github.com/aws/smithy-go v1.20.2 // indirect
+github.com/aws/aws-sdk-go-v2/service/sso v1.20.11 // indirect
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.5 // indirect
+github.com/aws/aws-sdk-go-v2/service/sts v1.28.12 // indirect
+github.com/aws/smithy-go v1.20.3 // indirect
github.com/boltdb/bolt v1.3.1 // indirect
github.com/bradenaw/juniper v0.15.2 // indirect
github.com/buengese/sgzip v0.1.1 // indirect
@@ -242,6 +238,7 @@ require (
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
github.com/hashicorp/go-msgpack/v2 v2.1.1 // indirect
github.com/hashicorp/golang-lru v0.6.0 // indirect
+github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
github.com/henrybear327/Proton-API-Bridge v1.0.0 // indirect
github.com/henrybear327/go-proton-api v1.0.0 // indirect
github.com/jcmturner/aescts/v2 v2.0.0 // indirect
@@ -254,7 +251,7 @@ require (
github.com/jtolio/eventkit v0.0.0-20231019094657-5d77ebb407d9 // indirect
github.com/jtolio/noiseconn v0.0.0-20231127013910-f6d9ecbf1de7 // indirect
github.com/jzelinskie/whirlpool v0.0.0-20201016144138-0675e54bb004 // indirect
-github.com/klauspost/cpuid/v2 v2.2.6 // indirect
+github.com/klauspost/cpuid/v2 v2.2.7 // indirect
github.com/koofr/go-httpclient v0.0.0-20230225102643-5d51a2e9dea6 // indirect
github.com/koofr/go-koofrclient v0.0.0-20221207135200-cbd7fc9ad6a6 // indirect
github.com/kr/fs v0.1.0 // indirect
@@ -262,22 +259,23 @@ require (
github.com/lufia/plan9stats v0.0.0-20231016141302-07b5767bb0ed // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
+github.com/mattn/go-sqlite3 v2.0.1+incompatible // indirect
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/montanaflynn/stats v0.7.0 // indirect
-github.com/nats-io/nats.go v1.31.0 // indirect
-github.com/nats-io/nkeys v0.4.6 // indirect
+github.com/nats-io/nats.go v1.34.0 // indirect
+github.com/nats-io/nkeys v0.4.7 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
+github.com/ncruces/go-strftime v0.1.9 // indirect
github.com/ncw/swift/v2 v2.0.2 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/onsi/gomega v1.27.10 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/oracle/oci-go-sdk/v65 v65.55.1 // indirect
github.com/patrickmn/go-cache v2.1.0+incompatible // indirect
-github.com/pelletier/go-toml/v2 v2.1.0 // indirect
+github.com/pelletier/go-toml/v2 v2.2.2 // indirect
github.com/pengsrc/go-shared v0.2.1-0.20190131101655-1999055a4a14 // indirect
github.com/philhofer/fwd v1.1.2 // indirect
-github.com/pierrec/lz4/v4 v4.1.17 // indirect
+github.com/pierrec/lz4/v4 v4.1.18 // indirect
github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c // indirect
github.com/pingcap/failpoint v0.0.0-20220801062533-2eaa32854a6c // indirect
github.com/pingcap/kvproto v0.0.0-20230403051650-e166ae588106 // indirect
@@ -292,6 +290,7 @@ require (
github.com/rivo/uniseg v0.4.7 // indirect
github.com/sagikazarmark/locafero v0.4.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
+github.com/segmentio/encoding v0.3.6 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
github.com/smartystreets/goconvey v1.8.1 // indirect
@@ -309,15 +308,15 @@ require (
github.com/twmb/murmur3 v1.1.3 // indirect
github.com/xanzy/ssh-agent v0.3.3 // indirect
github.com/yandex-cloud/go-genproto v0.0.0-20211115083454-9ca41db5ed9e // indirect
-github.com/ydb-platform/ydb-go-genproto v0.0.0-20240126124512-dbb0e1720dbf // indirect
-github.com/ydb-platform/ydb-go-yc v0.10.2 // indirect
-github.com/ydb-platform/ydb-go-yc-metadata v0.5.2 // indirect
+github.com/ydb-platform/ydb-go-genproto v0.0.0-20240316140903-4a47abca1cca // indirect
+github.com/ydb-platform/ydb-go-yc v0.12.1 // indirect
+github.com/ydb-platform/ydb-go-yc-metadata v0.6.1 // indirect
github.com/yunify/qingstor-sdk-go/v3 v3.2.0 // indirect
-github.com/yusufpapurcu/wmi v1.2.3 // indirect
+github.com/yusufpapurcu/wmi v1.2.4 // indirect
github.com/zeebo/blake3 v0.2.3 // indirect
github.com/zeebo/errs v1.3.0 // indirect
go.etcd.io/bbolt v1.3.8 // indirect
-go.etcd.io/etcd/api/v3 v3.5.12 // indirect
+go.etcd.io/etcd/api/v3 v3.5.13 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
go.opentelemetry.io/otel v1.24.0 // indirect
@@ -325,18 +324,17 @@ require (
go.opentelemetry.io/otel/trace v1.24.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
-golang.org/x/mod v0.14.0 // indirect
-golang.org/x/sync v0.6.0 // indirect
-golang.org/x/term v0.18.0 // indirect
+golang.org/x/mod v0.19.0 // indirect
+golang.org/x/term v0.22.0 // indirect
golang.org/x/time v0.5.0 // indirect
-google.golang.org/genproto/googleapis/api v0.0.0-20240311173647-c811ad7063a7 // indirect
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect
+google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4 // indirect
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/validator.v2 v2.0.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
lukechampine.com/uint128 v1.2.0 // indirect
modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6 // indirect
storj.io/common v0.0.0-20240111121419-ecae1362576c // indirect
storj.io/drpc v0.0.33 // indirect
storj.io/infectious v0.0.2 // indirect

1354
go.sum
File diff suppressed because it is too large

5
k8s/charts/seaweedfs/Chart.yaml

@@ -1,5 +1,6 @@
apiVersion: v1
description: SeaweedFS
name: seaweedfs
-appVersion: "3.64"
-version: 3.64.0
+appVersion: "3.69"
+# Dev note: Trigger a helm chart release by `git tag -a helm-<version>`
+version: 4.0.0

4
k8s/charts/seaweedfs/README.md

@@ -32,7 +32,7 @@ so your deployment will be spread/HA.
leveldb is the default database, this supports multiple filer replicas that will [sync automatically](https://github.com/seaweedfs/seaweedfs/wiki/Filer-Store-Replication), with some [limitations](https://github.com/seaweedfs/seaweedfs/wiki/Filer-Store-Replication#limitation).
-When the [limitations](https://github.com/seaweedfs/seaweedfs/wiki/Filer-Store-Replication#limitation) apply, or for a large number of filer replicas, an external datastore is recommened.
+When the [limitations](https://github.com/seaweedfs/seaweedfs/wiki/Filer-Store-Replication#limitation) apply, or for a large number of filer replicas, an external datastore is recommended.
Such as MySQL-compatible database, as specified in the `values.yaml` at `filer.extraEnvironmentVars`.
This database should be pre-configured and initialized by running:
@@ -96,7 +96,7 @@ filer:
enabled: true
```
-### Enabling Authenticaion to S3
+### Enabling Authentication to S3
To enable authentication for S3, you have two options:

3402
k8s/charts/seaweedfs/dashboards/seaweedfs-grafana-dashboard.json
File diff suppressed because it is too large

45
k8s/charts/seaweedfs/templates/_helpers.tpl

@@ -49,25 +49,7 @@ Inject extra environment vars in the format key:value, if populated
{{- $imageOverride := .Values.filer.imageOverride -}}
{{- printf "%s" $imageOverride -}}
{{- else -}}
-{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}}
-{{- $repositoryName := .Values.image.repository | toString -}}
-{{- $name := .Values.global.imageName | toString -}}
-{{- $tag := .Chart.AppVersion | toString -}}
-{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}}
-{{- end -}}
-{{- end -}}
-{{/* Return the proper dbSchema image */}}
-{{- define "filer.dbSchema.image" -}}
-{{- if .Values.filer.dbSchema.imageOverride -}}
-{{- $imageOverride := .Values.filer.dbSchema.imageOverride -}}
-{{- printf "%s" $imageOverride -}}
-{{- else -}}
-{{- $registryName := default .Values.global.registry .Values.global.localRegistry | toString -}}
-{{- $repositoryName := .Values.global.repository | toString -}}
-{{- $name := .Values.filer.dbSchema.imageName | toString -}}
-{{- $tag := .Values.filer.dbSchema.imageTag | toString -}}
-{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}}
+{{- include "common.image" . }}
{{- end -}}
{{- end -}}
@@ -77,11 +59,7 @@ Inject extra environment vars in the format key:value, if populated
{{- $imageOverride := .Values.master.imageOverride -}}
{{- printf "%s" $imageOverride -}}
{{- else -}}
-{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}}
-{{- $repositoryName := .Values.image.repository | toString -}}
-{{- $name := .Values.global.imageName | toString -}}
-{{- $tag := .Chart.AppVersion | toString -}}
-{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}}
+{{- include "common.image" . }}
{{- end -}}
{{- end -}}
@@ -91,11 +69,7 @@ Inject extra environment vars in the format key:value, if populated
{{- $imageOverride := .Values.s3.imageOverride -}}
{{- printf "%s" $imageOverride -}}
{{- else -}}
-{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}}
-{{- $repositoryName := .Values.image.repository | toString -}}
-{{- $name := .Values.global.imageName | toString -}}
-{{- $tag := .Chart.AppVersion | toString -}}
-{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}}
+{{- include "common.image" . }}
{{- end -}}
{{- end -}}
@ -105,11 +79,20 @@ Inject extra environment vars in the format key:value, if populated
{{- $imageOverride := .Values.volume.imageOverride -}}
{{- printf "%s" $imageOverride -}}
{{- else -}}
{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}}
{{- include "common.image" . }}
{{- end -}}
{{- end -}}
{{/* Computes the container image name for all components (if they are not overridden) */}}
{{- define "common.image" -}}
{{- $registryName := default .Values.image.registry .Values.global.registry | toString -}}
{{- $repositoryName := .Values.image.repository | toString -}}
{{- $name := .Values.global.imageName | toString -}}
{{- $tag := .Chart.AppVersion | toString -}}
{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}}
{{- if $registryName -}}
{{- printf "%s/%s%s:%s" $registryName $repositoryName $name $tag -}}
{{- else -}}
{{- printf "%s%s:%s" $repositoryName $name $tag -}}
{{- end -}}
{{- end -}}
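
The new `common.image` helper centralizes the image-name construction that each component template previously duplicated. As a rough illustration only (not part of the chart), the same resolution expressed in Go; the parameters mirror `image.registry`, `image.repository`, `global.imageName`, and `.Chart.AppVersion`, and `registry.example.com` is a placeholder:

```go
package main

import "fmt"

// commonImage mirrors the template's two branches: with a registry the
// result is "<registry>/<repository><name>:<tag>"; without one, the
// registry segment and its slash are dropped entirely.
func commonImage(registry, repository, name, tag string) string {
	if registry != "" {
		return fmt.Sprintf("%s/%s%s:%s", registry, repository, name, tag)
	}
	return fmt.Sprintf("%s%s:%s", repository, name, tag)
}

func main() {
	fmt.Println(commonImage("registry.example.com", "", "chrislusf/seaweedfs", "3.69"))
	// registry.example.com/chrislusf/seaweedfs:3.69
	fmt.Println(commonImage("", "", "chrislusf/seaweedfs", "3.69"))
	// chrislusf/seaweedfs:3.69
}
```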

35
k8s/charts/seaweedfs/templates/cluster-role.yaml

@ -0,0 +1,35 @@
{{- if .Values.global.createClusterRole }}
# hack to allow deleting master pods after migration
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Values.global.serviceAccountName }}-rw-cr
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:serviceaccount:{{ .Values.global.serviceAccountName }}:default
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
subjects:
- kind: ServiceAccount
name: {{ .Values.global.serviceAccountName }}
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ .Values.global.serviceAccountName }}-rw-cr
{{- end }}

4
k8s/charts/seaweedfs/templates/filler-ingress.yaml → k8s/charts/seaweedfs/templates/filer-ingress.yaml

@ -11,9 +11,9 @@ kind: Ingress
metadata:
name: ingress-{{ template "seaweedfs.name" . }}-filer
namespace: {{ .Release.Namespace }}
{{- if .Values.filer.ingress.annotations }}
{{- with .Values.filer.ingress.annotations }}
annotations:
{{ tpl .Values.filer.ingress.annotations . | nindent 4 | trim }}
{{- toYaml . | nindent 4 }}
{{- end }}
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}

3
k8s/charts/seaweedfs/templates/filer-servicemonitor.yaml

@ -12,6 +12,9 @@ metadata:
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: filer
{{- with .Values.global.monitoring.additionalLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
endpoints:
- interval: 30s

34
k8s/charts/seaweedfs/templates/filer-statefulset.yaml

@ -35,10 +35,16 @@ spec:
{{ with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.filer.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
annotations:
{{ with .Values.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.filer.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
restartPolicy: {{ default .Values.global.restartPolicy .Values.filer.restartPolicy }}
{{- if .Values.filer.affinity }}
@ -50,9 +56,7 @@ spec:
{{ tpl .Values.filer.tolerations . | nindent 8 | trim }}
{{- end }}
{{- include "seaweedfs.imagePullSecrets" . | nindent 6 }}
{{- if .Values.global.createClusterRole }}
serviceAccountName: {{ .Values.filer.serviceAccountName | default .Values.global.serviceAccountName | quote }} # for deleting statefulset pods after migration
{{- end }}
terminationGracePeriodSeconds: 60
{{- if .Values.filer.priorityClassName }}
priorityClassName: {{ .Values.filer.priorityClassName | quote }}
@ -62,6 +66,9 @@ spec:
initContainers:
{{ tpl .Values.filer.initContainers . | nindent 8 | trim }}
{{- end }}
{{- if .Values.filer.podSecurityContext.enabled }}
securityContext: {{- omit .Values.filer.podSecurityContext "enabled" | toYaml | nindent 8 }}
{{- end }}
containers:
- name: seaweedfs
image: {{ template "filer.image" . }}
@ -84,11 +91,13 @@ spec:
secretKeyRef:
name: secret-seaweedfs-db
key: user
optional: true
- name: WEED_MYSQL_PASSWORD
valueFrom:
secretKeyRef:
name: secret-seaweedfs-db
key: password
optional: true
- name: SEAWEEDFS_FULLNAME
value: "{{ template "seaweedfs.name" . }}"
{{- if .Values.filer.extraEnvironmentVars }}
@ -124,7 +133,7 @@ spec:
- "-ec"
- |
exec /usr/bin/weed \
{{- if eq .Values.filer.logs.type "hostPath" }}
{{- if or (eq .Values.filer.logs.type "hostPath") (eq .Values.filer.logs.type "persistentVolumeClaim") (eq .Values.filer.logs.type "emptyDir") }}
-logdir=/logs \
{{- else }}
-logtostderr=true \
@ -192,7 +201,7 @@ spec:
{{- end }}
-master={{ if .Values.global.masterServer }}{{.Values.global.masterServer}}{{ else }}{{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master.{{ $.Release.Namespace }}:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}{{ end }}
volumeMounts:
{{- if eq .Values.filer.logs.type "hostPath" }}
{{- if (or (eq .Values.filer.logs.type "hostPath") (eq .Values.filer.logs.type "persistentVolumeClaim") (eq .Values.filer.logs.type "emptyDir")) }}
- name: seaweedfs-filer-log-volume
mountPath: "/logs/"
{{- end }}
@ -201,7 +210,7 @@ spec:
mountPath: /etc/sw
readOnly: true
{{- end }}
{{- if (or .Values.filer.enablePVC (or (eq .Values.filer.data.type "hostPath") (eq .Values.filer.data.type "persistentVolumeClaim"))) }}
{{- if (or .Values.filer.enablePVC (or (eq .Values.filer.data.type "hostPath") (eq .Values.filer.data.type "persistentVolumeClaim") (eq .Values.filer.data.type "emptyDir"))) }}
- name: data-filer
mountPath: /data
{{- end }}
@ -258,9 +267,12 @@ spec:
failureThreshold: {{ .Values.filer.livenessProbe.failureThreshold }}
timeoutSeconds: {{ .Values.filer.livenessProbe.timeoutSeconds }}
{{- end }}
{{- if .Values.filer.resources }}
{{- with .Values.filer.resources }}
resources:
{{ tpl .Values.filer.resources . | nindent 12 | trim }}
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if .Values.filer.containerSecurityContext.enabled }}
securityContext: {{- omit .Values.filer.containerSecurityContext "enabled" | toYaml | nindent 12 }}
{{- end }}
{{- if .Values.filer.sidecars }}
{{- include "common.tplvalues.render" (dict "value" .Values.filer.sidecars "context" $) | nindent 8 }}
@ -277,6 +289,10 @@ spec:
persistentVolumeClaim:
claimName: {{ .Values.filer.logs.claimName }}
{{- end }}
{{- if eq .Values.filer.logs.type "emptyDir" }}
- name: seaweedfs-filer-log-volume
emptyDir: {}
{{- end }}
{{- if eq .Values.filer.data.type "hostPath" }}
- name: data-filer
hostPath:
@ -288,6 +304,10 @@ spec:
persistentVolumeClaim:
claimName: {{ .Values.filer.data.claimName }}
{{- end }}
{{- if eq .Values.filer.data.type "emptyDir" }}
- name: data-filer
emptyDir: {}
{{- end }}
- name: db-schema-config-volume
configMap:
name: seaweedfs-db-init-config

4
k8s/charts/seaweedfs/templates/master-ingress.yaml

@ -11,9 +11,9 @@ kind: Ingress
metadata:
name: ingress-{{ template "seaweedfs.name" . }}-master
namespace: {{ .Release.Namespace }}
{{- if .Values.master.ingress.annotations }}
{{- with .Values.master.ingress.annotations }}
annotations:
{{ tpl .Values.master.ingress.annotations . | nindent 4 | trim }}
{{- toYaml . | nindent 4 }}
{{- end }}
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}

3
k8s/charts/seaweedfs/templates/master-servicemonitor.yaml

@ -12,6 +12,9 @@ metadata:
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: master
{{- with .Values.global.monitoring.additionalLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
endpoints:
- interval: 30s

34
k8s/charts/seaweedfs/templates/master-statefulset.yaml

@ -34,10 +34,16 @@ spec:
{{ with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.master.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
annotations:
{{ with .Values.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.master.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
restartPolicy: {{ default .Values.global.restartPolicy .Values.master.restartPolicy }}
{{- if .Values.master.affinity }}
@ -61,6 +67,9 @@ spec:
initContainers:
{{ tpl .Values.master.initContainers . | nindent 8 | trim }}
{{- end }}
{{- if .Values.master.podSecurityContext.enabled }}
securityContext: {{- omit .Values.master.podSecurityContext "enabled" | toYaml | nindent 8 }}
{{- end }}
containers:
- name: seaweedfs
image: {{ template "master.image" . }}
@ -105,9 +114,9 @@ spec:
command:
- "/bin/sh"
- "-ec"
- |
- |
exec /usr/bin/weed \
{{- if eq .Values.master.logs.type "hostPath" }}
{{- if or (eq .Values.master.logs.type "hostPath") (eq .Values.master.logs.type "persistentVolumeClaim") (eq .Values.master.logs.type "emptyDir") }}
-logdir=/logs \
{{- else }}
-logtostderr=true \
@ -155,7 +164,7 @@ spec:
volumeMounts:
- name : data-{{ .Release.Namespace }}
mountPath: /data
{{- if eq .Values.master.logs.type "hostPath" }}
{{- if or (eq .Values.master.logs.type "hostPath") (eq .Values.master.logs.type "persistentVolumeClaim") (eq .Values.master.logs.type "emptyDir") }}
- name: seaweedfs-master-log-volume
mountPath: "/logs/"
{{- end }}
@ -218,9 +227,12 @@ spec:
failureThreshold: {{ .Values.master.livenessProbe.failureThreshold }}
timeoutSeconds: {{ .Values.master.livenessProbe.timeoutSeconds }}
{{- end }}
{{- if .Values.master.resources }}
{{- with .Values.master.resources }}
resources:
{{ tpl .Values.master.resources . | nindent 12 | trim }}
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if .Values.master.containerSecurityContext.enabled }}
securityContext: {{- omit .Values.master.containerSecurityContext "enabled" | toYaml | nindent 12 }}
{{- end }}
{{- if .Values.master.sidecars }}
{{- include "common.tplvalues.render" (dict "value" .Values.master.sidecars "context" $) | nindent 8 }}
@ -235,7 +247,11 @@ spec:
{{- if eq .Values.master.logs.type "existingClaim" }}
- name: seaweedfs-master-log-volume
persistentVolumeClaim:
claimName: {{ .Values.master.logs.claimName }}
claimName: {{ .Values.master.logs.claimName }}
{{- end }}
{{- if eq .Values.master.logs.type "emptyDir" }}
- name: seaweedfs-master-log-volume
emptyDir: {}
{{- end }}
{{- if eq .Values.master.data.type "hostPath" }}
- name: data-{{ .Release.Namespace }}
@ -246,7 +262,11 @@ spec:
{{- if eq .Values.master.data.type "existingClaim" }}
- name: data-{{ .Release.Namespace }}
persistentVolumeClaim:
claimName: {{ .Values.master.data.claimName }}
claimName: {{ .Values.master.data.claimName }}
{{- end }}
{{- if eq .Values.master.data.type "emptyDir" }}
- name: data-{{ .Release.Namespace }}
emptyDir: {}
{{- end }}
- name: master-config
configMap:

6
k8s/charts/seaweedfs/templates/post-install-bucket-hook.yaml

@ -22,6 +22,9 @@ spec:
app.kubernetes.io/instance: {{ .Release.Name | quote }}
spec:
restartPolicy: Never
{{- if .Values.filer.podSecurityContext.enabled }}
securityContext: {{- omit .Values.filer.podSecurityContext "enabled" | toYaml | nindent 8 }}
{{- end }}
containers:
- name: post-install-job
image: {{ template "master.image" . }}
@ -80,6 +83,9 @@ spec:
{{- end }}
- containerPort: {{ .Values.master.grpcPort }}
#name: swfs-master-grpc
{{- if .Values.filer.containerSecurityContext.enabled }}
securityContext: {{- omit .Values.filer.containerSecurityContext "enabled" | toYaml | nindent 12 }}
{{- end }}
{{- if .Values.filer.s3.enableAuth }}
volumes:
- name: config-users

24
k8s/charts/seaweedfs/templates/s3-deployment.yaml

@ -27,10 +27,16 @@ spec:
{{ with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.s3.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
annotations:
{{ with .Values.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.s3.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
restartPolicy: {{ default .Values.global.restartPolicy .Values.s3.restartPolicy }}
{{- if .Values.s3.tolerations }}
@ -50,6 +56,9 @@ spec:
initContainers:
{{ tpl .Values.s3.initContainers . | nindent 8 | trim }}
{{- end }}
{{- if .Values.s3.podSecurityContext.enabled }}
securityContext: {{- omit .Values.s3.podSecurityContext "enabled" | toYaml | nindent 8 }}
{{- end }}
containers:
- name: seaweedfs
image: {{ template "s3.image" . }}
@ -96,7 +105,7 @@ spec:
- "-ec"
- |
exec /usr/bin/weed \
{{- if eq .Values.s3.logs.type "hostPath" }}
{{- if or (eq .Values.s3.logs.type "hostPath") (eq .Values.s3.logs.type "emptyDir") }}
-logdir=/logs \
{{- else }}
-logtostderr=true \
@ -133,7 +142,7 @@ spec:
{{- end }}
-filer={{ template "seaweedfs.name" . }}-filer-client.{{ .Release.Namespace }}:{{ .Values.filer.port }}
volumeMounts:
{{- if eq .Values.s3.logs.type "hostPath" }}
{{- if or (eq .Values.s3.logs.type "hostPath") (eq .Values.s3.logs.type "emptyDir") }}
- name: logs
mountPath: "/logs/"
{{- end }}
@ -195,9 +204,12 @@ spec:
failureThreshold: {{ .Values.s3.livenessProbe.failureThreshold }}
timeoutSeconds: {{ .Values.s3.livenessProbe.timeoutSeconds }}
{{- end }}
{{- if .Values.s3.resources }}
{{- with .Values.s3.resources }}
resources:
{{ tpl .Values.s3.resources . | nindent 12 | trim }}
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if .Values.s3.containerSecurityContext.enabled }}
securityContext: {{- omit .Values.s3.containerSecurityContext "enabled" | toYaml | nindent 12 }}
{{- end }}
{{- if .Values.s3.sidecars }}
{{- include "common.tplvalues.render" (dict "value" .Values.s3.sidecars "context" $) | nindent 8 }}
@ -219,6 +231,10 @@ spec:
path: {{ .Values.s3.logs.hostPathPrefix }}/logs/seaweedfs/s3
type: DirectoryOrCreate
{{- end }}
{{- if eq .Values.s3.logs.type "emptyDir" }}
- name: logs
emptyDir: {}
{{- end }}
{{- if .Values.global.enableSecurity }}
- name: security-config
configMap:

3
k8s/charts/seaweedfs/templates/s3-servicemonitor.yaml

@ -12,6 +12,9 @@ metadata:
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: s3
{{- with .Values.global.monitoring.additionalLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
endpoints:
- interval: 30s

37
k8s/charts/seaweedfs/templates/service-account.yaml

@ -1,20 +1,3 @@
{{- if .Values.global.createClusterRole }}
#hack for delete pod master after migration
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Values.global.serviceAccountName }}-rw-cr
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
@ -25,22 +8,4 @@ metadata:
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:serviceaccount:{{ .Values.global.serviceAccountName }}:default
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
subjects:
- kind: ServiceAccount
name: {{ .Values.global.serviceAccountName }}
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ .Values.global.serviceAccountName }}-rw-cr
{{- end }}
automountServiceAccountToken: {{ .Values.global.automountServiceAccountToken }}

3
k8s/charts/seaweedfs/templates/volume-servicemonitor.yaml

@ -12,6 +12,9 @@ metadata:
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: volume
{{- with .Values.global.monitoring.additionalLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
endpoints:
- interval: 30s

28
k8s/charts/seaweedfs/templates/volume-statefulset.yaml

@ -28,10 +28,16 @@ spec:
{{ with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.volume.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
annotations:
{{ with .Values.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.volume.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
{{- if .Values.volume.affinity }}
affinity:
@ -72,6 +78,9 @@ spec:
{{ tpl .Values.volume.initContainers . | nindent 8 | trim }}
{{- end }}
{{- end }}
{{- if .Values.volume.podSecurityContext.enabled }}
securityContext: {{- omit .Values.volume.podSecurityContext "enabled" | toYaml | nindent 8 }}
{{- end }}
containers:
- name: seaweedfs
image: {{ template "volume.image" . }}
@ -233,9 +242,12 @@ spec:
failureThreshold: {{ .Values.volume.livenessProbe.failureThreshold }}
timeoutSeconds: {{ .Values.volume.livenessProbe.timeoutSeconds }}
{{- end }}
{{- if .Values.volume.resources }}
{{- with .Values.volume.resources }}
resources:
{{ tpl .Values.volume.resources . | nindent 12 | trim }}
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if .Values.volume.containerSecurityContext.enabled }}
securityContext: {{- omit .Values.volume.containerSecurityContext "enabled" | toYaml | nindent 12 }}
{{- end }}
{{- if .Values.volume.sidecars }}
{{- include "common.tplvalues.render" (dict "value" .Values.volume.sidecars "context" $) | nindent 8 }}
@ -255,6 +267,10 @@ spec:
persistentVolumeClaim:
claimName: {{ $dir.claimName }}
{{- end }}
{{- if eq $dir.type "emptyDir" }}
- name: {{ $dir.name }}
emptyDir: {}
{{- end }}
{{- end }}
@ -270,6 +286,10 @@ spec:
persistentVolumeClaim:
claimName: {{ .Values.volume.idx.claimName }}
{{- end }}
{{- if eq .Values.volume.idx.type "emptyDir" }}
- name: idx
emptyDir: {}
{{- end }}
{{- end }}
{{- if .Values.volume.logs }}
@ -284,6 +304,10 @@ spec:
persistentVolumeClaim:
claimName: {{ .Values.volume.logs.claimName }}
{{- end }}
{{- if eq .Values.volume.logs.type "emptyDir" }}
- name: logs
emptyDir: {}
{{- end }}
{{- end }}
{{- if .Values.global.enableSecurity }}
- name: security-config

168
k8s/charts/seaweedfs/values.yaml

@ -19,12 +19,14 @@ global:
filerRead: false
# we will use this serviceAccountName for all ClusterRoles/ClusterRoleBindings
serviceAccountName: "seaweedfs"
automountServiceAccountToken: true
certificates:
alphacrds: false
monitoring:
enabled: false
gatewayHost: null
gatewayPort: null
additionalLabels: {}
# if enabled will use global.replicationPlacment and override master & filer defaultReplicaPlacement config
enableReplication: false
# replication type is XYZ:
@ -47,9 +49,6 @@ image:
master:
enabled: true
repository: null
imageName: null
imageTag: null
imageOverride: null
restartPolicy: null
replicas: 1
@ -77,7 +76,7 @@ master:
config: |-
# Enter any extra configuration for master.toml here.
# It may be be a multi-line string.
# It may be a multi-line string.
# You may use ANY storage-class, example with local-path-provisioner
# Annotations are optional.
@ -92,11 +91,18 @@ master:
# data:
# type: "existingClaim"
# claimName: "my-pvc"
#
# You can also use emptyDir storage:
# data:
# type: "emptyDir"
data:
type: "hostPath"
storageClass: ""
hostPathPrefix: /ssd
# You can also use emptyDir storage:
# logs:
# type: "emptyDir"
logs:
type: "hostPath"
size: ""
@ -119,6 +125,12 @@ master:
extraVolumes: ""
extraVolumeMounts: ""
# Labels to be added to the master pods
podLabels: {}
# Annotations to be added to the master pods
podAnnotations: {}
## Set podManagementPolicy
podManagementPolicy: Parallel
@ -126,7 +138,7 @@ master:
# should map directly to the value of the resources field for a PodSpec,
# formatted as a multi-line string. By default no direct resource request
# is made.
resources: null
resources: {}
# updatePartition is used to control a careful rolling update of SeaweedFS
# masters.
@ -166,12 +178,31 @@ master:
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
serviceAccountName: ""
# Configure security context for Pod
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
# Example:
# podSecurityContext:
# enabled: true
# runAsUser: 1000
# runAsGroup: 3000
# fsGroup: 2000
podSecurityContext: {}
# Configure security context for Container
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
# Example:
# containerSecurityContext:
# enabled: true
# runAsUser: 2000
# allowPrivilegeEscalation: false
containerSecurityContext: {}
ingress:
enabled: false
className: "nginx"
# host: false for "*" hostname
host: "master.seaweedfs.local"
annotations: |
annotations:
nginx.ingress.kubernetes.io/auth-type: "basic"
nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret"
nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Master'
@ -223,9 +254,6 @@ master:
volume:
enabled: true
repository: null
imageName: null
imageTag: null
imageOverride: null
restartPolicy: null
port: 8080
@ -255,9 +283,14 @@ volume:
# maxVolumes: 0 # If set to zero on non-windows OS, the limit will be auto configured. (default "7")
#
# You may also specify an existing claim:
# - name: data
# type: "existingClaim"
# claimName: "my-pvc"
# - name: data
# type: "existingClaim"
# claimName: "my-pvc"
# maxVolumes: 0 # If set to zero on non-windows OS, the limit will be auto configured. (default "7")
#
# You can also use emptyDir storage:
# - name: data
# type: "emptyDir"
# maxVolumes: 0 # If set to zero on non-windows OS, the limit will be auto configured. (default "7")
dataDirs:
@ -290,12 +323,17 @@ volume:
# idx:
# type: "existingClaim"
# claimName: "myClaim"
#
# or
#
# idx:
# type: "emptyDir"
# same applies to "logs"
idx: ""
idx: {}
logs: ""
logs: {}
# limit background compaction or copying speed in mega bytes per second
compactionMBps: "50"
@ -332,6 +370,12 @@ volume:
extraVolumes: ""
extraVolumeMounts: ""
# Labels to be added to the volume pods
podLabels: {}
# Annotations to be added to the volume pods
podAnnotations: {}
## Set podManagementPolicy
podManagementPolicy: Parallel
@ -352,7 +396,7 @@ volume:
# should map directly to the value of the resources field for a PodSpec,
# formatted as a multi-line string. By default no direct resource request
# is made.
resources: null
resources: {}
# Toleration Settings for server pods
# This should be a multi-line string matching the Toleration array
@ -377,6 +421,25 @@ volume:
extraEnvironmentVars:
# Configure security context for Pod
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
# Example:
# podSecurityContext:
# enabled: true
# runAsUser: 1000
# runAsGroup: 3000
# fsGroup: 2000
podSecurityContext: {}
# Configure security context for Container
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
# Example:
# containerSecurityContext:
# enabled: true
# runAsUser: 2000
# allowPrivilegeEscalation: false
containerSecurityContext: {}
# used to configure livenessProbe on volume-server containers
#
livenessProbe:
@ -405,9 +468,6 @@ volume:
filer:
enabled: true
repository: null
imageName: null
imageTag: null
imageOverride: null
restartPolicy: null
replicas: 1
@ -460,12 +520,19 @@ filer:
# data:
# type: "existingClaim"
# claimName: "my-pvc"
#
# You can also use emptyDir storage:
# data:
# type: "emptyDir"
data:
type: "hostPath"
size: ""
storageClass: ""
hostPathPrefix: /storage
# You can also use emptyDir storage:
# logs:
# type: "emptyDir"
logs:
type: "hostPath"
size: ""
@ -488,6 +555,12 @@ filer:
extraVolumes: ""
extraVolumeMounts: ""
# Labels to be added to the filer pods
podLabels: {}
# Annotations to be added to the filer pods
podAnnotations: {}
## Set podManagementPolicy
podManagementPolicy: Parallel
@ -512,7 +585,7 @@ filer:
# should map directly to the value of the resources field for a PodSpec,
# formatted as a multi-line string. By default no direct resource request
# is made.
resources: null
resources: {}
# Toleration Settings for server pods
# This should be a multi-line string matching the Toleration array
@ -535,12 +608,31 @@ filer:
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
serviceAccountName: ""
# Configure security context for Pod
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
# Example:
# podSecurityContext:
# enabled: true
# runAsUser: 1000
# runAsGroup: 3000
# fsGroup: 2000
podSecurityContext: {}
# Configure security context for Container
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
# Example:
# containerSecurityContext:
# enabled: true
# runAsUser: 2000
# allowPrivilegeEscalation: false
containerSecurityContext: {}
ingress:
enabled: false
className: "nginx"
# host: false for "*" hostname
host: "seaweedfs.cluster.local"
annotations: |
annotations:
nginx.ingress.kubernetes.io/backend-protocol: GRPC
nginx.ingress.kubernetes.io/auth-type: "basic"
nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret"
@ -605,7 +697,7 @@ filer:
timeoutSeconds: 10
# secret env variables
secretExtraEnvironmentVars: []
secretExtraEnvironmentVars: {}
# WEED_POSTGRES_USERNAME:
# secretKeyRef:
# name: postgres-credentials
@ -640,9 +732,7 @@ filer:
s3:
enabled: false
repository: null
imageName: null
imageTag: null
imageOverride: null
restartPolicy: null
replicas: 1
bindAddress: 0.0.0.0
@ -679,6 +769,12 @@ s3:
extraVolumes: ""
extraVolumeMounts: ""
# Labels to be added to the s3 pods
podLabels: {}
# Annotations to be added to the s3 pods
podAnnotations: {}
# Resource requests, limits, etc. for the server cluster placement. This
# should map directly to the value of the resources field for a PodSpec,
# formatted as a multi-line string. By default no direct resource request
@ -706,6 +802,28 @@ s3:
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
serviceAccountName: ""
# Configure security context for Pod
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
# Example:
# podSecurityContext:
# enabled: true
# runAsUser: 1000
# runAsGroup: 3000
# fsGroup: 2000
podSecurityContext: {}
# Configure security context for Container
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
# Example:
# containerSecurityContext:
# enabled: true
# runAsUser: 2000
# allowPrivilegeEscalation: false
containerSecurityContext: {}
# You can also use emptyDir storage:
# logs:
# type: "emptyDir"
logs:
type: "hostPath"
size: ""
@ -746,7 +864,7 @@ s3:
# host: false for "*" hostname
host: "seaweedfs.cluster.local"
# additional ingress annotations for the s3 endpoint
annotations: []
annotations: {}
tls: []
certificates:

BIN
note/keepsec.png

(image added: 100 × 36, 7.8 KiB)

BIN
note/piknik.png

(image updated: 165 × 50, 7.2 KiB → 156 × 35, 7.4 KiB)

13
other/java/client/src/main/proto/filer.proto

@ -54,6 +54,9 @@ service SeaweedFiler {
rpc GetFilerConfiguration (GetFilerConfigurationRequest) returns (GetFilerConfigurationResponse) {
}
rpc TraverseBfsMetadata (TraverseBfsMetadataRequest) returns (stream TraverseBfsMetadataResponse) {
}
rpc SubscribeMetadata (SubscribeMetadataRequest) returns (stream SubscribeMetadataResponse) {
}
@ -360,6 +363,15 @@ message SubscribeMetadataResponse {
int64 ts_ns = 3;
}
message TraverseBfsMetadataRequest {
string directory = 1;
repeated string excluded_prefixes = 2;
}
message TraverseBfsMetadataResponse {
string directory = 1;
Entry entry = 2;
}
message LogEntry {
int64 ts_ns = 1;
int32 partition_key_hash = 2;
@ -426,6 +438,7 @@ message FilerConf {
string rack = 10;
string data_node = 11;
uint32 max_file_name_length = 12;
bool disable_chunk_deletion = 13;
}
repeated PathConf locations = 2;
}
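
The new `TraverseBfsMetadata` RPC streams a filer's metadata tree breadth-first, and is what the rewritten bootstrap path in `weed/filer/filer.go` (further down in this diff) uses in place of a metadata subscription replay. A minimal sketch of a consumer, assuming the `StreamBfs` and `WithFilerClient` signatures exactly as they appear in that file:

```go
package example

import (
	"context"

	"github.com/seaweedfs/seaweedfs/weed/filer"
	"github.com/seaweedfs/seaweedfs/weed/pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
	"github.com/seaweedfs/seaweedfs/weed/util"
	"google.golang.org/grpc"
)

// bootstrapFromPeer walks the peer's metadata tree breadth-first up to
// stopTsNs and inserts every streamed entry into the local store.
func bootstrapFromPeer(peer pb.ServerAddress, opt grpc.DialOption,
	clientId int32, stopTsNs int64, store filer.FilerStore) error {
	return pb.WithFilerClient(false, clientId, peer, opt,
		func(client filer_pb.SeaweedFilerClient) error {
			return filer_pb.StreamBfs(client, "/", stopTsNs,
				func(parentPath util.FullPath, entry *filer_pb.Entry) error {
					// one callback per entry, parents before children
					return store.InsertEntry(context.Background(),
						filer.FromPbEntry(string(parentPath), entry))
				})
		})
}
```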

3
unmaintained/repeated_vacuum/repeated_vacuum.go

@ -7,6 +7,7 @@ import (
"log"
"math/rand"
"time"
"context"
"google.golang.org/grpc"
@ -53,7 +54,7 @@ func main() {
}
func genFile(grpcDialOption grpc.DialOption, i int) (*operation.AssignResult, string) {
assignResult, err := operation.Assign(func() pb.ServerAddress { return pb.ServerAddress(*master) }, grpcDialOption, &operation.VolumeAssignRequest{
assignResult, err := operation.Assign(func(_ context.Context) pb.ServerAddress { return pb.ServerAddress(*master) }, grpcDialOption, &operation.VolumeAssignRequest{
Count: 1,
Replication: *replication,
})

3
unmaintained/stress_filer_upload/bench_filer_upload/bench_filer_upload.go

@ -66,6 +66,7 @@ func main() {
go func() {
ticker := time.NewTicker(1000 * time.Millisecond)
defer ticker.Stop()
var lastTime time.Time
var counter, size int64
@ -114,7 +115,7 @@ func uploadFileToFiler(client *http.Client, data []byte, filename, destination s
uri := destination + filename
request, err := http.NewRequest("POST", uri, body)
request, err := http.NewRequest(http.MethodPost, uri, body)
request.Header.Set("Content-Type", writer.FormDataContentType())
// request.Close = true // can not use this, which do not reuse http connection, impacting filer->volume also.

3
unmaintained/stress_filer_upload/stress_filer_upload_actual/stress_filer_upload.go

@ -72,6 +72,7 @@ func main() {
go func() {
ticker := time.NewTicker(500 * time.Millisecond)
defer ticker.Stop()
var lastTime time.Time
var counter, size int64
@ -129,7 +130,7 @@ func uploadFileToFiler(client *http.Client, filename, destination string) (size
uri := destination + file.Name()
request, err := http.NewRequest("POST", uri, body)
request, err := http.NewRequest(http.MethodPost, uri, body)
request.Header.Set("Content-Type", writer.FormDataContentType())
resp, err := client.Do(request)

3
unmaintained/volume_tailer/volume_tailer.go

@ -5,6 +5,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/pb"
"log"
"time"
"context"
"github.com/seaweedfs/seaweedfs/weed/operation"
"github.com/seaweedfs/seaweedfs/weed/security"
@ -38,7 +39,7 @@ func main() {
sinceTimeNs = time.Now().Add(-*rewindDuration).UnixNano()
}
err := operation.TailVolume(func() pb.ServerAddress { return pb.ServerAddress(*master) }, grpcDialOption, vid, uint64(sinceTimeNs), *timeoutSeconds, func(n *needle.Needle) (err error) {
err := operation.TailVolume(func(_ context.Context) pb.ServerAddress { return pb.ServerAddress(*master) }, grpcDialOption, vid, uint64(sinceTimeNs), *timeoutSeconds, func(n *needle.Needle) (err error) {
if n.Size == 0 {
println("-", n.String())
return nil

3
weed/cluster/lock_manager/distributed_lock_manager.go

@ -55,7 +55,8 @@ func (dlm *DistributedLockManager) FindLockOwner(key string) (owner string, move
return
}
if movedTo != dlm.Host {
glog.V(0).Infof("lock %s not on current %s but on %s", key, dlm.Host, movedTo)
servers := dlm.LockRing.GetSnapshot()
glog.V(0).Infof("lock %s not on current %s but on %s from %v", key, dlm.Host, movedTo, servers)
return
}
owner, err = dlm.lockManager.GetLockOwner(key)

11
weed/cluster/lock_manager/lock_ring.go

@ -1,6 +1,7 @@
package lock_manager
import (
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/util"
"sort"
@ -39,9 +40,11 @@ func (r *LockRing) SetTakeSnapshotCallback(onTakeSnapshot func(snapshot []pb.Ser
// AddServer adds a server to the ring
// if the previous snapshot passed the snapshot interval, create a new snapshot
func (r *LockRing) AddServer(server pb.ServerAddress) {
glog.V(0).Infof("add server %v", server)
r.Lock()
if _, found := r.candidateServers[server]; found {
glog.V(0).Infof("add server: already exists %v", server)
r.Unlock()
return
}
@ -53,6 +56,8 @@ func (r *LockRing) AddServer(server pb.ServerAddress) {
}
func (r *LockRing) RemoveServer(server pb.ServerAddress) {
glog.V(0).Infof("remove server %v", server)
r.Lock()
if _, found := r.candidateServers[server]; !found {
@ -72,7 +77,13 @@ func (r *LockRing) SetSnapshot(servers []pb.ServerAddress) {
return servers[i] < servers[j]
})
r.Lock()
r.lastUpdateTime = time.Now()
// init candidateServers
for _, server := range servers {
r.candidateServers[server] = struct{}{}
}
r.Unlock()
r.addOneSnapshot(servers)

3
weed/command/backup.go

@ -1,6 +1,7 @@
package command
import (
"context"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/pb"
@ -74,7 +75,7 @@ func runBackup(cmd *Command, args []string) bool {
vid := needle.VolumeId(*s.volumeId)
// find volume location, replication, ttl info
lookup, err := operation.LookupVolumeId(func() pb.ServerAddress { return pb.ServerAddress(*s.master) }, grpcDialOption, vid.String())
lookup, err := operation.LookupVolumeId(func(_ context.Context) pb.ServerAddress { return pb.ServerAddress(*s.master) }, grpcDialOption, vid.String())
if err != nil {
fmt.Printf("Error looking up volume %d: %v\n", vid, err)
return true

8
weed/command/benchmark.go

@ -2,6 +2,7 @@ package command
import (
"bufio"
"context"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/pb"
"io"
@ -128,8 +129,9 @@ func runBenchmark(cmd *Command, args []string) bool {
}
b.masterClient = wdclient.NewMasterClient(b.grpcDialOption, "", "client", "", "", "", *pb.ServerAddresses(*b.masters).ToServiceDiscovery())
go b.masterClient.KeepConnectedToMaster()
b.masterClient.WaitUntilConnected()
ctx := context.Background()
go b.masterClient.KeepConnectedToMaster(ctx)
b.masterClient.WaitUntilConnected(ctx)
if *b.write {
benchWrite()
@ -210,7 +212,7 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) {
}
var jwtAuthorization security.EncodedJwt
if isSecure {
jwtAuthorization = operation.LookupJwt(b.masterClient.GetMaster(), b.grpcDialOption, df.fp.Fid)
jwtAuthorization = operation.LookupJwt(b.masterClient.GetMaster(context.Background()), b.grpcDialOption, df.fp.Fid)
}
if e := util.Delete(fmt.Sprintf("http://%s/%s", df.fp.Server, df.fp.Fid), string(jwtAuthorization)); e == nil {
s.completed++
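
`KeepConnectedToMaster` and `WaitUntilConnected` now take a `context.Context`, so the keep-alive loop can be stopped by cancellation instead of living for the whole process. A sketch of the resulting call pattern, assuming an already constructed `wdclient.MasterClient`:

```go
package example

import (
	"context"

	"github.com/seaweedfs/seaweedfs/weed/wdclient"
)

// connectToMaster starts the keep-alive loop and blocks until a master
// responds; cancelling ctx ends both, which the old no-argument
// signatures could not do.
func connectToMaster(ctx context.Context, mc *wdclient.MasterClient) {
	go mc.KeepConnectedToMaster(ctx)
	mc.WaitUntilConnected(ctx)
}
```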

3
weed/command/download.go

@ -1,6 +1,7 @@
package command
import (
"context"
"fmt"
"io"
"net/http"
@ -50,7 +51,7 @@ func runDownload(cmd *Command, args []string) bool {
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
for _, fid := range args {
if e := downloadToFile(func() pb.ServerAddress { return pb.ServerAddress(*d.server) }, grpcDialOption, fid, util.ResolvePath(*d.dir)); e != nil {
if e := downloadToFile(func(_ context.Context) pb.ServerAddress { return pb.ServerAddress(*d.server) }, grpcDialOption, fid, util.ResolvePath(*d.dir)); e != nil {
fmt.Println("Download Error: ", fid, e)
}
}

2
weed/command/filer_backup.go

@ -158,7 +158,7 @@ func doFilerBackup(grpcDialOption grpc.DialOption, backupOption *FilerBackupOpti
DirectoriesToWatch: nil,
StartTsNs: startFrom.UnixNano(),
StopTsNs: 0,
EventErrorType: pb.TrivialOnError,
EventErrorType: pb.RetryForeverOnError,
}
return pb.FollowMetadata(sourceFiler, grpcDialOption, metadataFollowOption, processEventFnWithOffset)
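
The same `TrivialOnError` → `RetryForeverOnError` switch recurs below in `filer_meta_backup.go`, `filer_remote_gateway_buckets.go`, and `filer_remote_sync_dir.go`: a failed event is now retried indefinitely rather than logged and skipped, which matters for backup and replication correctness. A sketch of the option struct involved, using only field names that appear elsewhere in this diff (the client name is illustrative):

```go
package example

import (
	"time"

	"github.com/seaweedfs/seaweedfs/weed/pb"
)

// followOption builds the subscription options shared by the sync and
// backup commands; only EventErrorType changed in this commit.
func followOption(startFrom time.Time) *pb.MetadataFollowOption {
	return &pb.MetadataFollowOption{
		ClientName:     "backup", // illustrative
		PathPrefix:     "/",
		StartTsNs:      startFrom.UnixNano(),
		StopTsNs:       0,
		EventErrorType: pb.RetryForeverOnError, // was pb.TrivialOnError
	}
}
```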

2
weed/command/filer_copy.go

@ -472,7 +472,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File,
for _, chunk := range chunks {
fileIds = append(fileIds, chunk.FileId)
}
operation.DeleteFiles(func() pb.ServerAddress {
operation.DeleteFiles(func(_ context.Context) pb.ServerAddress {
return pb.ServerAddress(copy.masters[0])
}, false, worker.options.grpcDialOption, fileIds)
return uploadError

6
weed/command/filer_meta_backup.go

@ -63,9 +63,9 @@ func runFilerMetaBackup(cmd *Command, args []string) bool {
v.SetConfigFile(*metaBackup.backupFilerConfig)
if err := v.ReadInConfig(); err != nil { // Handle errors reading the config file
glog.Fatalf("Failed to load %s file.\nPlease use this command to generate the a %s.toml file\n"+
glog.Fatalf("Failed to load %s file: %v\nPlease use this command to generate the a %s.toml file\n"+
" weed scaffold -config=%s -output=.\n\n\n",
*metaBackup.backupFilerConfig, "backup_filer", "filer")
*metaBackup.backupFilerConfig, err, "backup_filer", "filer")
}
if err := metaBackup.initStore(v); err != nil {
@ -207,7 +207,7 @@ func (metaBackup *FilerMetaBackupOptions) streamMetadataBackup() error {
DirectoriesToWatch: nil,
StartTsNs: startTime.UnixNano(),
StopTsNs: 0,
EventErrorType: pb.TrivialOnError,
EventErrorType: pb.RetryForeverOnError,
}
return pb.FollowMetadata(pb.ServerAddress(*metaBackup.filerAddress), metaBackup.grpcDialOption, metadataFollowOption, processEventFnWithOffset)

2
weed/command/filer_remote_gateway_buckets.go

@ -60,7 +60,7 @@ func (option *RemoteGatewayOptions) followBucketUpdatesAndUploadToRemote(filerSo
DirectoriesToWatch: nil,
StartTsNs: lastOffsetTs.UnixNano(),
StopTsNs: 0,
EventErrorType: pb.TrivialOnError,
EventErrorType: pb.RetryForeverOnError,
}
return pb.FollowMetadata(pb.ServerAddress(*option.filerAddress), option.grpcDialOption, metadataFollowOption, processEventFnWithOffset)

2
weed/command/filer_remote_sync_dir.go

@ -74,7 +74,7 @@ func followUpdatesAndUploadToRemote(option *RemoteSyncOptions, filerSource *sour
DirectoriesToWatch: nil,
StartTsNs: lastOffsetTs.UnixNano(),
StopTsNs: 0,
EventErrorType: pb.TrivialOnError,
EventErrorType: pb.RetryForeverOnError,
}
return pb.FollowMetadata(pb.ServerAddress(*option.filerAddress), option.grpcDialOption, metadataFollowOption, processEventFnWithOffset)

58
weed/command/fix.go

@ -32,12 +32,15 @@ var cmdFix = &Command{
var (
fixVolumeCollection = cmdFix.Flag.String("collection", "", "an optional volume collection name, if specified only it will be processed")
fixVolumeId = cmdFix.Flag.Int64("volumeId", 0, "an optional volume id, if not 0 (default) only it will be processed")
fixIncludeDeleted = cmdFix.Flag.Bool("includeDeleted", true, "include deleted entries in the index file")
fixIgnoreError = cmdFix.Flag.Bool("ignoreError", false, "optional; if true, keep processing despite errors")
)
type VolumeFileScanner4Fix struct {
version needle.Version
nm *needle_map.MemDb
version needle.Version
nm *needle_map.MemDb
nmDeleted *needle_map.MemDb
includeDeleted bool
}
func (scanner *VolumeFileScanner4Fix) VisitSuperBlock(superBlock super_block.SuperBlock) error {
@ -50,13 +53,20 @@ func (scanner *VolumeFileScanner4Fix) ReadNeedleBody() bool {
}
func (scanner *VolumeFileScanner4Fix) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error {
glog.V(2).Infof("key %d offset %d size %d disk_size %d compressed %v", n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsCompressed())
glog.V(2).Infof("key %v offset %d size %d disk_size %d compressed %v", n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsCompressed())
if n.Size.IsValid() {
pe := scanner.nm.Set(n.Id, types.ToOffset(offset), n.Size)
glog.V(2).Infof("saved %d with error %v", n.Size, pe)
if pe := scanner.nm.Set(n.Id, types.ToOffset(offset), n.Size); pe != nil {
return fmt.Errorf("saved %d with error %v", n.Size, pe)
}
} else {
glog.V(2).Infof("skipping deleted file ...")
return scanner.nm.Delete(n.Id)
if scanner.includeDeleted {
if pe := scanner.nmDeleted.Set(n.Id, types.ToOffset(offset), types.TombstoneFileSize); pe != nil {
return fmt.Errorf("saved deleted %d with error %v", n.Size, pe)
}
} else {
glog.V(2).Infof("skipping deleted file ...")
return scanner.nm.Delete(n.Id)
}
}
return nil
}
@ -109,21 +119,45 @@ func runFix(cmd *Command, args []string) bool {
if *fixVolumeId != 0 && *fixVolumeId != volumeId {
continue
}
doFixOneVolume(basePath, baseFileName, collection, volumeId)
doFixOneVolume(basePath, baseFileName, collection, volumeId, *fixIncludeDeleted)
}
}
return true
}
func doFixOneVolume(basepath string, baseFileName string, collection string, volumeId int64) {
func SaveToIdx(scanner *VolumeFileScanner4Fix, idxName string) error {
idxFile, err := os.OpenFile(idxName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
if err != nil {
return err
}
defer func() {
idxFile.Close()
}()
return scanner.nm.AscendingVisit(func(value needle_map.NeedleValue) error {
_, err := idxFile.Write(value.ToBytes())
if scanner.includeDeleted && err == nil {
if deleted, ok := scanner.nmDeleted.Get(value.Key); ok {
_, err = idxFile.Write(deleted.ToBytes())
}
}
return err
})
}
func doFixOneVolume(basepath string, baseFileName string, collection string, volumeId int64, fixIncludeDeleted bool) {
indexFileName := path.Join(basepath, baseFileName+".idx")
nm := needle_map.NewMemDb()
nmDeleted := needle_map.NewMemDb()
defer nm.Close()
defer nmDeleted.Close()
vid := needle.VolumeId(volumeId)
scanner := &VolumeFileScanner4Fix{
nm: nm,
nm: nm,
nmDeleted: nmDeleted,
includeDeleted: fixIncludeDeleted,
}
if err := storage.ScanVolumeFile(basepath, collection, vid, storage.NeedleMapInMemory, scanner); err != nil {
@ -135,12 +169,12 @@ func doFixOneVolume(basepath string, baseFileName string, collection string, vol
}
}
if err := nm.SaveToIdx(indexFileName); err != nil {
os.Remove(indexFileName)
if err := SaveToIdx(scanner, indexFileName); err != nil {
err := fmt.Errorf("save to .idx File: %v", err)
if *fixIgnoreError {
glog.Error(err)
} else {
os.Remove(indexFileName)
glog.Fatal(err)
}
}
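
With `-includeDeleted` (default true), deleted needles are collected in a second `MemDb` under `TombstoneFileSize`, and `SaveToIdx` writes each tombstone immediately after its live record, so replaying the rebuilt .idx file reproduces the deletion. A toy sketch of that replay semantics, using -1 to stand in for the tombstone size (an assumption about the on-disk convention, not SeaweedFS code):

```go
package main

import "fmt"

// replayIdx applies idx records in order; a size of -1 marks the needle
// as deleted, so "put then tombstone" nets out to an absent needle.
func replayIdx(records [][2]int64) map[int64]bool {
	live := map[int64]bool{}
	for _, rec := range records {
		key, size := rec[0], rec[1]
		if size == -1 {
			delete(live, key)
		} else {
			live[key] = true
		}
	}
	return live
}

func main() {
	fmt.Println(replayIdx([][2]int64{{7, 120}, {7, -1}, {9, 300}}))
	// map[9:true] - needle 7 ends up deleted after replay
}
```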

9
weed/command/master.go

@ -1,6 +1,7 @@
package command
import (
"context"
"fmt"
"net/http"
"os"
@ -179,10 +180,10 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) {
}
}
ms.SetRaftServer(raftServer)
r.HandleFunc("/cluster/status", raftServer.StatusHandler).Methods("GET")
r.HandleFunc("/cluster/healthz", raftServer.HealthzHandler).Methods("GET", "HEAD")
r.HandleFunc("/cluster/status", raftServer.StatusHandler).Methods(http.MethodGet)
r.HandleFunc("/cluster/healthz", raftServer.HealthzHandler).Methods(http.MethodGet, http.MethodHead)
if *masterOption.raftHashicorp {
r.HandleFunc("/raft/stats", raftServer.StatsRaftHandler).Methods("GET")
r.HandleFunc("/raft/stats", raftServer.StatsRaftHandler).Methods(http.MethodGet)
}
// starting grpc server
grpcPort := *masterOption.portGrpc
@ -218,7 +219,7 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) {
}()
}
go ms.MasterClient.KeepConnectedToMaster()
go ms.MasterClient.KeepConnectedToMaster(context.Background())
// start http server
var (

2
weed/command/master_follower.go

@ -140,7 +140,7 @@ func startMasterFollower(masterOptions MasterOptions) {
}
go grpcS.Serve(grpcL)
go ms.MasterClient.KeepConnectedToMaster()
go ms.MasterClient.KeepConnectedToMaster(context.Background())
// start http server
httpS := &http.Server{Handler: r}

7
weed/command/scaffold/filer.toml

@ -280,6 +280,13 @@ tls_client_key_file=""
[mongodb]
enabled = false
uri = "mongodb://localhost:27017"
username = ""
password = ""
ssl = false
ssl_ca_file = ""
ssl_cert_file = ""
ssl_key_file = "
insecure_skip_verify = false
option_pool_size = 0
database = "seaweedfs"

2
weed/command/server.go

@ -97,7 +97,7 @@ func init() {
masterOptions.metricsIntervalSec = cmdServer.Flag.Int("master.metrics.intervalSeconds", 15, "Prometheus push interval in seconds")
masterOptions.raftResumeState = cmdServer.Flag.Bool("master.resumeState", false, "resume previous state on start master server")
masterOptions.raftHashicorp = cmdServer.Flag.Bool("master.raftHashicorp", false, "use hashicorp raft")
masterOptions.raftBootstrap = cmdMaster.Flag.Bool("master.raftBootstrap", false, "Whether to bootstrap the Raft cluster")
masterOptions.raftBootstrap = cmdServer.Flag.Bool("master.raftBootstrap", false, "Whether to bootstrap the Raft cluster")
masterOptions.heartbeatInterval = cmdServer.Flag.Duration("master.heartbeatInterval", 300*time.Millisecond, "heartbeat interval of master servers, and will be randomly multiplied by [1, 1.25)")
masterOptions.electionTimeout = cmdServer.Flag.Duration("master.electionTimeout", 10*time.Second, "election timeout of master servers")

4
weed/command/upload.go

@ -97,7 +97,7 @@ func runUpload(cmd *Command, args []string) bool {
if e != nil {
return e
}
results, e := operation.SubmitFiles(func() pb.ServerAddress { return pb.ServerAddress(*upload.master) }, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.diskType, *upload.maxMB, *upload.usePublicUrl)
results, e := operation.SubmitFiles(func(_ context.Context) pb.ServerAddress { return pb.ServerAddress(*upload.master) }, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.diskType, *upload.maxMB, *upload.usePublicUrl)
bytes, _ := json.Marshal(results)
fmt.Println(string(bytes))
if e != nil {
@ -119,7 +119,7 @@ func runUpload(cmd *Command, args []string) bool {
fmt.Println(e.Error())
return false
}
results, err := operation.SubmitFiles(func() pb.ServerAddress { return pb.ServerAddress(*upload.master) }, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.diskType, *upload.maxMB, *upload.usePublicUrl)
results, err := operation.SubmitFiles(func(_ context.Context) pb.ServerAddress { return pb.ServerAddress(*upload.master) }, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.diskType, *upload.maxMB, *upload.usePublicUrl)
if err != nil {
fmt.Println(err.Error())
return false

6
weed/filer/abstract_sql/abstract_sql_store.go

@ -7,6 +7,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3bucket"
"github.com/seaweedfs/seaweedfs/weed/util"
"strings"
"sync"
@ -140,6 +141,8 @@ func (store *AbstractSqlStore) getTxOrDB(ctx context.Context, fullpath util.Full
}
}
} else {
err = fmt.Errorf("invalid bucket name %s", bucket)
}
return
@ -340,6 +343,9 @@ func (store *AbstractSqlStore) Shutdown() {
}
func isValidBucket(bucket string) bool {
if s3bucket.VerifyS3BucketName(bucket) != nil {
return false
}
return bucket != DEFAULT_TABLE && bucket != ""
}

6
weed/filer/filechunk_manifest.go

@ -107,7 +107,7 @@ func fetchWholeChunk(bytesBuffer *bytes.Buffer, lookupFileIdFn wdclient.LookupFi
glog.Errorf("operation LookupFileId %s failed, err: %v", fileId, err)
return err
}
err = retriedStreamFetchChunkData(bytesBuffer, urlStrings, cipherKey, isGzipped, true, 0, 0)
err = retriedStreamFetchChunkData(bytesBuffer, urlStrings, "", cipherKey, isGzipped, true, 0, 0)
if err != nil {
return err
}
@ -123,7 +123,7 @@ func fetchChunkRange(buffer []byte, lookupFileIdFn wdclient.LookupFileIdFunction
return util.RetriedFetchChunkData(buffer, urlStrings, cipherKey, isGzipped, false, offset)
}
func retriedStreamFetchChunkData(writer io.Writer, urlStrings []string, cipherKey []byte, isGzipped bool, isFullChunk bool, offset int64, size int) (err error) {
func retriedStreamFetchChunkData(writer io.Writer, urlStrings []string, jwt string, cipherKey []byte, isGzipped bool, isFullChunk bool, offset int64, size int) (err error) {
var shouldRetry bool
var totalWritten int
@ -132,7 +132,7 @@ func retriedStreamFetchChunkData(writer io.Writer, urlStrings []string, cipherKe
for _, urlString := range urlStrings {
var localProcessed int
var writeErr error
shouldRetry, err = util.ReadUrlAsStream(urlString+"?readDeleted=true", cipherKey, isGzipped, isFullChunk, offset, size, func(data []byte) {
shouldRetry, err = util.ReadUrlAsStreamAuthenticated(urlString+"?readDeleted=true", jwt, cipherKey, isGzipped, isFullChunk, offset, size, func(data []byte) {
if totalWritten > localProcessed {
toBeSkipped := totalWritten - localProcessed
if len(data) <= toBeSkipped {

36
weed/filer/filer.go

@ -78,7 +78,7 @@ func NewFiler(masters pb.ServerDiscovery, grpcDialOption grpc.DialOption, filerH
return f
}
func (f *Filer) MaybeBootstrapFromPeers(self pb.ServerAddress, existingNodes []*master_pb.ClusterNodeUpdate, snapshotTime time.Time) (err error) {
func (f *Filer) MaybeBootstrapFromOnePeer(self pb.ServerAddress, existingNodes []*master_pb.ClusterNodeUpdate, snapshotTime time.Time) (err error) {
if len(existingNodes) == 0 {
return
}
@ -91,25 +91,13 @@ func (f *Filer) MaybeBootstrapFromPeers(self pb.ServerAddress, existingNodes []*
}
glog.V(0).Infof("bootstrap from %v clientId:%d", earliestNode.Address, f.UniqueFilerId)
f.UniqueFilerEpoch++
metadataFollowOption := &pb.MetadataFollowOption{
ClientName: "bootstrap",
ClientId: f.UniqueFilerId,
ClientEpoch: f.UniqueFilerEpoch,
SelfSignature: f.Signature,
PathPrefix: "/",
AdditionalPathPrefixes: nil,
DirectoriesToWatch: nil,
StartTsNs: 0,
StopTsNs: snapshotTime.UnixNano(),
EventErrorType: pb.FatalOnError,
}
err = pb.FollowMetadata(pb.ServerAddress(earliestNode.Address), f.GrpcDialOption, metadataFollowOption, func(resp *filer_pb.SubscribeMetadataResponse) error {
return Replay(f.Store, resp)
return pb.WithFilerClient(false, f.UniqueFilerId, pb.ServerAddress(earliestNode.Address), f.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
return filer_pb.StreamBfs(client, "/", snapshotTime.UnixNano(), func(parentPath util.FullPath, entry *filer_pb.Entry) error {
return f.Store.InsertEntry(context.Background(), FromPbEntry(string(parentPath), entry))
})
})
return
}
func (f *Filer) AggregateFromPeers(self pb.ServerAddress, existingNodes []*master_pb.ClusterNodeUpdate, startFrom time.Time) {
@ -143,8 +131,8 @@ func (f *Filer) AggregateFromPeers(self pb.ServerAddress, existingNodes []*maste
}
func (f *Filer) ListExistingPeerUpdates() (existingNodes []*master_pb.ClusterNodeUpdate) {
return cluster.ListExistingPeerUpdates(f.GetMaster(), f.GrpcDialOption, f.MasterClient.FilerGroup, cluster.FilerType)
func (f *Filer) ListExistingPeerUpdates(ctx context.Context) (existingNodes []*master_pb.ClusterNodeUpdate) {
return cluster.ListExistingPeerUpdates(f.GetMaster(ctx), f.GrpcDialOption, f.MasterClient.FilerGroup, cluster.FilerType)
}
func (f *Filer) SetStore(store FilerStore) (isFresh bool) {
@ -177,12 +165,12 @@ func (f *Filer) GetStore() (store FilerStore) {
return f.Store
}
func (fs *Filer) GetMaster() pb.ServerAddress {
return fs.MasterClient.GetMaster()
func (fs *Filer) GetMaster(ctx context.Context) pb.ServerAddress {
return fs.MasterClient.GetMaster(ctx)
}
func (fs *Filer) KeepMasterClientConnected() {
fs.MasterClient.KeepConnectedToMaster()
func (fs *Filer) KeepMasterClientConnected(ctx context.Context) {
fs.MasterClient.KeepConnectedToMaster(ctx)
}
func (f *Filer) BeginTransaction(ctx context.Context) (context.Context, error) {

25
weed/filer/filer_conf.go

@ -27,7 +27,7 @@ const (
)
type FilerConf struct {
rules ptrie.Trie
rules ptrie.Trie[*filer_pb.FilerConf_PathConf]
}
func ReadFilerConf(filerGrpcAddress pb.ServerAddress, grpcDialOption grpc.DialOption, masterClient *wdclient.MasterClient) (*FilerConf, error) {
@ -55,7 +55,7 @@ func ReadFilerConf(filerGrpcAddress pb.ServerAddress, grpcDialOption grpc.DialOp
func NewFilerConf() (fc *FilerConf) {
fc = &FilerConf{
rules: ptrie.New(),
rules: ptrie.New[*filer_pb.FilerConf_PathConf](),
}
return fc
}
@ -120,8 +120,8 @@ func (fc *FilerConf) AddLocationConf(locConf *filer_pb.FilerConf_PathConf) (err
}
func (fc *FilerConf) DeleteLocationConf(locationPrefix string) {
rules := ptrie.New()
fc.rules.Walk(func(key []byte, value interface{}) bool {
rules := ptrie.New[*filer_pb.FilerConf_PathConf]()
fc.rules.Walk(func(key []byte, value *filer_pb.FilerConf_PathConf) bool {
if string(key) == locationPrefix {
return true
}
@ -135,9 +135,8 @@ func (fc *FilerConf) DeleteLocationConf(locationPrefix string) {
func (fc *FilerConf) MatchStorageRule(path string) (pathConf *filer_pb.FilerConf_PathConf) {
pathConf = &filer_pb.FilerConf_PathConf{}
fc.rules.MatchPrefix([]byte(path), func(key []byte, value interface{}) bool {
t := value.(*filer_pb.FilerConf_PathConf)
mergePathConf(pathConf, t)
fc.rules.MatchPrefix([]byte(path), func(key []byte, value *filer_pb.FilerConf_PathConf) bool {
mergePathConf(pathConf, value)
return true
})
return pathConf
@ -145,10 +144,9 @@ func (fc *FilerConf) MatchStorageRule(path string) (pathConf *filer_pb.FilerConf
func (fc *FilerConf) GetCollectionTtls(collection string) (ttls map[string]string) {
ttls = make(map[string]string)
fc.rules.Walk(func(key []byte, value interface{}) bool {
t := value.(*filer_pb.FilerConf_PathConf)
if t.Collection == collection {
ttls[t.LocationPrefix] = t.GetTtl()
fc.rules.Walk(func(key []byte, value *filer_pb.FilerConf_PathConf) bool {
if value.Collection == collection {
ttls[value.LocationPrefix] = value.GetTtl()
}
return true
})
@ -176,9 +174,8 @@ func mergePathConf(a, b *filer_pb.FilerConf_PathConf) {
func (fc *FilerConf) ToProto() *filer_pb.FilerConf {
m := &filer_pb.FilerConf{}
fc.rules.Walk(func(key []byte, value interface{}) bool {
pathConf := value.(*filer_pb.FilerConf_PathConf)
m.Locations = append(m.Locations, pathConf)
fc.rules.Walk(func(key []byte, value *filer_pb.FilerConf_PathConf) bool {
m.Locations = append(m.Locations, value)
return true
})
return m
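
With `ptrie` instantiated as `ptrie.Trie[*filer_pb.FilerConf_PathConf]`, the `Walk` and `MatchPrefix` callbacks receive typed values and the old `value.(*filer_pb.FilerConf_PathConf)` assertions disappear. A standalone sketch of the typed API shape; the `Put` signature is an assumption, since only `Walk` and `MatchPrefix` appear in this diff:

```go
package main

import (
	"fmt"

	"github.com/viant/ptrie"
)

func main() {
	rules := ptrie.New[string]()
	// Put's exact signature is assumed here; errors ignored for brevity.
	_ = rules.Put([]byte("/buckets/"), "bucket-wide rule")
	_ = rules.Put([]byte("/buckets/images/"), "image rule")

	// MatchPrefix visits every stored prefix of the key with a typed
	// value, so no interface{} type assertion is needed.
	rules.MatchPrefix([]byte("/buckets/images/cat.jpg"),
		func(key []byte, value string) bool {
			fmt.Printf("%s -> %s\n", key, value)
			return true // keep visiting longer prefixes
		})
}
```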

6
weed/filer/filer_delete_entry.go

@ -38,7 +38,7 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isR
return nil
})
if err != nil {
glog.V(0).Infof("delete directory %s: %v", p, err)
glog.V(2).Infof("delete directory %s: %v", p, err)
return fmt.Errorf("delete directory %s: %v", p, err)
}
}
@ -76,7 +76,7 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
}
if lastFileName == "" && !isRecursive && len(entries) > 0 {
// only for first iteration in the loop
glog.V(0).Infof("deleting a folder %s has children: %+v ...", entry.FullPath, entries[0].Name())
glog.V(2).Infof("deleting a folder %s has children: %+v ...", entry.FullPath, entries[0].Name())
return fmt.Errorf("%s: %s", MsgFailDelNonEmptyFolder, entry.FullPath)
}
@ -114,7 +114,7 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
}
f.NotifyUpdateEvent(ctx, entry, nil, shouldDeleteChunks, isFromOtherCluster, signatures)
f.DeleteChunks(chunksToDelete)
f.DeleteChunks(entry.FullPath, chunksToDelete)
return nil
}

59
weed/filer/filer_deletion.go

@ -2,7 +2,7 @@ package filer
import (
"github.com/seaweedfs/seaweedfs/weed/storage"
"math"
"github.com/seaweedfs/seaweedfs/weed/util"
"strings"
"time"
@ -114,7 +114,19 @@ func (f *Filer) DirectDeleteChunks(chunks []*filer_pb.FileChunk) {
f.doDeleteFileIds(fileIdsToDelete)
}
func (f *Filer) DeleteChunks(chunks []*filer_pb.FileChunk) {
func (f *Filer) DeleteUncommittedChunks(chunks []*filer_pb.FileChunk) {
f.doDeleteChunks(chunks)
}
func (f *Filer) DeleteChunks(fullpath util.FullPath, chunks []*filer_pb.FileChunk) {
rule := f.FilerConf.MatchStorageRule(string(fullpath))
if rule.DisableChunkDeletion {
return
}
f.doDeleteChunks(chunks)
}
func (f *Filer) doDeleteChunks(chunks []*filer_pb.FileChunk) {
for _, chunk := range chunks {
if !chunk.IsChunkManifest {
f.fileIdDeletionQueue.EnQueue(chunk.GetFileIdString())
@ -138,47 +150,18 @@ func (f *Filer) DeleteChunksNotRecursive(chunks []*filer_pb.FileChunk) {
}
func (f *Filer) deleteChunksIfNotNew(oldEntry, newEntry *Entry) {
if oldEntry == nil {
return
var oldChunks, newChunks []*filer_pb.FileChunk
if oldEntry != nil {
oldChunks = oldEntry.GetChunks()
}
if newEntry == nil {
f.DeleteChunks(oldEntry.GetChunks())
return
if newEntry != nil {
newChunks = newEntry.GetChunks()
}
var toDelete []*filer_pb.FileChunk
newChunkIds := make(map[string]bool)
newDataChunks, newManifestChunks, err := ResolveChunkManifest(f.MasterClient.GetLookupFileIdFunction(),
newEntry.GetChunks(), 0, math.MaxInt64)
toDelete, err := MinusChunks(f.MasterClient.GetLookupFileIdFunction(), oldChunks, newChunks)
if err != nil {
glog.Errorf("Failed to resolve new entry chunks when delete old entry chunks. new: %s, old: %s",
newEntry.GetChunks(), oldEntry.Chunks)
glog.Errorf("Failed to resolve old entry chunks when delete old entry chunks. new: %s, old: %s", newChunks, oldChunks)
return
}
for _, newChunk := range newDataChunks {
newChunkIds[newChunk.GetFileIdString()] = true
}
for _, newChunk := range newManifestChunks {
newChunkIds[newChunk.GetFileIdString()] = true
}
oldDataChunks, oldManifestChunks, err := ResolveChunkManifest(f.MasterClient.GetLookupFileIdFunction(),
oldEntry.GetChunks(), 0, math.MaxInt64)
if err != nil {
glog.Errorf("Failed to resolve old entry chunks when delete old entry chunks. new: %s, old: %s",
newEntry.GetChunks(), oldEntry.GetChunks())
return
}
for _, oldChunk := range oldDataChunks {
if _, found := newChunkIds[oldChunk.GetFileIdString()]; !found {
toDelete = append(toDelete, oldChunk)
}
}
for _, oldChunk := range oldManifestChunks {
if _, found := newChunkIds[oldChunk.GetFileIdString()]; !found {
toDelete = append(toDelete, oldChunk)
}
}
f.DeleteChunksNotRecursive(toDelete)
}
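Taken together with the filer_delete_entry.go change above, chunk deletion is now path-aware and diff-based: DeleteChunks consults the storage rule matched for the entry's path before enqueueing anything, and deleteChunksIfNotNew hands the old-versus-new comparison to MinusChunks instead of resolving chunk manifests inline. A minimal sketch of the same gate-then-diff flow; the chunk and rule types and matchRule are hypothetical stand-ins for the filer's own:

```go
package main

import "fmt"

type chunk struct{ fileId string }
type rule struct{ DisableChunkDeletion bool }

// matchRule is a hypothetical stand-in for FilerConf.MatchStorageRule.
func matchRule(path string) rule {
	return rule{DisableChunkDeletion: path == "/buckets/archive"}
}

// minusChunks returns the old chunks whose file ids are absent from cur,
// the same set difference deleteChunksIfNotNew now gets from MinusChunks.
func minusChunks(old, cur []chunk) (toDelete []chunk) {
	keep := make(map[string]bool, len(cur))
	for _, c := range cur {
		keep[c.fileId] = true
	}
	for _, c := range old {
		if !keep[c.fileId] {
			toDelete = append(toDelete, c)
		}
	}
	return
}

func deleteChunks(fullpath string, chunks []chunk) {
	if matchRule(fullpath).DisableChunkDeletion {
		return // the matched path rule vetoes physical deletion
	}
	for _, c := range chunks {
		fmt.Println("enqueue for deletion:", c.fileId)
	}
}

func main() {
	old := []chunk{{"1,a"}, {"1,b"}}
	cur := []chunk{{"1,b"}}
	deleteChunks("/buckets/photos/x.jpg", minusChunks(old, cur))
}
```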

2
weed/filer/filer_notify.go

@ -83,7 +83,7 @@ func (f *Filer) logMetaEvent(ctx context.Context, fullpath string, eventNotifica
return
}
f.LocalMetaLogBuffer.AddToBuffer([]byte(dir), data, event.TsNs)
f.LocalMetaLogBuffer.AddDataToBuffer([]byte(dir), data, event.TsNs)
}

12
weed/filer/filerstore_wrapper.go

@ -32,7 +32,7 @@ type VirtualFilerStore interface {
type FilerStoreWrapper struct {
defaultStore FilerStore
pathToStore ptrie.Trie
pathToStore ptrie.Trie[string]
storeIdToStore map[string]FilerStore
}
@ -42,7 +42,7 @@ func NewFilerStoreWrapper(store FilerStore) *FilerStoreWrapper {
}
return &FilerStoreWrapper{
defaultStore: store,
pathToStore: ptrie.New(),
pathToStore: ptrie.New[string](),
storeIdToStore: make(map[string]FilerStore),
}
}
@ -85,12 +85,12 @@ func (fsw *FilerStoreWrapper) AddPathSpecificStore(path string, storeId string,
func (fsw *FilerStoreWrapper) getActualStore(path util.FullPath) (store FilerStore) {
store = fsw.defaultStore
if path == "/" {
if path == "/" || path == "//" {
return
}
var storeId string
fsw.pathToStore.MatchPrefix([]byte(path), func(key []byte, value interface{}) bool {
storeId = value.(string)
fsw.pathToStore.MatchPrefix([]byte(path), func(key []byte, value string) bool {
storeId = value
return false
})
if storeId != "" {
@ -182,7 +182,7 @@ func (fsw *FilerStoreWrapper) DeleteEntry(ctx context.Context, fp util.FullPath)
}()
existingEntry, findErr := fsw.FindEntry(ctx, fp)
if findErr == filer_pb.ErrNotFound {
if findErr == filer_pb.ErrNotFound || existingEntry == nil {
return nil
}
if len(existingEntry.HardLinkId) != 0 {
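Moving pathToStore to ptrie.Trie[string] lets MatchPrefix hand the callback a typed store id, removing the interface{} assertion. A sketch of the same idea of routing a path to a store id by prefix; this version scans a plain map and prefers the longest prefix, whereas the trie does the matching in one walk:

```go
package route

import "strings"

// routeStore picks the store id registered under a prefix of path.
// Longest prefix wins, a common tie-break for nested path rules.
func routeStore(rules map[string]string, path string) (storeId string, ok bool) {
	best := -1
	for prefix, id := range rules {
		if strings.HasPrefix(path, prefix) && len(prefix) > best {
			best, storeId, ok = len(prefix), id, true
		}
	}
	return
}
```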

6
weed/filer/leveldb2/leveldb2_store.go

@ -25,8 +25,9 @@ func init() {
}
type LevelDB2Store struct {
dbs []*leveldb.DB
dbCount int
dbs []*leveldb.DB
dbCount int
ReadOnly bool
}
func (store *LevelDB2Store) GetName() string {
@ -49,6 +50,7 @@ func (store *LevelDB2Store) initialize(dir string, dbCount int) (err error) {
BlockCacheCapacity: 32 * 1024 * 1024, // default value is 8MiB
WriteBuffer: 16 * 1024 * 1024, // default value is 4MiB
Filter: filter.NewBloomFilter(8), // false positive rate 0.02
ReadOnly: store.ReadOnly,
}
for d := 0; d < dbCount; d++ {

9
weed/filer/leveldb3/leveldb3_store.go

@ -31,9 +31,10 @@ func init() {
}
type LevelDB3Store struct {
dir string
dbs map[string]*leveldb.DB
dbsLock sync.RWMutex
dir string
dbs map[string]*leveldb.DB
dbsLock sync.RWMutex
ReadOnly bool
}
func (store *LevelDB3Store) GetName() string {
@ -69,12 +70,14 @@ func (store *LevelDB3Store) loadDB(name string) (*leveldb.DB, error) {
BlockCacheCapacity: 32 * 1024 * 1024, // default value is 8MiB
WriteBuffer: 16 * 1024 * 1024, // default value is 4MiB
Filter: bloom,
ReadOnly: store.ReadOnly,
}
if name != DEFAULT {
opts = &opt.Options{
BlockCacheCapacity: 16 * 1024 * 1024, // default value is 8MiB
WriteBuffer: 8 * 1024 * 1024, // default value is 4MiB
Filter: bloom,
ReadOnly: store.ReadOnly,
}
}
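Both the LevelDB2 and LevelDB3 stores now expose a ReadOnly flag that is threaded straight into goleveldb's opt.Options, so a filer store directory can be opened for inspection without risking writes. A minimal sketch of opening a goleveldb database read-only; the path is a placeholder:

```go
package main

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/opt"
)

func main() {
	// ReadOnly rejects writes; reads and iteration still work as usual.
	db, err := leveldb.OpenFile("/tmp/filerldb3/default", &opt.Options{ReadOnly: true})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	if _, err := db.Get([]byte("some-key"), nil); err != nil {
		log.Println("get:", err)
	}
}
```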

2
weed/filer/meta_aggregator.go

@ -168,7 +168,7 @@ func (ma *MetaAggregator) doSubscribeToOneFiler(f *Filer, self pb.ServerAddress,
}
dir := event.Directory
// println("received meta change", dir, "size", len(data))
ma.MetaLogBuffer.AddToBuffer([]byte(dir), data, event.TsNs)
ma.MetaLogBuffer.AddDataToBuffer([]byte(dir), data, event.TsNs)
if maybeReplicateMetadataChange != nil {
maybeReplicateMetadataChange(event)
}

38
weed/filer/meta_replay.go

@ -2,6 +2,7 @@ package filer
import (
"context"
"sync"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
@ -35,3 +36,40 @@ func Replay(filerStore FilerStore, resp *filer_pb.SubscribeMetadataResponse) err
return nil
}
// ParallelProcessDirectoryStructure processes each entry in parallel, and also ensures parent directories are processed first.
// This also assumes the parent directories are in the entryChan already.
func ParallelProcessDirectoryStructure(entryChan chan *Entry, concurrency int, eachEntryFn func(entry *Entry)(error)) (firstErr error) {
executors := util.NewLimitedConcurrentExecutor(concurrency)
var wg sync.WaitGroup
for entry := range entryChan {
wg.Add(1)
if entry.IsDirectory() {
func() {
defer wg.Done()
if err := eachEntryFn(entry); err != nil {
if firstErr == nil {
firstErr = err
}
}
}()
} else {
executors.Execute(func() {
defer wg.Done()
if err := eachEntryFn(entry); err != nil {
if firstErr == nil {
firstErr = err
}
}
})
}
if firstErr != nil {
break
}
}
wg.Wait()
return
}
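ParallelProcessDirectoryStructure runs directories inline on the consuming goroutine, which preserves parent-before-child ordering, while files fan out to a bounded executor. A minimal usage sketch, assuming the producer really does emit parents before children (the documented precondition) and using a hypothetical per-entry action:

```go
package main

import (
	"fmt"

	"github.com/seaweedfs/seaweedfs/weed/filer"
)

func main() {
	entryChan := make(chan *filer.Entry, 128)
	go func() {
		defer close(entryChan)
		// hypothetical producer: a tree walk that sends each directory
		// before any of its children, as the function requires
		for _, e := range []*filer.Entry{} {
			entryChan <- e
		}
	}()
	err := filer.ParallelProcessDirectoryStructure(entryChan, 8, func(e *filer.Entry) error {
		fmt.Println("replaying", e.FullPath) // hypothetical per-entry action
		return nil
	})
	if err != nil {
		fmt.Println("replay failed:", err)
	}
}
```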

72
weed/filer/mongodb/mongodb_store.go

@ -2,7 +2,12 @@ package mongodb
import (
"context"
"crypto/tls"
"crypto/x509"
"fmt"
"os"
"time"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
@ -10,7 +15,6 @@ import (
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"time"
)
func init() {
@ -37,17 +41,44 @@ func (store *MongodbStore) Initialize(configuration util.Configuration, prefix s
store.database = configuration.GetString(prefix + "database")
store.collectionName = "filemeta"
poolSize := configuration.GetInt(prefix + "option_pool_size")
return store.connection(configuration.GetString(prefix+"uri"), uint64(poolSize))
uri := configuration.GetString(prefix + "uri")
ssl := configuration.GetBool(prefix + "ssl")
sslCAFile := configuration.GetString(prefix + "ssl_ca_file")
sslCertFile := configuration.GetString(prefix + "ssl_cert_file")
sslKeyFile := configuration.GetString(prefix + "ssl_key_file")
username := configuration.GetString(prefix + "username")
password := configuration.GetString(prefix + "password")
insecure_skip_verify := configuration.GetBool(prefix + "insecure_skip_verify")
return store.connection(uri, uint64(poolSize), ssl, sslCAFile, sslCertFile, sslKeyFile, username, password, insecure_skip_verify)
}
func (store *MongodbStore) connection(uri string, poolSize uint64) (err error) {
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
func (store *MongodbStore) connection(uri string, poolSize uint64, ssl bool, sslCAFile, sslCertFile, sslKeyFile string, username, password string, insecure bool) (err error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
opts := options.Client().ApplyURI(uri)
if poolSize > 0 {
opts.SetMaxPoolSize(poolSize)
}
if ssl {
tlsConfig, err := configureTLS(sslCAFile, sslCertFile, sslKeyFile, insecure)
if err != nil {
return err
}
opts.SetTLSConfig(tlsConfig)
}
if username != "" && password != "" {
creds := options.Credential{
Username: username,
Password: password,
}
opts.SetAuth(creds)
}
client, err := mongo.Connect(ctx, opts)
if err != nil {
return err
@ -55,10 +86,36 @@ func (store *MongodbStore) connection(uri string, poolSize uint64) (err error) {
c := client.Database(store.database).Collection(store.collectionName)
err = store.indexUnique(c)
store.connect = client
return err
}
func configureTLS(caFile, certFile, keyFile string, insecure bool) (*tls.Config, error) {
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
if err != nil {
return nil, fmt.Errorf("could not load client key pair: %s", err)
}
caCert, err := os.ReadFile(caFile)
if err != nil {
return nil, fmt.Errorf("could not read CA certificate: %s", err)
}
caCertPool := x509.NewCertPool()
if !caCertPool.AppendCertsFromPEM(caCert) {
return nil, fmt.Errorf("failed to append CA certificate")
}
tlsConfig := &tls.Config{
Certificates: []tls.Certificate{cert},
RootCAs: caCertPool,
InsecureSkipVerify: insecure,
}
return tlsConfig, nil
}
func (store *MongodbStore) createIndex(c *mongo.Collection, index mongo.IndexModel, opts *options.CreateIndexesOptions) error {
_, err := c.Indexes().CreateOne(context.Background(), index, opts)
return err
@ -93,13 +150,10 @@ func (store *MongodbStore) RollbackTransaction(ctx context.Context) error {
}
func (store *MongodbStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {
return store.UpdateEntry(ctx, entry)
}
func (store *MongodbStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {
dir, name := entry.FullPath.DirAndName()
meta, err := entry.EncodeAttributesAndChunks()
if err != nil {
@ -126,7 +180,6 @@ func (store *MongodbStore) UpdateEntry(ctx context.Context, entry *filer.Entry)
}
func (store *MongodbStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer.Entry, err error) {
dir, name := fullpath.DirAndName()
var data Model
@ -154,7 +207,6 @@ func (store *MongodbStore) FindEntry(ctx context.Context, fullpath util.FullPath
}
func (store *MongodbStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) error {
dir, name := fullpath.DirAndName()
where := bson.M{"directory": dir, "name": name}
@ -167,7 +219,6 @@ func (store *MongodbStore) DeleteEntry(ctx context.Context, fullpath util.FullPa
}
func (store *MongodbStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) error {
where := bson.M{"directory": fullpath}
_, err := store.connect.Database(store.database).Collection(store.collectionName).DeleteMany(ctx, where)
if err != nil {
@ -182,7 +233,6 @@ func (store *MongodbStore) ListDirectoryPrefixedEntries(ctx context.Context, dir
}
func (store *MongodbStore) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
var where = bson.M{"directory": string(dirPath), "name": bson.M{"$gt": startFileName}}
if includeStartFile {
where["name"] = bson.M{

12
weed/filer/remote_storage.go

@ -21,13 +21,13 @@ const REMOTE_STORAGE_CONF_SUFFIX = ".conf"
const REMOTE_STORAGE_MOUNT_FILE = "mount.mapping"
type FilerRemoteStorage struct {
rules ptrie.Trie
rules ptrie.Trie[*remote_pb.RemoteStorageLocation]
storageNameToConf map[string]*remote_pb.RemoteConf
}
func NewFilerRemoteStorage() (rs *FilerRemoteStorage) {
rs = &FilerRemoteStorage{
rules: ptrie.New(),
rules: ptrie.New[*remote_pb.RemoteStorageLocation](),
storageNameToConf: make(map[string]*remote_pb.RemoteConf),
}
return rs
@ -82,9 +82,9 @@ func (rs *FilerRemoteStorage) mapDirectoryToRemoteStorage(dir util.FullPath, loc
}
func (rs *FilerRemoteStorage) FindMountDirectory(p util.FullPath) (mountDir util.FullPath, remoteLocation *remote_pb.RemoteStorageLocation) {
rs.rules.MatchPrefix([]byte(p), func(key []byte, value interface{}) bool {
rs.rules.MatchPrefix([]byte(p), func(key []byte, value *remote_pb.RemoteStorageLocation) bool {
mountDir = util.FullPath(string(key[:len(key)-1]))
remoteLocation = value.(*remote_pb.RemoteStorageLocation)
remoteLocation = value
return true
})
return
@ -92,8 +92,8 @@ func (rs *FilerRemoteStorage) FindMountDirectory(p util.FullPath) (mountDir util
func (rs *FilerRemoteStorage) FindRemoteStorageClient(p util.FullPath) (client remote_storage.RemoteStorageClient, remoteConf *remote_pb.RemoteConf, found bool) {
var storageLocation *remote_pb.RemoteStorageLocation
rs.rules.MatchPrefix([]byte(p), func(key []byte, value interface{}) bool {
storageLocation = value.(*remote_pb.RemoteStorageLocation)
rs.rules.MatchPrefix([]byte(p), func(key []byte, value *remote_pb.RemoteStorageLocation) bool {
storageLocation = value
return true
})

17
weed/filer/stream.go

@ -69,11 +69,17 @@ func NewFileReader(filerClient filer_pb.FilerClient, entry *filer_pb.Entry) io.R
type DoStreamContent func(writer io.Writer) error
func PrepareStreamContent(masterClient wdclient.HasLookupFileIdFunction, chunks []*filer_pb.FileChunk, offset int64, size int64) (DoStreamContent, error) {
return PrepareStreamContentWithThrottler(masterClient, chunks, offset, size, 0)
func PrepareStreamContent(masterClient wdclient.HasLookupFileIdFunction, jwtFunc VolumeServerJwtFunction, chunks []*filer_pb.FileChunk, offset int64, size int64) (DoStreamContent, error) {
return PrepareStreamContentWithThrottler(masterClient, jwtFunc, chunks, offset, size, 0)
}
func PrepareStreamContentWithThrottler(masterClient wdclient.HasLookupFileIdFunction, chunks []*filer_pb.FileChunk, offset int64, size int64, downloadMaxBytesPs int64) (DoStreamContent, error) {
type VolumeServerJwtFunction func(fileId string) string
func noJwtFunc(string) string {
return ""
}
func PrepareStreamContentWithThrottler(masterClient wdclient.HasLookupFileIdFunction, jwtFunc VolumeServerJwtFunction, chunks []*filer_pb.FileChunk, offset int64, size int64, downloadMaxBytesPs int64) (DoStreamContent, error) {
glog.V(4).Infof("prepare to stream content for chunks: %d", len(chunks))
chunkViews := ViewFromChunks(masterClient.GetLookupFileIdFunction(), chunks, offset, size)
@ -119,7 +125,8 @@ func PrepareStreamContentWithThrottler(masterClient wdclient.HasLookupFileIdFunc
}
urlStrings := fileId2Url[chunkView.FileId]
start := time.Now()
err := retriedStreamFetchChunkData(writer, urlStrings, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.OffsetInChunk, int(chunkView.ViewSize))
jwt := jwtFunc(chunkView.FileId)
err := retriedStreamFetchChunkData(writer, urlStrings, jwt, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.OffsetInChunk, int(chunkView.ViewSize))
offset += int64(chunkView.ViewSize)
remaining -= int64(chunkView.ViewSize)
stats.FilerRequestHistogram.WithLabelValues("chunkDownload").Observe(time.Since(start).Seconds())
@ -143,7 +150,7 @@ func PrepareStreamContentWithThrottler(masterClient wdclient.HasLookupFileIdFunc
}
func StreamContent(masterClient wdclient.HasLookupFileIdFunction, writer io.Writer, chunks []*filer_pb.FileChunk, offset int64, size int64) error {
streamFn, err := PrepareStreamContent(masterClient, chunks, offset, size)
streamFn, err := PrepareStreamContent(masterClient, noJwtFunc, chunks, offset, size)
if err != nil {
return err
}
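PrepareStreamContent now threads a VolumeServerJwtFunction down to each chunk fetch, while StreamContent preserves the old unauthenticated behavior via noJwtFunc. A minimal sketch of supplying a token source; signFileId is hypothetical and stands in for however a deployment issues volume-server read tokens:

```go
package main

import (
	"io"

	"github.com/seaweedfs/seaweedfs/weed/filer"
	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
	"github.com/seaweedfs/seaweedfs/weed/wdclient"
)

// signFileId is hypothetical: in a real caller the token would be derived
// from the configured volume-server signing key for this fileId.
func signFileId(fileId string) string { return "" }

func streamWithJwt(mc wdclient.HasLookupFileIdFunction, w io.Writer, chunks []*filer_pb.FileChunk, size int64) error {
	streamFn, err := filer.PrepareStreamContent(mc, signFileId, chunks, 0, size)
	if err != nil {
		return err
	}
	return streamFn(w)
}
```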

2
weed/filer/ydb/ydb_store.go

@ -79,7 +79,7 @@ func (store *YdbStore) initialize(dirBuckets string, dsn string, tablePathPrefix
}
opts := []ydb.Option{
ydb.WithDialTimeout(time.Duration(dialTimeOut) * time.Second),
environ.WithEnvironCredentials(ctx),
environ.WithEnvironCredentials(),
}
if poolSizeLimit > 0 {
opts = append(opts, ydb.WithSessionPoolSizeLimit(poolSizeLimit))

2
weed/ftpd/ftp_server.go

@ -29,7 +29,7 @@ type SftpServer struct {
var _ = ftpserver.MainDriver(&SftpServer{})
// NewServer returns a new FTP server driver
// NewFtpServer returns a new FTP server driver
func NewFtpServer(ftpListener net.Listener, option *FtpServerOption) (*SftpServer, error) {
var err error
server := &SftpServer{

5
weed/iamapi/iamapi_management_handlers.go

@ -33,6 +33,7 @@ const (
StatementActionReadAcp = "GetBucketAcl"
StatementActionList = "List*"
StatementActionTagging = "Tagging*"
StatementActionDelete = "DeleteBucket*"
)
var (
@ -58,6 +59,8 @@ func MapToStatementAction(action string) string {
return s3_constants.ACTION_LIST
case StatementActionTagging:
return s3_constants.ACTION_TAGGING
case StatementActionDelete:
return s3_constants.ACTION_DELETE_BUCKET
default:
return ""
}
@ -79,6 +82,8 @@ func MapToIdentitiesAction(action string) string {
return StatementActionList
case s3_constants.ACTION_TAGGING:
return StatementActionTagging
case s3_constants.ACTION_DELETE_BUCKET:
return StatementActionDelete
default:
return ""
}
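With DeleteBucket* mapped in both switches, the statement-action and identity-action mappings stay symmetric. A small in-package sketch of the round-trip property the two functions are meant to preserve:

```go
// statement action -> identity action -> statement action should be identity
for _, action := range []string{StatementActionDelete, StatementActionTagging} {
	if MapToIdentitiesAction(MapToStatementAction(action)) != action {
		panic("asymmetric IAM action mapping: " + action)
	}
}
```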

2
weed/iamapi/iamapi_server.go

@ -72,7 +72,7 @@ func (iama *IamApiServer) registerRouter(router *mux.Router) {
// ListBuckets
// apiRouter.Methods("GET").Path("/").HandlerFunc(track(s3a.iam.Auth(s3a.ListBucketsHandler, ACTION_ADMIN), "LIST"))
apiRouter.Methods("POST").Path("/").HandlerFunc(iama.iam.Auth(iama.DoActions, ACTION_ADMIN))
apiRouter.Methods(http.MethodPost).Path("/").HandlerFunc(iama.iam.Auth(iama.DoActions, ACTION_ADMIN))
//
// NotFound
apiRouter.NotFoundHandler = http.HandlerFunc(s3err.NotFoundHandler)

2
weed/iamapi/iamapi_test.go

@ -189,7 +189,7 @@ func TestDeleteUser(t *testing.T) {
func executeRequest(req *http.Request, v interface{}) (*httptest.ResponseRecorder, error) {
rr := httptest.NewRecorder()
apiRouter := mux.NewRouter().SkipClean(true)
apiRouter.Path("/").Methods("POST").HandlerFunc(ias.DoActions)
apiRouter.Path("/").Methods(http.MethodPost).HandlerFunc(ias.DoActions)
apiRouter.ServeHTTP(rr, req)
return rr, xml.Unmarshal(rr.Body.Bytes(), &v)
}

2
weed/images/cropping.go

@ -8,7 +8,7 @@ import (
"image/png"
"io"
"github.com/disintegration/imaging"
"github.com/cognusion/imaging"
"github.com/seaweedfs/seaweedfs/weed/glog"
)

2
weed/images/resizing.go

@ -8,7 +8,7 @@ import (
"image/png"
"io"
"github.com/disintegration/imaging"
"github.com/cognusion/imaging"
"github.com/seaweedfs/seaweedfs/weed/glog"

6
weed/mount/inode_to_path.go

@ -114,9 +114,9 @@ func (i *InodeToPath) AllocateInode(path util.FullPath, unixTime int64) uint64 {
return inode
}
func (i *InodeToPath) GetInode(path util.FullPath) uint64 {
func (i *InodeToPath) GetInode(path util.FullPath) (uint64, bool) {
if path == "/" {
return 1
return 1, true
}
i.Lock()
defer i.Unlock()
@ -125,7 +125,7 @@ func (i *InodeToPath) GetInode(path util.FullPath) uint64 {
// glog.Fatalf("GetInode unknown inode for %s", path)
// this could be the parent for mount point
}
return inode
return inode, found
}
func (i *InodeToPath) GetPath(inode uint64) (util.FullPath, fuse.Status) {

8
weed/mount/page_writer/page_chunk_mem.go

@ -63,11 +63,11 @@ func (mc *MemChunk) ReadDataAt(p []byte, off int64, tsNs int64) (maxStop int64)
logicStart := max(off, memChunkBaseOffset+t.StartOffset)
logicStop := min(off+int64(len(p)), memChunkBaseOffset+t.stopOffset)
if logicStart < logicStop {
copy(p[logicStart-off:logicStop-off], mc.buf[logicStart-memChunkBaseOffset:logicStop-memChunkBaseOffset])
maxStop = max(maxStop, logicStop)
if t.TsNs >= tsNs {
copy(p[logicStart-off:logicStop-off], mc.buf[logicStart-memChunkBaseOffset:logicStop-memChunkBaseOffset])
maxStop = max(maxStop, logicStop)
} else {
println("read old data1", tsNs-t.TsNs, "ns")
println("read new data1", t.TsNs-tsNs, "ns")
}
}
}

16
weed/mount/page_writer/page_chunk_swapfile.go

@ -129,15 +129,15 @@ func (sc *SwapFileChunk) ReadDataAt(p []byte, off int64, tsNs int64) (maxStop in
logicStart := max(off, chunkStartOffset+t.StartOffset)
logicStop := min(off+int64(len(p)), chunkStartOffset+t.stopOffset)
if logicStart < logicStop {
actualStart := logicStart - chunkStartOffset + int64(sc.actualChunkIndex)*sc.swapfile.chunkSize
if _, err := sc.swapfile.file.ReadAt(p[logicStart-off:logicStop-off], actualStart); err != nil {
glog.Errorf("failed to reading swap file %s: %v", sc.swapfile.file.Name(), err)
break
}
maxStop = max(maxStop, logicStop)
if t.TsNs >= tsNs {
actualStart := logicStart - chunkStartOffset + int64(sc.actualChunkIndex)*sc.swapfile.chunkSize
if _, err := sc.swapfile.file.ReadAt(p[logicStart-off:logicStop-off], actualStart); err != nil {
glog.Errorf("failed to reading swap file %s: %v", sc.swapfile.file.Name(), err)
break
}
maxStop = max(maxStop, logicStop)
} else {
println("read old data2", tsNs-t.TsNs, "ns")
println("read new data2", t.TsNs-tsNs, "ns")
}
}
}
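Both the in-memory and swap-file chunks now gate the copy on the interval's write timestamp, serving only intervals at least as new as the reader's requested tsNs. The guard in isolation, sketched over a hypothetical interval type:

```go
package main

import "fmt"

type interval struct {
	TsNs        int64
	start, stop int64 // logical offsets covered by this write
}

// readAt copies only intervals written at or after the reader's tsNs, so a
// reader never sees bytes older than the snapshot it asked for.
func readAt(p []byte, off, tsNs int64, intervals []interval, src []byte) (maxStop int64) {
	for _, t := range intervals {
		lo, hi := max(off, t.start), min(off+int64(len(p)), t.stop)
		if lo >= hi {
			continue
		}
		if t.TsNs >= tsNs {
			copy(p[lo-off:hi-off], src[lo:hi])
			maxStop = max(maxStop, hi)
		} else {
			fmt.Println("skipping stale interval, behind by", tsNs-t.TsNs, "ns")
		}
	}
	return
}

func main() {
	src := []byte("hello world")
	p := make([]byte, len(src))
	ivs := []interval{{TsNs: 100, start: 0, stop: 5}, {TsNs: 10, start: 5, stop: 11}}
	fmt.Println("maxStop:", readAt(p, 0, 50, ivs, src)) // second interval is stale
}
```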

23
weed/mount/weedfs.go

@ -105,6 +105,29 @@ func NewSeaweedFileSystem(option *Option) *WFS {
}, func(path util.FullPath) bool {
return wfs.inodeToPath.IsChildrenCached(path)
}, func(filePath util.FullPath, entry *filer_pb.Entry) {
// Find inode if it is not a deleted path
if inode, inode_found := wfs.inodeToPath.GetInode(filePath); inode_found {
// Find open file handle
if fh, fh_found := wfs.fhmap.FindFileHandle(inode); fh_found {
fhActiveLock := fh.wfs.fhLockTable.AcquireLock("invalidateFunc", fh.fh, util.ExclusiveLock)
defer fh.wfs.fhLockTable.ReleaseLock(fh.fh, fhActiveLock)
fh.entryLock.Lock()
defer fh.entryLock.Unlock()
// Recreate dirty pages
fh.dirtyPages.Destroy()
fh.dirtyPages = newPageWriter(fh, wfs.option.ChunkSizeLimit)
// Update handle entry
newentry, status := wfs.maybeLoadEntry(filePath)
if status == fuse.OK {
if fh.GetEntry() != newentry {
fh.SetEntry(newentry)
}
}
}
}
})
grace.OnInterrupt(func() {
wfs.metaCache.Shutdown()

6
weed/mount/weedfs_file_sync.go

@ -104,9 +104,6 @@ func (wfs *WFS) doFlush(fh *FileHandle, uid, gid uint32) fuse.Status {
}
}
fhActiveLock := fh.wfs.fhLockTable.AcquireLock("doFlush", fh.fh, util.ExclusiveLock)
defer fh.wfs.fhLockTable.ReleaseLock(fh.fh, fhActiveLock)
if !fh.dirtyMetadata {
return fuse.OK
}
@ -115,6 +112,9 @@ func (wfs *WFS) doFlush(fh *FileHandle, uid, gid uint32) fuse.Status {
return fuse.Status(syscall.ENOSPC)
}
fhActiveLock := fh.wfs.fhLockTable.AcquireLock("doFlush", fh.fh, util.ExclusiveLock)
defer fh.wfs.fhLockTable.ReleaseLock(fh.fh, fhActiveLock)
err := wfs.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
fh.entryLock.Lock()
defer fh.entryLock.Unlock()

9
weed/mq/broker/broker_grpc_assign.go

@ -14,7 +14,6 @@ import (
// AssignTopicPartitions Runs on the assigned broker, to execute the topic partition assignment
func (b *MessageQueueBroker) AssignTopicPartitions(c context.Context, request *mq_pb.AssignTopicPartitionsRequest) (*mq_pb.AssignTopicPartitionsResponse, error) {
ret := &mq_pb.AssignTopicPartitionsResponse{}
self := pb.ServerAddress(fmt.Sprintf("%s:%d", b.option.Ip, b.option.Port))
// drain existing topic partition subscriptions
for _, assignment := range request.BrokerPartitionAssignments {
@ -23,12 +22,12 @@ func (b *MessageQueueBroker) AssignTopicPartitions(c context.Context, request *m
b.accessLock.Lock()
if request.IsDraining {
// TODO drain existing topic partition subscriptions
b.localTopicManager.RemoveTopicPartition(t, partition)
b.localTopicManager.RemoveLocalPartition(t, partition)
} else {
var localPartition *topic.LocalPartition
if localPartition = b.localTopicManager.GetTopicPartition(t, partition); localPartition == nil {
localPartition = topic.FromPbBrokerPartitionAssignment(self, partition, assignment, b.genLogFlushFunc(t, assignment.Partition), b.genLogOnDiskReadFunc(t, assignment.Partition))
b.localTopicManager.AddTopicPartition(t, localPartition)
if localPartition = b.localTopicManager.GetLocalPartition(t, partition); localPartition == nil {
localPartition = topic.NewLocalPartition(partition, b.genLogFlushFunc(t, assignment.Partition), b.genLogOnDiskReadFunc(t, assignment.Partition))
b.localTopicManager.AddLocalPartition(t, localPartition)
}
}
b.accessLock.Unlock()

9
weed/mq/broker/broker_grpc_configure.go

@ -5,6 +5,7 @@ import (
"fmt"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/mq/pub_balancer"
"github.com/seaweedfs/seaweedfs/weed/mq/schema"
"github.com/seaweedfs/seaweedfs/weed/mq/topic"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/pb/mq_pb"
@ -27,6 +28,13 @@ func (b *MessageQueueBroker) ConfigureTopic(ctx context.Context, request *mq_pb.
return resp, err
}
// validate the schema
if request.RecordType != nil {
if _, err = schema.NewSchema(request.RecordType); err != nil {
return nil, status.Errorf(codes.InvalidArgument, "invalid record type %+v: %v", request.RecordType, err)
}
}
t := topic.FromPbTopic(request.Topic)
var readErr, assignErr error
resp, readErr = b.readTopicConfFromFiler(t)
@ -56,6 +64,7 @@ func (b *MessageQueueBroker) ConfigureTopic(ctx context.Context, request *mq_pb.
return nil, status.Errorf(codes.Unavailable, pub_balancer.ErrNoBroker.Error())
}
resp.BrokerPartitionAssignments = pub_balancer.AllocateTopicPartitions(b.Balancer.Brokers, request.PartitionCount)
resp.RecordType = request.RecordType
// save the topic configuration on filer
if err := b.saveTopicConfToFiler(request.Topic, resp); err != nil {

134
weed/mq/broker/broker_grpc_pub.go

@ -5,13 +5,13 @@ import (
"fmt"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/mq/topic"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/pb/mq_pb"
"google.golang.org/grpc/peer"
"io"
"math/rand"
"net"
"sync/atomic"
"time"
)
// PUB
@ -40,73 +40,85 @@ func (b *MessageQueueBroker) PublishMessage(stream mq_pb.SeaweedMessaging_Publis
// 2. find the topic metadata owning filer
// 3. write to the filer
var localTopicPartition *topic.LocalPartition
req, err := stream.Recv()
if err != nil {
return err
}
response := &mq_pb.PublishMessageResponse{}
// TODO check whether current broker should be the leader for the topic partition
ackInterval := 1
initMessage := req.GetInit()
var t topic.Topic
var p topic.Partition
if initMessage != nil {
t, p = topic.FromPbTopic(initMessage.Topic), topic.FromPbPartition(initMessage.Partition)
localTopicPartition, _, err = b.GetOrGenLocalPartition(t, p)
if err != nil {
response.Error = fmt.Sprintf("topic %v partition %v not setup", initMessage.Topic, initMessage.Partition)
glog.Errorf("topic %v partition %v not setup", initMessage.Topic, initMessage.Partition)
return stream.Send(response)
}
ackInterval = int(initMessage.AckInterval)
for _, follower := range initMessage.FollowerBrokers {
followErr := b.withBrokerClient(false, pb.ServerAddress(follower), func(client mq_pb.SeaweedMessagingClient) error {
_, err := client.PublishFollowMe(context.Background(), &mq_pb.PublishFollowMeRequest{
Topic: initMessage.Topic,
Partition: initMessage.Partition,
BrokerSelf: string(b.option.BrokerAddress()),
})
return err
})
if followErr != nil {
response.Error = fmt.Sprintf("follower %v failed: %v", follower, followErr)
glog.Errorf("follower %v failed: %v", follower, followErr)
return stream.Send(response)
}
}
stream.Send(response)
} else {
if initMessage == nil {
response.Error = fmt.Sprintf("missing init message")
glog.Errorf("missing init message")
return stream.Send(response)
}
// get or generate a local partition
t, p := topic.FromPbTopic(initMessage.Topic), topic.FromPbPartition(initMessage.Partition)
localTopicPartition, getOrGenErr := b.GetOrGenerateLocalPartition(t, p)
if getOrGenErr != nil {
response.Error = fmt.Sprintf("topic %v not found: %v", t, getOrGenErr)
glog.Errorf("topic %v not found: %v", t, getOrGenErr)
return stream.Send(response)
}
// connect to follower brokers
if followerErr := localTopicPartition.MaybeConnectToFollowers(initMessage, b.grpcDialOption); followerErr != nil {
response.Error = followerErr.Error()
glog.Errorf("MaybeConnectToFollowers: %v", followerErr)
return stream.Send(response)
}
var receivedSequence, acknowledgedSequence int64
var isClosed bool
// start sending ack to publisher
ackInterval := int64(1)
if initMessage.AckInterval > 0 {
ackInterval = int64(initMessage.AckInterval)
}
go func() {
defer func() {
// println("stop sending ack to publisher", initMessage.PublisherName)
}()
lastAckTime := time.Now()
for !isClosed {
receivedSequence = atomic.LoadInt64(&localTopicPartition.AckTsNs)
if acknowledgedSequence < receivedSequence && (receivedSequence-acknowledgedSequence >= ackInterval || time.Since(lastAckTime) > 1*time.Second) {
acknowledgedSequence = receivedSequence
response := &mq_pb.PublishMessageResponse{
AckSequence: acknowledgedSequence,
}
if err := stream.Send(response); err != nil {
glog.Errorf("Error sending response %v: %v", response, err)
}
// println("sent ack", acknowledgedSequence, "=>", initMessage.PublisherName)
lastAckTime = time.Now()
} else {
time.Sleep(1 * time.Second)
}
}
}()
// process each published messages
clientName := fmt.Sprintf("%v-%4d/%s/%v", findClientAddress(stream.Context()), rand.Intn(10000), initMessage.Topic, initMessage.Partition)
localTopicPartition.Publishers.AddPublisher(clientName, topic.NewLocalPublisher())
ackCounter := 0
var ackSequence int64
var isStopping int32
respChan := make(chan *mq_pb.PublishMessageResponse, 128)
defer func() {
atomic.StoreInt32(&isStopping, 1)
respChan <- &mq_pb.PublishMessageResponse{
AckSequence: ackSequence,
}
close(respChan)
// remove the publisher
localTopicPartition.Publishers.RemovePublisher(clientName)
glog.V(0).Infof("topic %v partition %v published %d messges Publisher:%d Subscriber:%d", initMessage.Topic, initMessage.Partition, ackSequence, localTopicPartition.Publishers.Size(), localTopicPartition.Subscribers.Size())
if localTopicPartition.MaybeShutdownLocalPartition() {
b.localTopicManager.RemoveTopicPartition(t, p)
b.localTopicManager.RemoveLocalPartition(t, p)
glog.V(0).Infof("Removed local topic %v partition %v", initMessage.Topic, initMessage.Partition)
}
}()
go func() {
for resp := range respChan {
if err := stream.Send(resp); err != nil {
glog.Errorf("Error sending response %v: %v", resp, err)
}
}
// send a hello message
stream.Send(&mq_pb.PublishMessageResponse{})
defer func() {
isClosed = true
}()
// process each published messages
@ -117,28 +129,26 @@ func (b *MessageQueueBroker) PublishMessage(stream mq_pb.SeaweedMessaging_Publis
if err == io.EOF {
break
}
glog.V(0).Infof("topic %v partition %v publish stream error: %v", initMessage.Topic, initMessage.Partition, err)
return err
glog.V(0).Infof("topic %v partition %v publish stream from %s error: %v", initMessage.Topic, initMessage.Partition, initMessage.PublisherName, err)
break
}
// Process the received message
if dataMessage := req.GetData(); dataMessage != nil {
localTopicPartition.Publish(dataMessage)
dataMessage := req.GetData()
if dataMessage == nil {
continue
}
ackCounter++
ackSequence++
if ackCounter >= ackInterval {
ackCounter = 0
// send back the ack
response := &mq_pb.PublishMessageResponse{
AckSequence: ackSequence,
}
respChan <- response
// The control message should still be sent to the follower
// to avoid timing issues when acking messages.
// send to the local partition
if err = localTopicPartition.Publish(dataMessage); err != nil {
return fmt.Errorf("topic %v partition %v publish error: %v", initMessage.Topic, initMessage.Partition, err)
}
}
glog.V(0).Infof("topic %v partition %v publish stream closed.", initMessage.Topic, initMessage.Partition)
glog.V(0).Infof("topic %v partition %v publish stream from %s closed.", initMessage.Topic, initMessage.Partition, initMessage.PublisherName)
return nil
}
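The rewritten publish path splits acknowledgment into its own goroutine: it reads the partition's AckTsNs atomically and acks either when AckInterval messages are owed or when more than a second has passed since the last ack. The batching condition in isolation, as a runnable sketch:

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// ackLoop mirrors the loop above: ack when at least interval messages are
// outstanding, or when the previous ack is older than one second.
func ackLoop(ackTsNs *int64, interval int64, send func(seq int64), done *atomic.Bool) {
	var acked int64
	lastAck := time.Now()
	for !done.Load() {
		received := atomic.LoadInt64(ackTsNs)
		if acked < received && (received-acked >= interval || time.Since(lastAck) > time.Second) {
			acked = received
			send(acked)
			lastAck = time.Now()
		} else {
			time.Sleep(time.Second)
		}
	}
}

func main() {
	var ts int64
	var done atomic.Bool
	go ackLoop(&ts, 4, func(seq int64) { fmt.Println("ack", seq) }, &done)
	atomic.StoreInt64(&ts, 10) // 10 messages received, above the interval
	time.Sleep(1500 * time.Millisecond)
	done.Store(true)
}
```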

198
weed/mq/broker/broker_grpc_pub_follow.go

@ -1,96 +1,150 @@
package broker
import (
"context"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/mq/topic"
"github.com/seaweedfs/seaweedfs/weed/pb/mq_pb"
"github.com/seaweedfs/seaweedfs/weed/util/buffered_queue"
"github.com/seaweedfs/seaweedfs/weed/util/log_buffer"
"io"
"math/rand"
"sync"
"time"
)
func (b *MessageQueueBroker) PublishFollowMe(c context.Context, request *mq_pb.PublishFollowMeRequest) (*mq_pb.PublishFollowMeResponse, error) {
glog.V(0).Infof("PublishFollowMe %v", request)
var wg sync.WaitGroup
wg.Add(1)
var ret error
go b.withBrokerClient(true, pb.ServerAddress(request.BrokerSelf), func(client mq_pb.SeaweedMessagingClient) error {
followerId := rand.Int31()
subscribeClient, err := client.FollowInMemoryMessages(context.Background(), &mq_pb.FollowInMemoryMessagesRequest{
Message: &mq_pb.FollowInMemoryMessagesRequest_Init{
Init: &mq_pb.FollowInMemoryMessagesRequest_InitMessage{
ConsumerGroup: string(b.option.BrokerAddress()),
ConsumerId: fmt.Sprintf("followMe-%d", followerId),
FollowerId: followerId,
Topic: request.Topic,
PartitionOffset: &mq_pb.PartitionOffset{
Partition: request.Partition,
StartTsNs: 0,
StartType: mq_pb.PartitionOffsetStartType_EARLIEST_IN_MEMORY,
},
},
},
})
type memBuffer struct {
buf []byte
startTime time.Time
stopTime time.Time
}
if err != nil {
glog.Errorf("FollowInMemoryMessages error: %v", err)
ret = err
return err
}
func (b *MessageQueueBroker) PublishFollowMe(stream mq_pb.SeaweedMessaging_PublishFollowMeServer) (err error) {
var req *mq_pb.PublishFollowMeRequest
req, err = stream.Recv()
if err != nil {
return err
}
initMessage := req.GetInit()
if initMessage == nil {
return fmt.Errorf("missing init message")
}
// create an in-memory queue of buffered messages
inMemoryBuffers := buffered_queue.NewBufferedQueue[memBuffer](4)
logBuffer := b.buildFollowerLogBuffer(inMemoryBuffers)
lastFlushTsNs := time.Now().UnixNano()
// receive first hello message
resp, err := subscribeClient.Recv()
// follow each published messages
for {
// receive a message
req, err = stream.Recv()
if err != nil {
return fmt.Errorf("FollowInMemoryMessages recv first message error: %v", err)
}
if resp == nil {
glog.V(0).Infof("doFollowInMemoryMessage recv first message nil response")
return io.ErrUnexpectedEOF
if err == io.EOF {
err = nil
break
}
glog.V(0).Infof("topic %v partition %v publish stream error: %v", initMessage.Topic, initMessage.Partition, err)
break
}
wg.Done()
b.doFollowInMemoryMessage(context.Background(), subscribeClient)
// Process the received message
if dataMessage := req.GetData(); dataMessage != nil {
return nil
})
wg.Wait()
return &mq_pb.PublishFollowMeResponse{}, ret
}
// TODO: change this to DataMessage
// log the message
logBuffer.AddToBuffer(dataMessage)
func (b *MessageQueueBroker) doFollowInMemoryMessage(c context.Context, client mq_pb.SeaweedMessaging_FollowInMemoryMessagesClient) {
for {
resp, err := client.Recv()
if err != nil {
if err != io.EOF {
glog.V(0).Infof("doFollowInMemoryMessage error: %v", err)
// send back the ack
if err := stream.Send(&mq_pb.PublishFollowMeResponse{
AckTsNs: dataMessage.TsNs,
}); err != nil {
glog.Errorf("Error sending response %v: %v", dataMessage, err)
}
// println("ack", string(dataMessage.Key), dataMessage.TsNs)
} else if closeMessage := req.GetClose(); closeMessage != nil {
glog.V(0).Infof("topic %v partition %v publish stream closed: %v", initMessage.Topic, initMessage.Partition, closeMessage)
break
} else if flushMessage := req.GetFlush(); flushMessage != nil {
glog.V(0).Infof("topic %v partition %v publish stream flushed: %v", initMessage.Topic, initMessage.Partition, flushMessage)
lastFlushTsNs = flushMessage.TsNs
// drop already flushed messages
for mem, found := inMemoryBuffers.PeekHead(); found; mem, found = inMemoryBuffers.PeekHead() {
if mem.stopTime.UnixNano() <= flushMessage.TsNs {
inMemoryBuffers.Dequeue()
// println("dropping flushed messages: ", mem.startTime.UnixNano(), mem.stopTime.UnixNano(), len(mem.buf))
} else {
break
}
}
return
} else {
glog.Errorf("unknown message: %v", req)
}
}
t, p := topic.FromPbTopic(initMessage.Topic), topic.FromPbPartition(initMessage.Partition)
logBuffer.ShutdownLogBuffer()
// wait until all messages are sent to inMemoryBuffers
for !logBuffer.IsAllFlushed() {
time.Sleep(113 * time.Millisecond)
}
topicDir := fmt.Sprintf("%s/%s/%s", filer.TopicsDir, t.Namespace, t.Name)
partitionGeneration := time.Unix(0, p.UnixTimeNs).UTC().Format(topic.TIME_FORMAT)
partitionDir := fmt.Sprintf("%s/%s/%04d-%04d", topicDir, partitionGeneration, p.RangeStart, p.RangeStop)
// flush the remaining messages
inMemoryBuffers.CloseInput()
for mem, found := inMemoryBuffers.Dequeue(); found; mem, found = inMemoryBuffers.Dequeue() {
if len(mem.buf) == 0 {
continue
}
if resp == nil {
glog.V(0).Infof("doFollowInMemoryMessage nil response")
return
startTime, stopTime := mem.startTime.UTC(), mem.stopTime.UTC()
if stopTime.UnixNano() <= lastFlushTsNs {
glog.V(0).Infof("dropping remaining data at %v %v", t, p)
continue
}
if resp.Message != nil {
// process ctrl message or data message
switch m := resp.Message.(type) {
case *mq_pb.FollowInMemoryMessagesResponse_Data:
// process data message
print("d")
case *mq_pb.FollowInMemoryMessagesResponse_Ctrl:
// process ctrl message
if m.Ctrl.FlushedSequence > 0 {
flushTime := time.Unix(0, m.Ctrl.FlushedSequence)
glog.V(0).Infof("doFollowInMemoryMessage flushTime: %v", flushTime)
}
if m.Ctrl.FollowerChangedToId != 0 {
// follower changed
glog.V(0).Infof("doFollowInMemoryMessage follower changed to %d", m.Ctrl.FollowerChangedToId)
return
}
// TODO trim data earlier than lastFlushTsNs
targetFile := fmt.Sprintf("%s/%s", partitionDir, startTime.Format(topic.TIME_FORMAT))
for {
if err := b.appendToFile(targetFile, mem.buf); err != nil {
glog.V(0).Infof("metadata log write failed %s: %v", targetFile, err)
time.Sleep(737 * time.Millisecond)
} else {
break
}
}
glog.V(0).Infof("flushed remaining data at %v to %s size %d", mem.stopTime.UnixNano(), targetFile, len(mem.buf))
}
glog.V(0).Infof("shut down follower for %v %v", t, p)
return err
}
func (b *MessageQueueBroker) buildFollowerLogBuffer(inMemoryBuffers *buffered_queue.BufferedQueue[memBuffer]) *log_buffer.LogBuffer {
lb := log_buffer.NewLogBuffer("follower",
2*time.Minute, func(logBuffer *log_buffer.LogBuffer, startTime, stopTime time.Time, buf []byte) {
if len(buf) == 0 {
return
}
inMemoryBuffers.Enqueue(memBuffer{
buf: buf,
startTime: startTime,
stopTime: stopTime,
})
glog.V(0).Infof("queue up %d~%d size %d", startTime.UnixNano(), stopTime.UnixNano(), len(buf))
}, nil, func() {
})
return lb
}
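The follower keeps flushed-but-unpersisted buffers in a generic bounded queue and trims from the head whenever the leader reports a flush, so only data the leader never persisted gets written at shutdown. The trim step as an in-package sketch over the same queue API used above (PeekHead and Dequeue both return a value and a found flag):

```go
// trimFlushed drops every head buffer the leader has already persisted;
// what remains is exactly the data this follower must flush itself.
func trimFlushed(q *buffered_queue.BufferedQueue[memBuffer], flushTsNs int64) {
	for mem, found := q.PeekHead(); found; mem, found = q.PeekHead() {
		if mem.stopTime.UnixNano() > flushTsNs {
			break // head and everything behind it is newer than the flush
		}
		q.Dequeue()
	}
}
```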

212
weed/mq/broker/broker_grpc_sub.go

@ -8,7 +8,6 @@ import (
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/mq_pb"
"github.com/seaweedfs/seaweedfs/weed/util/log_buffer"
"sync/atomic"
"time"
)
@ -17,40 +16,20 @@ func (b *MessageQueueBroker) SubscribeMessage(req *mq_pb.SubscribeMessageRequest
ctx := stream.Context()
clientName := fmt.Sprintf("%s/%s-%s", req.GetInit().ConsumerGroup, req.GetInit().ConsumerId, req.GetInit().ClientId)
initMessage := req.GetInit()
if initMessage == nil {
glog.Errorf("missing init message")
return fmt.Errorf("missing init message")
}
t := topic.FromPbTopic(req.GetInit().Topic)
partition := topic.FromPbPartition(req.GetInit().GetPartitionOffset().GetPartition())
glog.V(0).Infof("Subscriber %s on %v %v connected", req.GetInit().ConsumerId, t, partition)
waitIntervalCount := 0
var localTopicPartition *topic.LocalPartition
for localTopicPartition == nil {
localTopicPartition, _, err = b.GetOrGenLocalPartition(t, partition)
if err != nil {
glog.V(1).Infof("topic %v partition %v not setup", t, partition)
}
if localTopicPartition != nil {
break
}
waitIntervalCount++
if waitIntervalCount > 10 {
waitIntervalCount = 10
}
time.Sleep(time.Duration(waitIntervalCount) * 337 * time.Millisecond)
// Check if the client has disconnected by monitoring the context
select {
case <-ctx.Done():
err := ctx.Err()
if err == context.Canceled {
// Client disconnected
return nil
}
glog.V(0).Infof("Subscriber %s disconnected: %v", clientName, err)
return nil
default:
// Continue processing the request
}
localTopicPartition, getOrGenErr := b.GetOrGenerateLocalPartition(t, partition)
if getOrGenErr != nil {
return getOrGenErr
}
localTopicPartition.Subscribers.AddSubscriber(clientName, topic.NewLocalSubscriber())
@ -64,7 +43,7 @@ func (b *MessageQueueBroker) SubscribeMessage(req *mq_pb.SubscribeMessageRequest
localTopicPartition.Subscribers.RemoveSubscriber(clientName)
glog.V(0).Infof("Subscriber %s on %v %v disconnected, sent %d", clientName, t, partition, counter)
if localTopicPartition.MaybeShutdownLocalPartition() {
b.localTopicManager.RemoveTopicPartition(t, partition)
b.localTopicManager.RemoveLocalPartition(t, partition)
}
}()
@ -129,174 +108,3 @@ func getRequestPosition(offset *mq_pb.PartitionOffset) (startPosition log_buffer
}
return
}
func (b *MessageQueueBroker) FollowInMemoryMessages(req *mq_pb.FollowInMemoryMessagesRequest, stream mq_pb.SeaweedMessaging_FollowInMemoryMessagesServer) (err error) {
ctx := stream.Context()
clientName := req.GetInit().ConsumerId
t := topic.FromPbTopic(req.GetInit().Topic)
partition := topic.FromPbPartition(req.GetInit().GetPartitionOffset().GetPartition())
glog.V(0).Infof("FollowInMemoryMessages %s on %v %v connected", clientName, t, partition)
waitIntervalCount := 0
var localTopicPartition *topic.LocalPartition
for localTopicPartition == nil {
localTopicPartition, _, err = b.GetOrGenLocalPartition(t, partition)
if err != nil {
glog.V(1).Infof("topic %v partition %v not setup", t, partition)
}
if localTopicPartition != nil {
break
}
waitIntervalCount++
if waitIntervalCount > 32 {
waitIntervalCount = 32
}
time.Sleep(time.Duration(waitIntervalCount) * 137 * time.Millisecond)
// Check if the client has disconnected by monitoring the context
select {
case <-ctx.Done():
err := ctx.Err()
if err == context.Canceled {
// Client disconnected
return nil
}
glog.V(0).Infof("FollowInMemoryMessages %s disconnected: %v", clientName, err)
return nil
default:
// Continue processing the request
}
}
// set the current follower id
followerId := req.GetInit().FollowerId
atomic.StoreInt32(&localTopicPartition.FollowerId, followerId)
glog.V(0).Infof("FollowInMemoryMessages %s connected on %v %v", clientName, t, partition)
isConnected := true
sleepIntervalCount := 0
var counter int64
defer func() {
isConnected = false
glog.V(0).Infof("FollowInMemoryMessages %s on %v %v disconnected, sent %d", clientName, t, partition, counter)
}()
// send first hello message
// to indicate the follower is connected
stream.Send(&mq_pb.FollowInMemoryMessagesResponse{
Message: &mq_pb.FollowInMemoryMessagesResponse_Ctrl{
Ctrl: &mq_pb.FollowInMemoryMessagesResponse_CtrlMessage{},
},
})
var startPosition log_buffer.MessagePosition
if req.GetInit() != nil && req.GetInit().GetPartitionOffset() != nil {
startPosition = getRequestPosition(req.GetInit().GetPartitionOffset())
}
var prevFlushTsNs int64
_, _, err = localTopicPartition.LogBuffer.LoopProcessLogData(clientName, startPosition, 0, func() bool {
if !isConnected {
return false
}
sleepIntervalCount++
if sleepIntervalCount > 32 {
sleepIntervalCount = 32
}
time.Sleep(time.Duration(sleepIntervalCount) * 137 * time.Millisecond)
if localTopicPartition.LogBuffer.IsStopping() {
newFollowerId := atomic.LoadInt32(&localTopicPartition.FollowerId)
glog.V(0).Infof("FollowInMemoryMessages1 %s on %v %v follower id changed to %d", clientName, t, partition, newFollowerId)
stream.Send(&mq_pb.FollowInMemoryMessagesResponse{
Message: &mq_pb.FollowInMemoryMessagesResponse_Ctrl{
Ctrl: &mq_pb.FollowInMemoryMessagesResponse_CtrlMessage{
FollowerChangedToId: newFollowerId,
},
},
})
return false
}
// Check if the client has disconnected by monitoring the context
select {
case <-ctx.Done():
err := ctx.Err()
if err == context.Canceled {
// Client disconnected
return false
}
glog.V(0).Infof("Subscriber %s disconnected: %v", clientName, err)
return false
default:
// Continue processing the request
}
// send the last flushed sequence
flushTsNs := atomic.LoadInt64(&localTopicPartition.LogBuffer.LastFlushTsNs)
if flushTsNs != prevFlushTsNs {
prevFlushTsNs = flushTsNs
stream.Send(&mq_pb.FollowInMemoryMessagesResponse{
Message: &mq_pb.FollowInMemoryMessagesResponse_Ctrl{
Ctrl: &mq_pb.FollowInMemoryMessagesResponse_CtrlMessage{
FlushedSequence: flushTsNs,
},
},
})
}
return true
}, func(logEntry *filer_pb.LogEntry) (bool, error) {
// reset the sleep interval count
sleepIntervalCount = 0
// check the follower id
newFollowerId := atomic.LoadInt32(&localTopicPartition.FollowerId)
if newFollowerId != followerId {
glog.V(0).Infof("FollowInMemoryMessages2 %s on %v %v follower id %d changed to %d", clientName, t, partition, followerId, newFollowerId)
stream.Send(&mq_pb.FollowInMemoryMessagesResponse{
Message: &mq_pb.FollowInMemoryMessagesResponse_Ctrl{
Ctrl: &mq_pb.FollowInMemoryMessagesResponse_CtrlMessage{
FollowerChangedToId: newFollowerId,
},
},
})
return true, nil
}
// send the last flushed sequence
flushTsNs := atomic.LoadInt64(&localTopicPartition.LogBuffer.LastFlushTsNs)
if flushTsNs != prevFlushTsNs {
prevFlushTsNs = flushTsNs
stream.Send(&mq_pb.FollowInMemoryMessagesResponse{
Message: &mq_pb.FollowInMemoryMessagesResponse_Ctrl{
Ctrl: &mq_pb.FollowInMemoryMessagesResponse_CtrlMessage{
FlushedSequence: flushTsNs,
},
},
})
}
// send the log entry
if err := stream.Send(&mq_pb.FollowInMemoryMessagesResponse{
Message: &mq_pb.FollowInMemoryMessagesResponse_Data{
Data: &mq_pb.DataMessage{
Key: logEntry.Key,
Value: logEntry.Data,
TsNs: logEntry.TsNs,
},
}}); err != nil {
glog.Errorf("Error sending setup response: %v", err)
return false, err
}
counter++
return false, nil
})
return err
}

9
weed/mq/broker/broker_grpc_sub_coordinator.go

@ -39,18 +39,11 @@ func (b *MessageQueueBroker) SubscriberToSubCoordinator(stream mq_pb.SeaweedMess
go func() {
// try to load the partition assignment from filer
if conf, err := b.readTopicConfFromFiler(topic.FromPbTopic(initMessage.Topic)); err == nil {
assignedPartitions := make([]*mq_pb.SubscriberToSubCoordinatorResponse_AssignedPartition, len(conf.BrokerPartitionAssignments))
for i, assignment := range conf.BrokerPartitionAssignments {
assignedPartitions[i] = &mq_pb.SubscriberToSubCoordinatorResponse_AssignedPartition{
Partition: assignment.Partition,
Broker: assignment.LeaderBroker,
}
}
// send partition assignment to subscriber
cgi.ResponseChan <- &mq_pb.SubscriberToSubCoordinatorResponse{
Message: &mq_pb.SubscriberToSubCoordinatorResponse_Assignment_{
Assignment: &mq_pb.SubscriberToSubCoordinatorResponse_Assignment{
AssignedPartitions: assignedPartitions,
PartitionAssignments: conf.BrokerPartitionAssignments,
},
},
}

5
weed/mq/broker/broker_server.go

@ -1,6 +1,7 @@
package broker
import (
"context"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/mq/pub_balancer"
"github.com/seaweedfs/seaweedfs/weed/mq/sub_coordinator"
@ -68,9 +69,9 @@ func NewMessageBroker(option *MessageQueueBrokerOption, grpcDialOption grpc.Dial
pub_broker_balancer.OnAddBroker = mqBroker.Coordinator.OnSubAddBroker
pub_broker_balancer.OnRemoveBroker = mqBroker.Coordinator.OnSubRemoveBroker
go mqBroker.MasterClient.KeepConnectedToMaster()
go mqBroker.MasterClient.KeepConnectedToMaster(context.Background())
existingNodes := cluster.ListExistingPeerUpdates(mqBroker.MasterClient.GetMaster(), grpcDialOption, option.FilerGroup, cluster.FilerType)
existingNodes := cluster.ListExistingPeerUpdates(mqBroker.MasterClient.GetMaster(context.Background()), grpcDialOption, option.FilerGroup, cluster.FilerType)
for _, newNode := range existingNodes {
mqBroker.OnBrokerUpdate(newNode, time.Now())
}
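KeepConnectedToMaster and GetMaster now take a context, so a caller can bound or cancel the master connection instead of holding it forever. A minimal sketch of wiring a cancellable context through, assuming (not shown here) that cancellation stops the reconnect loop:

```go
// inside broker setup; this ctx is a sketch, the diff passes context.Background()
ctx, cancel := context.WithCancel(context.Background())
defer cancel() // hypothetical shutdown hook: stops the keep-connected loop
go mqBroker.MasterClient.KeepConnectedToMaster(ctx)
master := mqBroker.MasterClient.GetMaster(ctx)
_ = master
```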
