
Update TiKV client version and add 1PC (one-phase commit) support

pull/3233/head
yulai.li 3 years ago
parent commit 46e0b629e5
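
For context on the subject line: 1PC (one-phase commit) lets the TiKV client commit a transaction whose writes all fall in a single region in one round trip, skipping the usual two-phase prewrite/commit. Below is a minimal sketch of how a caller can opt in with the updated `tikv/client-go/v2` API; the PD endpoint and keys are placeholders, and this illustrates the client option rather than quoting this commit's filer-store code:

```go
package main

import (
	"context"

	"github.com/tikv/client-go/v2/txnkv"
)

func main() {
	// Connect via the TiKV placement driver (PD) endpoint (placeholder address).
	client, err := txnkv.NewClient([]string{"127.0.0.1:2379"})
	if err != nil {
		panic(err)
	}
	defer client.Close()

	txn, err := client.Begin()
	if err != nil {
		panic(err)
	}
	// Opt in to one-phase commit: when all mutations land in a single
	// region, TiKV commits them in one RPC instead of prewrite + commit.
	txn.SetEnable1PC(true)

	if err := txn.Set([]byte("demo-key"), []byte("demo-value")); err != nil {
		panic(err)
	}
	if err := txn.Commit(context.Background()); err != nil {
		panic(err)
	}
}
```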
Changed files (lines changed):

  1. .github/FUNDING.yml (4)
  2. .github/dependabot.yml (10)
  3. .github/pull_request_template.md (10)
  4. .github/workflows/binaries_dev.yml (124)
  5. .github/workflows/binaries_release0.yml (40)
  6. .github/workflows/binaries_release1.yml (59)
  7. .github/workflows/binaries_release2.yml (59)
  8. .github/workflows/binaries_release3.yml (59)
  9. .github/workflows/binaries_release4.yml (60)
  10. .github/workflows/binary_test.yml (50)
  11. .github/workflows/cleanup.yml (22)
  12. .github/workflows/codeql.yml (43)
  13. .github/workflows/container_dev.yml (66)
  14. .github/workflows/container_latest.yml (82)
  15. .github/workflows/container_release.yml (121)
  16. .github/workflows/container_release1.yml (57)
  17. .github/workflows/container_release2.yml (59)
  18. .github/workflows/container_release3.yml (58)
  19. .github/workflows/container_release4.yml (58)
  20. .github/workflows/container_release5.yml (58)
  21. .github/workflows/container_test.yml (53)
  22. .github/workflows/depsreview.yml (14)
  23. .github/workflows/go.yml (17)
  24. .github/workflows/release.yml (68)
  25. .gitignore (4)
  26. Makefile (14)
  27. README.md (46)
  28. backers.md (8)
  29. docker/Dockerfile (56)
  30. docker/Dockerfile.gccgo_build (1)
  31. docker/Dockerfile.go_build (6)
  32. docker/Dockerfile.local (1)
  33. docker/Dockerfile.rocksdb_large (31)
  34. docker/Dockerfile.s3tests (2)
  35. docker/Makefile (31)
  36. docker/README.md (5)
  37. docker/compose/fluent.json (4)
  38. docker/compose/local-auditlog-compose.yml (36)
  39. docker/compose/local-cluster-compose.yml (29)
  40. docker/compose/local-hashicorp-raft-compose.yml (89)
  41. docker/compose/local-nextcloud-compose.yml (44)
  42. docker/compose/local-s3tests-compose.yml (4)
  43. docker/compose/local-sync-mount-compose.yml (21)
  44. docker/compose/master-cloud.toml (1)
  45. docker/compose/test-etcd-filer.yml (61)
  46. docker/compose/test-ydb-filer.yml (35)
  47. docker/compose/tls.env (2)
  48. docker/entrypoint.sh (26)
  49. docker/filer_rocksdb.toml (3)
  50. docker/prometheus/prometheus.yml (1)
  51. docker/seaweedfs-compose.yml (19)
  52. docker/seaweedfs.sql (2)
  53. go.mod (286)
  54. go.sum (1679)
  55. k8s/helm_charts2/Chart.yaml (4)
  56. k8s/helm_charts2/templates/_helpers.tpl (15)
  57. k8s/helm_charts2/templates/cronjob.yaml (58)
  58. k8s/helm_charts2/templates/filer-servicemonitor.yaml (2)
  59. k8s/helm_charts2/templates/filer-statefulset.yaml (40)
  60. k8s/helm_charts2/templates/ingress.yaml (90)
  61. k8s/helm_charts2/templates/s3-deployment.yaml (8)
  62. k8s/helm_charts2/templates/s3-servicemonitor.yaml (2)
  63. k8s/helm_charts2/templates/seaweedfs-s3-secret.yaml (8)
  64. k8s/helm_charts2/templates/service-account.yaml (4)
  65. k8s/helm_charts2/templates/volume-servicemonitor.yaml (2)
  66. k8s/helm_charts2/templates/volume-statefulset.yaml (2)
  67. k8s/helm_charts2/values.yaml (72)
  68. note/SeaweedFS_Gateway_RemoteObjectStore.png (BIN)
  69. other/java/client/pom.xml (8)
  70. other/java/client/pom.xml.deploy (6)
  71. other/java/client/pom_debug.xml (4)
  72. other/java/client/src/main/java/seaweedfs/client/ChunkCache.java (2)
  73. other/java/client/src/main/java/seaweedfs/client/FilerClient.java (53)
  74. other/java/client/src/main/java/seaweedfs/client/FilerGrpcClient.java (11)
  75. other/java/client/src/main/java/seaweedfs/client/ReadChunks.java (109)
  76. other/java/client/src/main/java/seaweedfs/client/RemoteUtil.java (6)
  77. other/java/client/src/main/java/seaweedfs/client/SeaweedCipher.java (2)
  78. other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java (90)
  79. other/java/client/src/main/java/seaweedfs/client/SeaweedUtil.java (26)
  80. other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java (30)
  81. other/java/client/src/main/java/seaweedfs/client/VolumeIdCache.java (2)
  82. other/java/client/src/main/proto/filer.proto (52)
  83. other/java/client/src/test/java/seaweedfs/client/SeaweedReadTest.java (123)
  84. other/java/examples/pom.xml (6)
  85. other/java/hdfs-over-ftp/pom.xml (2)
  86. other/java/hdfs2/dependency-reduced-pom.xml (4)
  87. other/java/hdfs2/pom.xml (6)
  88. other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystem.java (8)
  89. other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java (5)
  90. other/java/hdfs3/dependency-reduced-pom.xml (4)
  91. other/java/hdfs3/pom.xml (6)
  92. other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystem.java (8)
  93. other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java (5)
  94. other/metrics/grafana_seaweedfs.json (16)
  95. other/metrics/grafana_seaweedfs_heartbeat.json (1932)
  96. test/s3/basic/basic_test.go (15)
  97. test/s3/compatibility/.gitignore (2)
  98. test/s3/compatibility/Dockerfile (11)
  99. test/s3/compatibility/README.md (13)
  100. test/s3/compatibility/prepare.sh (5)

4
.github/FUNDING.yml

@@ -0,0 +1,4 @@
# These are supported funding model platforms
github: chrislusf
patreon: seaweedfs

10
.github/dependabot.yml

@@ -0,0 +1,10 @@
version: 2
updates:
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "weekly"
  - package-ecosystem: gomod
    directory: "/"
    schedule:
      interval: weekly

10
.github/pull_request_template.md

@@ -0,0 +1,10 @@
# What problem are we solving?
# How are we solving the problem?
# Checks
- [ ] I have added unit tests if possible.
- [ ] I will add related wiki document changes and link to this PR after merging.

124
.github/workflows/binaries_dev.yml

@@ -0,0 +1,124 @@
name: "go: build dev binaries"

on:
  push:
    branches: [ master ]

permissions:
  contents: read

jobs:

  cleanup:
    permissions:
      contents: write # for mknejp/delete-release-assets to delete release assets
    runs-on: ubuntu-latest

    steps:

      - name: Delete old release assets
        uses: mknejp/delete-release-assets@a8aaab13272b1eaac16cc46dddd3f725b97ee05a # v1
        with:
          token: ${{ github.token }}
          tag: dev
          fail-if-no-assets: false
          assets: |
            weed-*

  build_dev_linux_windows:
    permissions:
      contents: write # for wangyoucao577/go-release-action to upload release assets
    needs: cleanup
    runs-on: ubuntu-latest
    strategy:
      matrix:
        goos: [linux, windows]
        goarch: [amd64]

    steps:

      - name: Check out code into the Go module directory
        uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2

      - name: Set BUILD_TIME env
        run: echo BUILD_TIME=$(date -u +%Y%m%d-%H%M) >> ${GITHUB_ENV}

      - name: Go Release Binaries Large Disk
        uses: wangyoucao577/go-release-action@16624612d4e2b73de613857a362d294700207fff # v1.22
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
          goarch: ${{ matrix.goarch }}
          release_tag: dev
          overwrite: true
          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
          build_flags: -tags 5BytesOffset # optional, default is
          ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
          # Where to run `go build .`
          project_path: weed
          binary_name: weed-large-disk
          asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"

      - name: Go Release Binaries Normal Volume Size
        uses: wangyoucao577/go-release-action@16624612d4e2b73de613857a362d294700207fff # v1.22
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
          goarch: ${{ matrix.goarch }}
          release_tag: dev
          overwrite: true
          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
          ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
          # Where to run `go build .`
          project_path: weed
          binary_name: weed-normal-disk
          asset_name: "weed-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"

  build_dev_darwin:
    permissions:
      contents: write # for wangyoucao577/go-release-action to upload release assets
    needs: build_dev_linux_windows
    runs-on: ubuntu-latest
    strategy:
      matrix:
        goos: [darwin]
        goarch: [amd64, arm64]

    steps:

      - name: Check out code into the Go module directory
        uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2

      - name: Set BUILD_TIME env
        run: echo BUILD_TIME=$(date -u +%Y%m%d-%H%M) >> ${GITHUB_ENV}

      - name: Go Release Binaries Large Disk
        uses: wangyoucao577/go-release-action@16624612d4e2b73de613857a362d294700207fff # v1.22
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
          goarch: ${{ matrix.goarch }}
          release_tag: dev
          overwrite: true
          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
          build_flags: -tags 5BytesOffset # optional, default is
          ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
          # Where to run `go build .`
          project_path: weed
          binary_name: weed-large-disk
          asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"

      - name: Go Release Binaries Normal Volume Size
        uses: wangyoucao577/go-release-action@16624612d4e2b73de613857a362d294700207fff # v1.22
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
          goarch: ${{ matrix.goarch }}
          release_tag: dev
          overwrite: true
          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
          ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
          # Where to run `go build .`
          project_path: weed
          binary_name: weed-normal-disk
          asset_name: "weed-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"

40
.github/workflows/release_binaries.yml → .github/workflows/binaries_release0.yml

@@ -1,44 +1,42 @@
 # This is a basic workflow to help you get started with Actions
 
-name: "go: build versioned binaries"
+name: "go: build versioned binaries for windows"
 
 on:
-  release:
-    types: [created]
+  push:
+    tags:
+      - '*'
 
   # Allows you to run this workflow manually from the Actions tab
   workflow_dispatch:
 
 # A workflow run is made up of one or more jobs that can run sequentially or in parallel
+permissions:
+  contents: read
 
 jobs:
-  build:
+  build-release-binaries_windows:
+    permissions:
+      contents: write # for wangyoucao577/go-release-action to upload release assets
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        goos: [linux, windows, darwin, freebsd, netbsd, openbsd]
-        goarch: [amd64, arm, arm64, 386]
-        exclude:
-          - goarch: arm
-            goos: darwin
-          - goarch: 386
-            goos: darwin
-          - goarch: arm
-            goos: windows
-          - goarch: arm64
-            goos: windows
+        goos: [windows]
+        goarch: [amd64]
 
     # Steps represent a sequence of tasks that will be executed as part of the job
     steps:
       # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
-      - uses: actions/checkout@v2
-      - name: Go Release Binaries
-        uses: wangyoucao577/go-release-action@v1.20
+      - uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2
+      - name: Go Release Binaries Normal Volume Size
+        uses: wangyoucao577/go-release-action@16624612d4e2b73de613857a362d294700207fff # v1.22
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
           goos: ${{ matrix.goos }}
           goarch: ${{ matrix.goarch }}
           overwrite: true
-          pre_command: export CGO_ENABLED=0
+          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
           # build_flags: -tags 5BytesOffset # optional, default is
           ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
           # Where to run `go build .`
@@ -46,13 +44,13 @@ jobs:
           binary_name: weed
           asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
       - name: Go Release Large Disk Binaries
-        uses: wangyoucao577/go-release-action@v1.20
+        uses: wangyoucao577/go-release-action@16624612d4e2b73de613857a362d294700207fff # v1.22
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
           goos: ${{ matrix.goos }}
           goarch: ${{ matrix.goarch }}
           overwrite: true
-          pre_command: export CGO_ENABLED=0
+          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
           build_flags: -tags 5BytesOffset # optional, default is
           ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
           # Where to run `go build .`

59
.github/workflows/binaries_release1.yml

@@ -0,0 +1,59 @@
# This is a basic workflow to help you get started with Actions

name: "go: build versioned binaries for linux"

on:
  push:
    tags:
      - '*'

  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:

# A workflow run is made up of one or more jobs that can run sequentially or in parallel
permissions:
  contents: read

jobs:
  build-release-binaries_linux:
    permissions:
      contents: write # for wangyoucao577/go-release-action to upload release assets
    runs-on: ubuntu-latest
    strategy:
      matrix:
        goos: [linux]
        goarch: [amd64, arm, arm64]

    # Steps represent a sequence of tasks that will be executed as part of the job
    steps:
      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
      - uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2
      - name: Go Release Binaries Normal Volume Size
        uses: wangyoucao577/go-release-action@16624612d4e2b73de613857a362d294700207fff # v1.22
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
          goarch: ${{ matrix.goarch }}
          overwrite: true
          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
          # build_flags: -tags 5BytesOffset # optional, default is
          ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
          # Where to run `go build .`
          project_path: weed
          binary_name: weed
          asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
      - name: Go Release Large Disk Binaries
        uses: wangyoucao577/go-release-action@16624612d4e2b73de613857a362d294700207fff # v1.22
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
          goarch: ${{ matrix.goarch }}
          overwrite: true
          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
          build_flags: -tags 5BytesOffset # optional, default is
          ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
          # Where to run `go build .`
          project_path: weed
          binary_name: weed
          asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}_large_disk"

59
.github/workflows/binaries_release2.yml

@@ -0,0 +1,59 @@
# This is a basic workflow to help you get started with Actions

name: "go: build versioned binaries for darwin"

on:
  push:
    tags:
      - '*'

  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:

# A workflow run is made up of one or more jobs that can run sequentially or in parallel
permissions:
  contents: read

jobs:
  build-release-binaries_darwin:
    permissions:
      contents: write # for wangyoucao577/go-release-action to upload release assets
    runs-on: ubuntu-latest
    strategy:
      matrix:
        goos: [darwin]
        goarch: [amd64, arm64]

    # Steps represent a sequence of tasks that will be executed as part of the job
    steps:
      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
      - uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2
      - name: Go Release Binaries Normal Volume Size
        uses: wangyoucao577/go-release-action@16624612d4e2b73de613857a362d294700207fff # v1.22
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
          goarch: ${{ matrix.goarch }}
          overwrite: true
          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
          # build_flags: -tags 5BytesOffset # optional, default is
          ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
          # Where to run `go build .`
          project_path: weed
          binary_name: weed
          asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
      - name: Go Release Large Disk Binaries
        uses: wangyoucao577/go-release-action@16624612d4e2b73de613857a362d294700207fff # v1.22
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
          goarch: ${{ matrix.goarch }}
          overwrite: true
          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
          build_flags: -tags 5BytesOffset # optional, default is
          ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
          # Where to run `go build .`
          project_path: weed
          binary_name: weed
          asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}_large_disk"

59
.github/workflows/binaries_release3.yml

@@ -0,0 +1,59 @@
# This is a basic workflow to help you get started with Actions

name: "go: build versioned binaries for freebsd"

on:
  push:
    tags:
      - '*'

  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:

# A workflow run is made up of one or more jobs that can run sequentially or in parallel
permissions:
  contents: read

jobs:
  build-release-binaries_freebsd:
    permissions:
      contents: write # for wangyoucao577/go-release-action to upload release assets
    runs-on: ubuntu-latest
    strategy:
      matrix:
        goos: [freebsd]
        goarch: [amd64, arm, arm64]

    # Steps represent a sequence of tasks that will be executed as part of the job
    steps:
      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
      - uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2
      - name: Go Release Binaries Normal Volume Size
        uses: wangyoucao577/go-release-action@16624612d4e2b73de613857a362d294700207fff # v1.22
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
          goarch: ${{ matrix.goarch }}
          overwrite: true
          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
          # build_flags: -tags 5BytesOffset # optional, default is
          ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
          # Where to run `go build .`
          project_path: weed
          binary_name: weed
          asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
      - name: Go Release Large Disk Binaries
        uses: wangyoucao577/go-release-action@16624612d4e2b73de613857a362d294700207fff # v1.22
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
          goarch: ${{ matrix.goarch }}
          overwrite: true
          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
          build_flags: -tags 5BytesOffset # optional, default is
          ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
          # Where to run `go build .`
          project_path: weed
          binary_name: weed
          asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}_large_disk"

60
.github/workflows/binaries_release4.yml

@@ -0,0 +1,60 @@
# This is a basic workflow to help you get started with Actions

name: "go: build versioned binaries for linux with all tags"

on:
  push:
    tags:
      - '*'

  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:

# A workflow run is made up of one or more jobs that can run sequentially or in parallel
permissions:
  contents: read

jobs:
  build-release-binaries_linux:
    permissions:
      contents: write # for wangyoucao577/go-release-action to upload release assets
    runs-on: ubuntu-latest
    strategy:
      matrix:
        goos: [linux]
        goarch: [amd64]

    # Steps represent a sequence of tasks that will be executed as part of the job
    steps:
      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
      - uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2
      - name: Go Release Binaries Normal Volume Size
        uses: wangyoucao577/go-release-action@16624612d4e2b73de613857a362d294700207fff # v1.22
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
          goarch: ${{ matrix.goarch }}
          overwrite: true
          build_flags: -tags elastic,ydb,gocdk
          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
          # build_flags: -tags 5BytesOffset # optional, default is
          ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
          # Where to run `go build .`
          project_path: weed
          binary_name: weed
          asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}_full"
      - name: Go Release Large Disk Binaries
        uses: wangyoucao577/go-release-action@16624612d4e2b73de613857a362d294700207fff # v1.22
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
          goarch: ${{ matrix.goarch }}
          overwrite: true
          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
          build_flags: -tags 5BytesOffset,elastic,ydb,gocdk
          ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
          # Where to run `go build .`
          project_path: weed
          binary_name: weed
          asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}_full_large_disk"

50
.github/workflows/binary_test.yml

@@ -1,50 +0,0 @@
name: "go: test building cross-platform binary"

on:
  pull_request:
  workflow_dispatch: []

jobs:

  build:
    name: Build
    runs-on: ubuntu-latest
    strategy:
      matrix:
        goos: [linux, windows, darwin, freebsd, netbsd, openbsd]
        goarch: [amd64, arm, arm64, 386]
        exclude:
          - goarch: arm
            goos: darwin
          - goarch: 386
            goos: darwin
          - goarch: arm
            goos: windows
          - goarch: arm64
            goos: windows

    concurrency:
      group: ${{ github.head_ref }}/binary_test/${{ matrix.goos }}/${{ matrix.goarch }}
      cancel-in-progress: true

    steps:

      - name: Set up Go 1.x
        uses: actions/setup-go@v2
        with:
          go-version: ^1.13
        id: go

      - name: Check out code into the Go module directory
        uses: actions/checkout@v2

      - name: Get dependencies
        run: |
          cd weed; go get -v -t -d ./...
          if [ -f Gopkg.toml ]; then
            curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
            dep ensure
          fi

      - name: Build
        run: cd weed; GOOS=${{ matrix.goos }} GOARCH=${{ matrix.goarch }} go build -v .

22
.github/workflows/cleanup.yml

@@ -1,22 +0,0 @@
name: "chore: cleanup"

on:
  push:
    branches: [ master ]

jobs:

  build:
    name: Build
    runs-on: ubuntu-latest

    steps:

      - name: Delete old release assets
        uses: mknejp/delete-release-assets@v1
        with:
          token: ${{ github.token }}
          tag: dev
          fail-if-no-assets: false
          assets: |
            weed-*

43
.github/workflows/codeql.yml

@@ -0,0 +1,43 @@
name: "Code Scanning - Action"

on:
  pull_request:

jobs:
  CodeQL-Build:
    # CodeQL runs on ubuntu-latest, windows-latest, and macos-latest
    runs-on: ubuntu-latest

    permissions:
      # required for all workflows
      security-events: write

    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v2
        # Override language selection by uncommenting this and choosing your languages
        with:
          languages: go

      # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
      # If this step fails, then you should remove it and run the build manually (see below).
      - name: Autobuild
        uses: github/codeql-action/autobuild@v2

      # ℹ️ Command-line programs to run using the OS shell.
      # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun

      # ✏️ If the Autobuild fails above, remove it and uncomment the following
      # three lines and modify them (or add more) to build your code if your
      # project uses a compiled language

      #- run: |
      #   make bootstrap
      #   make release

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v2

66
.github/workflows/container_dev.yml

@@ -0,0 +1,66 @@
name: "docker: build dev containers"

on:
  push:
    branches: [ master ]
  workflow_dispatch: {}

permissions:
  contents: read

jobs:
  build-dev-containers:
    runs-on: [ubuntu-latest]

    steps:
      -
        name: Checkout
        uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2
      -
        name: Docker meta
        id: docker_meta
        uses: docker/metadata-action@69f6fc9d46f2f8bf0d5491e4aabe0bb8c6a4678a # v3
        with:
          images: |
            chrislusf/seaweedfs
            ghcr.io/chrislusf/seaweedfs
          tags: |
            type=raw,value=dev
          labels: |
            org.opencontainers.image.title=seaweedfs
            org.opencontainers.image.description=SeaweedFS is a distributed storage system for blobs, objects, files, and data lake, to store and serve billions of files fast!
            org.opencontainers.image.vendor=Chris Lu
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@8b122486cedac8393e77aa9734c3528886e4a1a8 # v1
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@dc7b9719a96d48369863986a06765841d7ea23f6 # v1
        with:
          buildkitd-flags: "--debug"
      -
        name: Login to Docker Hub
        if: github.event_name != 'pull_request'
        uses: docker/login-action@49ed152c8eca782a232dede0303416e8f356c37b # v1
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}
      -
        name: Login to GHCR
        if: github.event_name != 'pull_request'
        uses: docker/login-action@49ed152c8eca782a232dede0303416e8f356c37b # v1
        with:
          registry: ghcr.io
          username: ${{ secrets.GHCR_USERNAME }}
          password: ${{ secrets.GHCR_TOKEN }}
      -
        name: Build
        uses: docker/build-push-action@e551b19e49efd4e98792db7592c17c09b89db8d8 # v2
        with:
          context: ./docker
          push: ${{ github.event_name != 'pull_request' }}
          file: ./docker/Dockerfile.go_build
          platforms: linux/amd64, linux/arm64
          tags: ${{ steps.docker_meta.outputs.tags }}
          labels: ${{ steps.docker_meta.outputs.labels }}

82
.github/workflows/container_latest.yml

@@ -1,113 +1,63 @@
-name: "docker: build latest containers"
+name: "docker: build latest container"
 
 on:
   push:
-    branches:
-      - master
-  workflow_dispatch: []
+    tags:
+      - '*'
+  workflow_dispatch: {}
+
+permissions:
+  contents: read
 
 jobs:
-  build-latest:
-    runs-on: [ubuntu-latest]
-
-    steps:
-      -
-        name: Checkout
-        uses: actions/checkout@v2
-      -
-        name: Docker meta
-        id: docker_meta
-        uses: docker/metadata-action@v3
-        with:
-          images: |
-            chrislusf/seaweedfs
-            ghcr.io/chrislusf/seaweedfs
-          tags: |
-            type=raw,value=latest
-          labels: |
-            org.opencontainers.image.title=seaweedfs
-            org.opencontainers.image.vendor=Chris Lu
-      -
-        name: Set up QEMU
-        uses: docker/setup-qemu-action@v1
-      -
-        name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
-        with:
-          buildkitd-flags: "--debug"
-      -
-        name: Login to Docker Hub
-        if: github.event_name != 'pull_request'
-        uses: docker/login-action@v1
-        with:
-          username: ${{ secrets.DOCKER_USERNAME }}
-          password: ${{ secrets.DOCKER_PASSWORD }}
-      -
-        name: Login to GHCR
-        if: github.event_name != 'pull_request'
-        uses: docker/login-action@v1
-        with:
-          registry: ghcr.io
-          username: ${{ secrets.GHCR_USERNAME }}
-          password: ${{ secrets.GHCR_TOKEN }}
-      -
-        name: Build
-        uses: docker/build-push-action@v2
-        with:
-          context: ./docker
-          push: ${{ github.event_name != 'pull_request' }}
-          file: ./docker/Dockerfile
-          platforms: linux/amd64, linux/arm, linux/arm64, linux/386
-          tags: ${{ steps.docker_meta.outputs.tags }}
-          labels: ${{ steps.docker_meta.outputs.labels }}
-
-  build-dev:
+  build-latest-container:
     runs-on: [ubuntu-latest]
 
     steps:
       -
         name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2
       -
         name: Docker meta
         id: docker_meta
-        uses: docker/metadata-action@v3
+        uses: docker/metadata-action@69f6fc9d46f2f8bf0d5491e4aabe0bb8c6a4678a # v3
         with:
           images: |
             chrislusf/seaweedfs
             ghcr.io/chrislusf/seaweedfs
           tags: |
-            type=raw,value=dev
+            type=raw,value=latest
           labels: |
             org.opencontainers.image.title=seaweedfs
             org.opencontainers.image.description=SeaweedFS is a distributed storage system for blobs, objects, files, and data lake, to store and serve billions of files fast!
             org.opencontainers.image.vendor=Chris Lu
       -
         name: Set up QEMU
-        uses: docker/setup-qemu-action@v1
+        uses: docker/setup-qemu-action@8b122486cedac8393e77aa9734c3528886e4a1a8 # v1
       -
         name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
+        uses: docker/setup-buildx-action@dc7b9719a96d48369863986a06765841d7ea23f6 # v1
         with:
           buildkitd-flags: "--debug"
       -
         name: Login to Docker Hub
         if: github.event_name != 'pull_request'
-        uses: docker/login-action@v1
+        uses: docker/login-action@49ed152c8eca782a232dede0303416e8f356c37b # v1
         with:
           username: ${{ secrets.DOCKER_USERNAME }}
           password: ${{ secrets.DOCKER_PASSWORD }}
       -
         name: Login to GHCR
         if: github.event_name != 'pull_request'
-        uses: docker/login-action@v1
+        uses: docker/login-action@49ed152c8eca782a232dede0303416e8f356c37b # v1
         with:
           registry: ghcr.io
           username: ${{ secrets.GHCR_USERNAME }}
           password: ${{ secrets.GHCR_TOKEN }}
       -
         name: Build
-        uses: docker/build-push-action@v2
+        uses: docker/build-push-action@e551b19e49efd4e98792db7592c17c09b89db8d8 # v2
         with:
           context: ./docker
           push: ${{ github.event_name != 'pull_request' }}

121
.github/workflows/container_release.yml

@@ -1,121 +0,0 @@
name: "docker: build release containers"

on:
  push:
    tags:
      - '*'
  workflow_dispatch: []

jobs:
  build-default:
    runs-on: [ubuntu-latest]

    steps:
      -
        name: Checkout
        uses: actions/checkout@v2
      -
        name: Docker meta
        id: docker_meta
        uses: docker/metadata-action@v3
        with:
          images: |
            chrislusf/seaweedfs
            ghcr.io/chrislusf/seaweedfs
          tags: |
            type=ref,event=tag
          flavor: |
            latest=false
          labels: |
            org.opencontainers.image.title=seaweedfs
            org.opencontainers.image.description=SeaweedFS is a distributed storage system for blobs, objects, files, and data lake, to store and serve billions of files fast!
            org.opencontainers.image.vendor=Chris Lu
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
        with:
          buildkitd-flags: "--debug"
      -
        name: Login to Docker Hub
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}
      -
        name: Login to GHCR
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v1
        with:
          registry: ghcr.io
          username: ${{ secrets.GHCR_USERNAME }}
          password: ${{ secrets.GHCR_TOKEN }}
      -
        name: Build
        uses: docker/build-push-action@v2
        with:
          context: ./docker
          push: ${{ github.event_name != 'pull_request' }}
          file: ./docker/Dockerfile.go_build
          platforms: linux/amd64, linux/arm, linux/arm64, linux/386
          tags: ${{ steps.docker_meta.outputs.tags }}
          labels: ${{ steps.docker_meta.outputs.labels }}

  build-large:
    runs-on: [ubuntu-latest]

    steps:
      -
        name: Checkout
        uses: actions/checkout@v2
      -
        name: Docker meta
        id: docker_meta
        uses: docker/metadata-action@v3
        with:
          images: |
            chrislusf/seaweedfs
            ghcr.io/chrislusf/seaweedfs
          tags: |
            type=ref,event=tag,suffix=_large_disk
          flavor: |
            latest=false
          labels: |
            org.opencontainers.image.title=seaweedfs
            org.opencontainers.image.description=SeaweedFS is a distributed storage system for blobs, objects, files, and data lake, to store and serve billions of files fast!
            org.opencontainers.image.vendor=Chris Lu
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
        with:
          buildkitd-flags: "--debug"
      -
        name: Login to Docker Hub
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}
      -
        name: Login to GHCR
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v1
        with:
          registry: ghcr.io
          username: ${{ secrets.GHCR_USERNAME }}
          password: ${{ secrets.GHCR_TOKEN }}
      -
        name: Build
        uses: docker/build-push-action@v2
        with:
          context: ./docker
          push: ${{ github.event_name != 'pull_request' }}
          file: ./docker/Dockerfile.go_build_large
          platforms: linux/amd64, linux/arm, linux/arm64, linux/386
          tags: ${{ steps.docker_meta.outputs.tags }}
          labels: ${{ steps.docker_meta.outputs.labels }}

57
.github/workflows/container_release1.yml

@@ -0,0 +1,57 @@
name: "docker: build release containers for normal volume"

on:
  push:
    tags:
      - '*'
  workflow_dispatch: {}

permissions:
  contents: read

jobs:
  build-default-release-container:
    runs-on: [ubuntu-latest]

    steps:
      -
        name: Checkout
        uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2
      -
        name: Docker meta
        id: docker_meta
        uses: docker/metadata-action@69f6fc9d46f2f8bf0d5491e4aabe0bb8c6a4678a # v3
        with:
          images: |
            chrislusf/seaweedfs
          tags: |
            type=ref,event=tag
          flavor: |
            latest=false
          labels: |
            org.opencontainers.image.title=seaweedfs
            org.opencontainers.image.description=SeaweedFS is a distributed storage system for blobs, objects, files, and data lake, to store and serve billions of files fast!
            org.opencontainers.image.vendor=Chris Lu
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@8b122486cedac8393e77aa9734c3528886e4a1a8 # v1
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@dc7b9719a96d48369863986a06765841d7ea23f6 # v1
      -
        name: Login to Docker Hub
        if: github.event_name != 'pull_request'
        uses: docker/login-action@49ed152c8eca782a232dede0303416e8f356c37b # v1
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}
      -
        name: Build
        uses: docker/build-push-action@e551b19e49efd4e98792db7592c17c09b89db8d8 # v2
        with:
          context: ./docker
          push: ${{ github.event_name != 'pull_request' }}
          file: ./docker/Dockerfile.go_build
          platforms: linux/amd64, linux/arm, linux/arm64, linux/386
          tags: ${{ steps.docker_meta.outputs.tags }}
          labels: ${{ steps.docker_meta.outputs.labels }}

59
.github/workflows/container_release2.yml

@@ -0,0 +1,59 @@
name: "docker: build release containers for large volume"

on:
  push:
    tags:
      - '*'
  workflow_dispatch: {}

permissions:
  contents: read

jobs:
  build-large-release-container:
    runs-on: [ubuntu-latest]

    steps:
      -
        name: Checkout
        uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2
      -
        name: Docker meta
        id: docker_meta
        uses: docker/metadata-action@69f6fc9d46f2f8bf0d5491e4aabe0bb8c6a4678a # v3
        with:
          images: |
            chrislusf/seaweedfs
          tags: |
            type=ref,event=tag,suffix=_large_disk
          flavor: |
            latest=false
          labels: |
            org.opencontainers.image.title=seaweedfs
            org.opencontainers.image.description=SeaweedFS is a distributed storage system for blobs, objects, files, and data lake, to store and serve billions of files fast!
            org.opencontainers.image.vendor=Chris Lu
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@8b122486cedac8393e77aa9734c3528886e4a1a8 # v1
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@dc7b9719a96d48369863986a06765841d7ea23f6 # v1
      -
        name: Login to Docker Hub
        if: github.event_name != 'pull_request'
        uses: docker/login-action@49ed152c8eca782a232dede0303416e8f356c37b # v1
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}
      -
        name: Build
        uses: docker/build-push-action@e551b19e49efd4e98792db7592c17c09b89db8d8 # v2
        with:
          context: ./docker
          push: ${{ github.event_name != 'pull_request' }}
          file: ./docker/Dockerfile.go_build
          build-args: TAGS=5BytesOffset
          platforms: linux/amd64, linux/arm, linux/arm64, linux/386
          tags: ${{ steps.docker_meta.outputs.tags }}
          labels: ${{ steps.docker_meta.outputs.labels }}

58
.github/workflows/container_release3.yml

@@ -0,0 +1,58 @@
name: "docker: build release containers for rocksdb"

on:
  push:
    tags:
      - '*'
  workflow_dispatch: {}

permissions:
  contents: read

jobs:
  build-large-release-container_rocksdb:
    runs-on: [ubuntu-latest]

    steps:
      -
        name: Checkout
        uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2
      -
        name: Docker meta
        id: docker_meta
        uses: docker/metadata-action@69f6fc9d46f2f8bf0d5491e4aabe0bb8c6a4678a # v3
        with:
          images: |
            chrislusf/seaweedfs
          tags: |
            type=ref,event=tag,suffix=_large_disk_rocksdb
          flavor: |
            latest=false
          labels: |
            org.opencontainers.image.title=seaweedfs
            org.opencontainers.image.description=SeaweedFS is a distributed storage system for blobs, objects, files, and data lake, to store and serve billions of files fast!
            org.opencontainers.image.vendor=Chris Lu
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@8b122486cedac8393e77aa9734c3528886e4a1a8 # v1
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@dc7b9719a96d48369863986a06765841d7ea23f6 # v1
      -
        name: Login to Docker Hub
        if: github.event_name != 'pull_request'
        uses: docker/login-action@49ed152c8eca782a232dede0303416e8f356c37b # v1
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}
      -
        name: Build
        uses: docker/build-push-action@e551b19e49efd4e98792db7592c17c09b89db8d8 # v2
        with:
          context: ./docker
          push: ${{ github.event_name != 'pull_request' }}
          file: ./docker/Dockerfile.rocksdb_large
          platforms: linux/amd64
          tags: ${{ steps.docker_meta.outputs.tags }}
          labels: ${{ steps.docker_meta.outputs.labels }}

58
.github/workflows/container_release4.yml

@@ -0,0 +1,58 @@
name: "docker: build release containers for all tags"

on:
  push:
    tags:
      - '*'
  workflow_dispatch: {}

permissions:
  contents: read

jobs:
  build-default-release-container:
    runs-on: [ubuntu-latest]

    steps:
      -
        name: Checkout
        uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2
      -
        name: Docker meta
        id: docker_meta
        uses: docker/metadata-action@69f6fc9d46f2f8bf0d5491e4aabe0bb8c6a4678a # v3
        with:
          images: |
            chrislusf/seaweedfs
          tags: |
            type=ref,event=tag,suffix=_full
          flavor: |
            latest=false
          labels: |
            org.opencontainers.image.title=seaweedfs
            org.opencontainers.image.description=SeaweedFS is a distributed storage system for blobs, objects, files, and data lake, to store and serve billions of files fast!
            org.opencontainers.image.vendor=Chris Lu
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@8b122486cedac8393e77aa9734c3528886e4a1a8 # v1
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@dc7b9719a96d48369863986a06765841d7ea23f6 # v1
      -
        name: Login to Docker Hub
        if: github.event_name != 'pull_request'
        uses: docker/login-action@49ed152c8eca782a232dede0303416e8f356c37b # v1
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}
      -
        name: Build
        uses: docker/build-push-action@e551b19e49efd4e98792db7592c17c09b89db8d8 # v2
        with:
          context: ./docker
          push: ${{ github.event_name != 'pull_request' }}
          file: ./docker/Dockerfile.go_build
          build-args: TAGS=elastic,ydb,gocdk
          platforms: linux/amd64
          tags: ${{ steps.docker_meta.outputs.tags }}
          labels: ${{ steps.docker_meta.outputs.labels }}

58
.github/workflows/container_release5.yml

@@ -0,0 +1,58 @@
name: "docker: build release containers for all tags and large volume"

on:
  push:
    tags:
      - '*'
  workflow_dispatch: {}

permissions:
  contents: read

jobs:
  build-default-release-container:
    runs-on: [ubuntu-latest]

    steps:
      -
        name: Checkout
        uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2
      -
        name: Docker meta
        id: docker_meta
        uses: docker/metadata-action@69f6fc9d46f2f8bf0d5491e4aabe0bb8c6a4678a # v3
        with:
          images: |
            chrislusf/seaweedfs
          tags: |
            type=ref,event=tag,suffix=_large_disk_full
          flavor: |
            latest=false
          labels: |
            org.opencontainers.image.title=seaweedfs
            org.opencontainers.image.description=SeaweedFS is a distributed storage system for blobs, objects, files, and data lake, to store and serve billions of files fast!
            org.opencontainers.image.vendor=Chris Lu
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@8b122486cedac8393e77aa9734c3528886e4a1a8 # v1
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@dc7b9719a96d48369863986a06765841d7ea23f6 # v1
      -
        name: Login to Docker Hub
        if: github.event_name != 'pull_request'
        uses: docker/login-action@49ed152c8eca782a232dede0303416e8f356c37b # v1
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}
      -
        name: Build
        uses: docker/build-push-action@e551b19e49efd4e98792db7592c17c09b89db8d8 # v2
        with:
          context: ./docker
          push: ${{ github.event_name != 'pull_request' }}
          file: ./docker/Dockerfile.go_build
          build-args: TAGS=5BytesOffset,elastic,ydb,gocdk
          platforms: linux/amd64
          tags: ${{ steps.docker_meta.outputs.tags }}
          labels: ${{ steps.docker_meta.outputs.labels }}

53
.github/workflows/container_test.yml

@@ -1,53 +0,0 @@
name: "docker: test building container images"

on:
  pull_request:
  workflow_dispatch: []

concurrency:
  group: ${{ github.head_ref }}/container_test
  cancel-in-progress: true

jobs:

  build-test:
    runs-on: [ubuntu-latest]
    strategy:
      matrix:
        platform: [ linux ]
        arch: [ amd64, arm, arm64, 386 ]

    steps:
      -
        name: Checkout
        uses: actions/checkout@v2
      -
        name: Docker meta
        id: docker_meta
        uses: docker/metadata-action@v3
        with:
          images: |
            chrislusf/seaweedfs
            ghcr.io/chrislusf/seaweedfs
          tags: |
            type=raw,value=latest
          labels: |
            org.opencontainers.image.title=seaweedfs
            org.opencontainers.image.vendor=Chris Lu
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
        with:
          buildkitd-flags: "--debug"
      -
        name: Build
        uses: docker/build-push-action@v2
        with:
          context: ./docker
          push: false
          file: ./docker/Dockerfile
          platforms: ${{ matrix.platform }}/${{ matrix.arch }}
          tags: ${{ steps.docker_meta.outputs.tags }}
          labels: ${{ steps.docker_meta.outputs.labels }}

14
.github/workflows/depsreview.yml

@@ -0,0 +1,14 @@
name: 'Dependency Review'
on: [pull_request]

permissions:
  contents: read

jobs:
  dependency-review:
    runs-on: ubuntu-latest
    steps:
      - name: 'Checkout Repository'
        uses: actions/checkout@dcd71f646680f2efd8db4afa5ad64fdcba30e748
      - name: 'Dependency Review'
        uses: actions/dependency-review-action@1c59cdf2a9c7f29c90e8da32237eb04b81bad9f0

17
.github/workflows/go.yml

@@ -1,4 +1,4 @@
-name: "go: test building binary"
+name: "go: build binary"
 
 on:
   push:
@@ -10,6 +10,9 @@ concurrency:
   group: ${{ github.head_ref }}/go
   cancel-in-progress: true
 
+permissions:
+  contents: read
+
 jobs:
 
   build:
@@ -18,24 +21,20 @@ jobs:
     steps:
 
       - name: Set up Go 1.x
-        uses: actions/setup-go@v2
+        uses: actions/setup-go@b22fbbc2921299758641fab08929b4ac52b32923 # v2
         with:
           go-version: ^1.13
         id: go
 
       - name: Check out code into the Go module directory
-        uses: actions/checkout@v2
+        uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2
 
       - name: Get dependencies
         run: |
           cd weed; go get -v -t -d ./...
-          if [ -f Gopkg.toml ]; then
-            curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
-            dep ensure
-          fi
 
       - name: Build
-        run: cd weed; go build -v .
+        run: cd weed; go build -tags "elastic gocdk sqlite ydb" -v .
 
       - name: Test
-        run: cd weed; go test -v ./...
+        run: cd weed; go test -tags "elastic gocdk sqlite ydb" -v ./...

68
.github/workflows/release.yml

@@ -1,68 +0,0 @@
name: "go: build dev binaries"

on:
  push:
    branches: [ master ]

jobs:

  build:
    name: Build
    runs-on: ubuntu-latest
    strategy:
      matrix:
        goos: [linux, windows, darwin, freebsd, netbsd, openbsd]
        goarch: [amd64, arm, arm64, 386]
        exclude:
          - goarch: arm
            goos: darwin
          - goarch: 386
            goos: darwin
          - goarch: arm
            goos: windows
          - goarch: arm64
            goos: windows

    steps:

      - name: Check out code into the Go module directory
        uses: actions/checkout@v2

      - name: Wait for the deletion
        uses: jakejarvis/wait-action@master
        with:
          time: '30s'

      - name: Set BUILD_TIME env
        run: echo BUILD_TIME=$(date -u +%Y%m%d-%H%M) >> ${GITHUB_ENV}

      - name: Go Release Binaries
        uses: wangyoucao577/go-release-action@v1.20
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
          goarch: ${{ matrix.goarch }}
          release_tag: dev
          overwrite: true
          pre_command: export CGO_ENABLED=0
          build_flags: -tags 5BytesOffset # optional, default is
          ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
          # Where to run `go build .`
          project_path: weed
          binary_name: weed-large-disk
          asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"

      - name: Go Release Binaries
        uses: wangyoucao577/go-release-action@v1.20
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
          goarch: ${{ matrix.goarch }}
          release_tag: dev
          overwrite: true
          pre_command: export CGO_ENABLED=0
          ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
          # Where to run `go build .`
          project_path: weed
          binary_name: weed
          asset_name: "weed-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"

4
.gitignore

@@ -55,6 +55,8 @@ Temporary Items
 # Mongo Explorer plugin:
 # .idea/mongoSettings.xml
 
+## vscode
+.vscode
+
 ## File-based project format:
 *.ipr
 *.iws
@@ -75,6 +77,8 @@ com_crashlytics_export_strings.xml
 crashlytics.properties
 crashlytics-build.properties
 
+workspace/
+
 test_data
 build
 target

14
Makefile

@@ -0,0 +1,14 @@
BINARY = weed

SOURCE_DIR = .

all: install

install:
	cd weed; go install

full_install:
	cd weed; go install -tags "elastic gocdk sqlite ydb"

test:
	cd weed; go test -tags "elastic gocdk sqlite ydb" -v ./...
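
The `elastic gocdk sqlite ydb` tags used here (and in go.yml above) rely on Go's build constraints: a source file opting in to one of these tags is compiled only when that tag is passed. A sketch of the pattern follows; the file and package names are illustrative, not taken from this commit:

```go
//go:build ydb
// +build ydb

// Files guarded like this stay out of the default `weed` binary and are
// compiled only by `go build -tags ydb` (or the full_install target above),
// which is how optional filer-store backends avoid bloating normal builds.
package filer
```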

46
README.md

@@ -31,16 +31,16 @@ Your support will be really appreciated by me and other supporters!
 </p>
 -->
 
 ### Gold Sponsors
-![shuguang](https://raw.githubusercontent.com/chrislusf/seaweedfs/master/note/shuguang.png)
+- [![nodion](https://www.nodion.com/img/logo.svg)](https://www.nodion.com)
 
 ---
 
 - [Download Binaries for different platforms](https://github.com/chrislusf/seaweedfs/releases/latest)
 - [SeaweedFS on Slack](https://join.slack.com/t/seaweedfs/shared_invite/enQtMzI4MTMwMjU2MzA3LTEyYzZmZWYzOGQ3MDJlZWMzYmI0OTE4OTJiZjJjODBmMzUxNmYwODg0YjY3MTNlMjBmZDQ1NzQ5NDJhZWI2ZmY)
 - [SeaweedFS on Twitter](https://twitter.com/SeaweedFS)
+- [SeaweedFS on Telegram](https://t.me/Seaweedfs)
+- [SeaweedFS on Reddit](https://www.reddit.com/r/SeaweedFS/)
 - [SeaweedFS Mailing List](https://groups.google.com/d/forum/seaweedfs)
 - [Wiki Documentation](https://github.com/chrislusf/seaweedfs/wiki)
 - [SeaweedFS White Paper](https://github.com/chrislusf/seaweedfs/wiki/SeaweedFS_Architecture.pdf)
@@ -51,12 +51,15 @@ Table of Contents
 =================
 
 * [Quick Start](#quick-start)
+  * [Quick Start for S3 API on Docker](#quick-start-for-s3-api-on-docker)
+  * [Quick Start with Single Binary](#quick-start-with-single-binary)
+  * [Quick Start SeaweedFS S3 on AWS](#quick-start-seaweedfs-s3-on-aws)
 * [Introduction](#introduction)
 * [Features](#features)
   * [Additional Features](#additional-features)
   * [Filer Features](#filer-features)
 * [Example: Using Seaweed Object Store](#example-Using-Seaweed-Object-Store)
-* [Architecture](#architecture)
+* [Architecture](#Object-Store-Architecture)
 * [Compared to Other File Systems](#compared-to-other-file-systems)
   * [Compared to HDFS](#compared-to-hdfs)
   * [Compared to GlusterFS, Ceph](#compared-to-glusterfs-ceph)
@@ -73,12 +76,15 @@ Table of Contents
 `docker run -p 8333:8333 chrislusf/seaweedfs server -s3`
 
-## Quick Start with single binary ##
+## Quick Start with Single Binary ##
 * Download the latest binary from https://github.com/chrislusf/seaweedfs/releases and unzip a single binary file `weed` or `weed.exe`
 * Run `weed server -dir=/some/data/dir -s3` to start one master, one volume server, one filer, and one S3 gateway.
 
 Also, to increase capacity, just add more volume servers by running `weed volume -dir="/some/data/dir2" -mserver="<master_host>:9333" -port=8081` locally, or on a different machine, or on thousands of machines. That is it!
 
+## Quick Start SeaweedFS S3 on AWS ##
+* Setup fast production-ready [SeaweedFS S3 on AWS with cloudformation](https://aws.amazon.com/marketplace/pp/prodview-nzelz5gprlrjc)
+
 ## Introduction ##
 
 SeaweedFS is a simple and highly scalable distributed file system. There are two objectives:
@@ -102,7 +108,7 @@ Also, SeaweedFS implements erasure coding with ideas from
 On top of the object store, optional [Filer] can support directories and POSIX attributes.
 Filer is a separate linearly-scalable stateless server with customizable metadata stores,
-e.g., MySql, Postgres, Redis, Cassandra, HBase, Mongodb, Elastic Search, LevelDB, RocksDB, Sqlite, MemSql, TiDB, Etcd, CockroachDB, etc.
+e.g., MySql, Postgres, Redis, Cassandra, HBase, Mongodb, Elastic Search, LevelDB, RocksDB, Sqlite, MemSql, TiDB, Etcd, CockroachDB, YDB, etc.
 
 For any distributed key value stores, the large values can be offloaded to SeaweedFS.
 With the fast access speed and linearly scalable capacity,
@@ -119,7 +125,7 @@ Faster and Cheaper than direct cloud storage!
 ## Additional Features ##
 * Can choose no replication or different replication levels, rack and data center aware.
 * Automatic master servers failover - no single point of failure (SPOF).
-* Automatic Gzip compression depending on file mime type.
+* Automatic Gzip compression depending on file MIME type.
 * Automatic compaction to reclaim disk space after deletion or update.
 * [Automatic entry TTL expiration][VolumeServerTTL].
 * Any server with some disk spaces can add to the total storage space.
@@ -147,6 +153,7 @@ Faster and Cheaper than direct cloud storage!
 * [AES256-GCM Encrypted Storage][FilerDataEncryption] safely stores the encrypted data.
 * [Super Large Files][SuperLargeFiles] stores large or super large files in tens of TB.
 * [Cloud Drive][CloudDrive] mounts cloud storage to local cluster, cached for fast read and write with asynchronous write back.
+* [Gateway to Remote Object Store][GatewayToRemoteObjectStore] mirrors bucket operations to remote object storage, in addition to [Cloud Drive][CloudDrive]
 
 ## Kubernetes ##
 * [Kubernetes CSI Driver][SeaweedFsCsiDriver] A Container Storage Interface (CSI) Driver. [![Docker Pulls](https://img.shields.io/docker/pulls/chrislusf/seaweedfs-csi-driver.svg?maxAge=4800)](https://hub.docker.com/r/chrislusf/seaweedfs-csi-driver/)
@@ -170,6 +177,7 @@ Faster and Cheaper than direct cloud storage!
 [FilerStoreReplication]: https://github.com/chrislusf/seaweedfs/wiki/Filer-Store-Replication
 [KeyLargeValueStore]: https://github.com/chrislusf/seaweedfs/wiki/Filer-as-a-Key-Large-Value-Store
 [CloudDrive]: https://github.com/chrislusf/seaweedfs/wiki/Cloud-Drive-Architecture
+[GatewayToRemoteObjectStore]: https://github.com/chrislusf/seaweedfs/wiki/Gateway-to-Remote-Object-Storage
 
 [Back to TOC](#table-of-contents)
@@ -196,7 +204,7 @@ SeaweedFS uses HTTP REST operations to read, write, and delete. The responses ar
 ### Write File ###
 
-To upload a file: first, send a HTTP POST, PUT, or GET request to `/dir/assign` to get an `fid` and a volume server url:
+To upload a file: first, send a HTTP POST, PUT, or GET request to `/dir/assign` to get an `fid` and a volume server URL:
 
 ```
 > curl http://localhost:9333/dir/assign
@@ -245,7 +253,7 @@ First look up the volume server's URLs by the file's volumeId:
 Since (usually) there are not too many volume servers, and volumes don't move often, you can cache the results most of the time. Depending on the replication type, one volume can have multiple replica locations. Just randomly pick one location to read.
 
-Now you can take the public url, render the url or directly read from the volume server via url:
+Now you can take the public URL, render the URL or directly read from the volume server via URL:
 
 ```
 http://localhost:8080/3,01637037d6.jpg
@ -346,9 +354,9 @@ On each write request, the master server also generates a file key, which is a g
### Write and Read files ### ### Write and Read files ###
When a client sends a write request, the master server returns (volume id, file key, file cookie, volume node url) for the file. The client then contacts the volume node and POSTs the file content.
When a client sends a write request, the master server returns (volume id, file key, file cookie, volume node URL) for the file. The client then contacts the volume node and POSTs the file content.
When a client needs to read a file based on (volume id, file key, file cookie), it asks the master server by the volume id for the (volume node url, volume node public url), or retrieves this from a cache. Then the client can GET the content, or just render the URL on web pages and let browsers fetch the content.
When a client needs to read a file based on (volume id, file key, file cookie), it asks the master server by the volume id for the (volume node URL, volume node public URL), or retrieves this from a cache. Then the client can GET the content, or just render the URL on web pages and let browsers fetch the content.
Please see the example for details on the write-read process.
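For a concrete sense of that flow, here is a minimal sketch using `curl` against a default local setup (master on port 9333, one volume server on port 8080); the `fid`, file path, and sizes are illustrative only:

```
# 1. ask the master to assign a file id and a writable volume server
> curl http://localhost:9333/dir/assign
{"count":1,"fid":"3,01637037d6","url":"127.0.0.1:8080","publicUrl":"localhost:8080"}

# 2. POST the file content to the assigned volume server, keyed by the fid
> curl -F file=@/path/to/myphoto.jpg http://127.0.0.1:8080/3,01637037d6
{"name":"myphoto.jpg","size":43234}

# 3. to read later, look up the volume locations by the volume id (the part before the comma) ...
> curl "http://localhost:9333/dir/lookup?volumeId=3"
{"locations":[{"url":"127.0.0.1:8080","publicUrl":"localhost:8080"}]}

# ... then GET from any returned location, or just render the URL for browsers
> curl -O http://localhost:8080/3,01637037d6.jpg
```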
@@ -402,7 +410,7 @@ The architectures are mostly the same. SeaweedFS aims to store and read files fa
* SeaweedFS optimizes for small files, ensuring O(1) disk seek operation, and can also handle large files.
* SeaweedFS statically assigns a volume id for a file. Locating file content becomes just a lookup of the volume id, which can be easily cached.
* SeaweedFS Filer metadata store can be any well-known and proven data stores, e.g., Redis, Cassandra, HBase, Mongodb, Elastic Search, MySql, Postgres, Sqlite, MemSql, TiDB, CockroachDB, Etcd etc, and is easy to customized.
* SeaweedFS Filer metadata store can be any well-known and proven data store, e.g., Redis, Cassandra, HBase, Mongodb, Elastic Search, MySql, Postgres, Sqlite, MemSql, TiDB, CockroachDB, Etcd, YDB etc, and is easy to customize.
* SeaweedFS Volume server also communicates directly with clients via HTTP, supporting range queries, direct uploads, etc.
| System | File Metadata | File Content Read | POSIX | REST API | Optimized for large number of small files |
@@ -438,13 +446,13 @@ Ceph can be setup similar to SeaweedFS as a key->blob store. It is much more com
SeaweedFS has a centralized master group to look up free volumes, while Ceph uses hashing and metadata servers to locate its objects. Having a centralized master makes it easy to code and manage.
Same as SeaweedFS, Ceph is also based on the object store RADOS. Ceph is rather complicated with mixed reviews.
Ceph, like SeaweedFS, is based on the object store RADOS. Ceph is rather complicated with mixed reviews.
Ceph uses CRUSH hashing to automatically manage the data placement, which is efficient to locate the data. But the data has to be placed according to the CRUSH algorithm. Any wrong configuration would cause data loss. Topology changes, such as adding new servers to increase capacity, will cause data migration with high IO cost to fit the CRUSH algorithm. SeaweedFS places data by assigning them to any writable volumes. If writes to one volume failed, just pick another volume to write. Adding more volumes are also as simple as it can be.
Ceph uses CRUSH hashing to automatically manage data placement, which is efficient to locate the data. But the data has to be placed according to the CRUSH algorithm. Any wrong configuration would cause data loss. Topology changes, such as adding new servers to increase capacity, will cause data migration with high IO cost to fit the CRUSH algorithm. SeaweedFS places data by assigning them to any writable volumes. If writes to one volume failed, just pick another volume to write. Adding more volumes is also as simple as it can be.
SeaweedFS is optimized for small files. Small files are stored as one continuous block of content, with at most 8 unused bytes between files. Small file access is O(1) disk read.
SeaweedFS Filer uses off-the-shelf stores, such as MySql, Postgres, Sqlite, Mongodb, Redis, Elastic Search, Cassandra, HBase, MemSql, TiDB, CockroachCB, Etcd, to manage file directories. These stores are proven, scalable, and easier to manage.
SeaweedFS Filer uses off-the-shelf stores, such as MySql, Postgres, Sqlite, Mongodb, Redis, Elastic Search, Cassandra, HBase, MemSql, TiDB, CockroachCB, Etcd, YDB, to manage file directories. These stores are proven, scalable, and easier to manage.
| SeaweedFS | comparable to Ceph | advantage |
| ------------- | ------------- | ---------------- |
@@ -489,7 +497,7 @@ Step 1: install go on your machine and setup the environment by following the in
https://golang.org/doc/install
make sure you set up your $GOPATH
make sure to define your $GOPATH
Step 2: checkout this repo:
@@ -499,7 +507,7 @@ git clone https://github.com/chrislusf/seaweedfs.git
Step 3: download, compile, and install the project by executing the following command
```bash
make install
cd seaweedfs/weed && make install
```
Once this is done, you will find the executable "weed" in your `$GOPATH/bin` directory
@@ -526,7 +534,7 @@ Write 1 million 1KB file:
```
Concurrency Level: 16
Time taken for tests: 66.753 seconds
Complete requests: 1048576
Completed requests: 1048576
Failed requests: 0
Total transferred: 1106789009 bytes
Requests per second: 15708.23 [#/sec]
@@ -552,7 +560,7 @@ Randomly read 1 million files:
```
Concurrency Level: 16
Time taken for tests: 22.301 seconds
Complete requests: 1048576
Completed requests: 1048576
Failed requests: 0
Total transferred: 1106812873 bytes
Requests per second: 47019.38 [#/sec]

8
backers.md

@@ -5,12 +5,16 @@
<h2 align="center">Generous Backers ($50+)</h2>
- [4Sight Imaging](https://www.4sightimaging.com/)
- [Evercam Camera Management Software](https://evercam.io/)
- [Admiral](https://getadmiral.com)
<h2 align="center">Backers</h2>
- [ColorfulClouds Tech Co. Ltd.](https://caiyunai.com/)
- [Haravan - Ecommerce Platform](https://www.haravan.com)
- PeterCxy - Creator of Shelter App
- [Hive Games](https://playhive.com/)
- Flowm
- Yoni Nakache
- Catalin Constantin
- MingLi Yuan
- Leroy van Logchem

56
docker/Dockerfile

@@ -1,56 +0,0 @@
FROM alpine
# 'latest' or 'dev'
ARG RELEASE=latest
RUN \
ARCH=$(if [ $(uname -m) == "x86_64" ] && [ $(getconf LONG_BIT) == "64" ]; then echo "amd64"; \
elif [ $(uname -m) == "x86_64" ] && [ $(getconf LONG_BIT) == "32" ]; then echo "386"; \
elif [ $(uname -m) == "aarch64" ]; then echo "arm64"; \
elif [ $(uname -m) == "armv7l" ]; then echo "arm"; \
elif [ $(uname -m) == "armv6l" ]; then echo "arm"; \
elif [ $(uname -m) == "s390x" ]; then echo "s390x"; \
elif [ $(uname -m) == "ppc64le" ]; then echo "ppc64le"; fi;) && \
echo "Building for $ARCH" 1>&2 && \
SUPERCRONIC_SHA1SUM=$(echo $ARCH | sed 's/386/e0126b0102b9f388ecd55714358e3ad60d0cebdb/g' | sed 's/amd64/5ddf8ea26b56d4a7ff6faecdd8966610d5cb9d85/g' | sed 's/arm64/e2714c43e7781bf1579c85aa61259245f56dbba1/g' | sed 's/arm/47481c3341bc3a1ae91a728e0cc63c8e6d3791ad/g') && \
SUPERCRONIC_URL=https://github.com/aptible/supercronic/releases/download/v0.1.9/supercronic-linux-$ARCH && \
SUPERCRONIC=supercronic-linux-$ARCH && \
# Install SeaweedFS and Supercronic ( for cron job mode )
apk add --no-cache --virtual build-dependencies --update wget curl ca-certificates && \
apk add fuse && \
wget -P /tmp https://github.com/$(curl -s -L https://github.com/chrislusf/seaweedfs/releases/${RELEASE} | egrep -o "chrislusf/seaweedfs/releases/download/.*/linux_$ARCH.tar.gz" | head -n 1) && \
tar -C /usr/bin/ -xzvf /tmp/linux_$ARCH.tar.gz && \
curl -fsSLO "$SUPERCRONIC_URL" && \
echo "${SUPERCRONIC_SHA1SUM} ${SUPERCRONIC}" | sha1sum -c - && \
chmod +x "$SUPERCRONIC" && \
mv "$SUPERCRONIC" "/usr/local/bin/${SUPERCRONIC}" && \
ln -s "/usr/local/bin/${SUPERCRONIC}" /usr/local/bin/supercronic && \
apk del build-dependencies && \
rm -rf /tmp/*
# volume server gprc port
EXPOSE 18080
# volume server http port
EXPOSE 8080
# filer server gprc port
EXPOSE 18888
# filer server http port
EXPOSE 8888
# master server shared gprc port
EXPOSE 19333
# master server shared http port
EXPOSE 9333
# s3 server http port
EXPOSE 8333
# webdav server http port
EXPOSE 7333
RUN mkdir -p /data/filerldb2
VOLUME /data
COPY filer.toml /etc/seaweedfs/filer.toml
COPY entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh
ENTRYPOINT ["/entrypoint.sh"]

1
docker/Dockerfile.gccgo_build

@@ -37,6 +37,7 @@ EXPOSE 7333
RUN mkdir -p /data/filerldb2
VOLUME /data
WORKDIR /data
RUN chmod +x /entrypoint.sh

6
docker/Dockerfile.go_build

@@ -1,12 +1,13 @@
FROM amd64/golang:1.17-alpine as builder
FROM golang:1.18-alpine as builder
RUN apk add git g++ fuse
RUN mkdir -p /go/src/github.com/chrislusf/
RUN git clone https://github.com/chrislusf/seaweedfs /go/src/github.com/chrislusf/seaweedfs
ARG BRANCH=${BRANCH:-master}
ARG TAGS
RUN cd /go/src/github.com/chrislusf/seaweedfs && git checkout $BRANCH
RUN cd /go/src/github.com/chrislusf/seaweedfs/weed \
&& export LDFLAGS="-X github.com/chrislusf/seaweedfs/weed/util.COMMIT=$(git rev-parse --short HEAD)" \
&& CGO_ENABLED=0 go install -ldflags "-extldflags -static ${LDFLAGS}"
&& CGO_ENABLED=0 go install -tags "$TAGS" -ldflags "-extldflags -static ${LDFLAGS}"
FROM alpine AS final
LABEL author="Chris Lu"
@@ -36,6 +37,7 @@ EXPOSE 7333
RUN mkdir -p /data/filerldb2
VOLUME /data
WORKDIR /data
RUN chmod +x /entrypoint.sh

1
docker/Dockerfile.local

@@ -26,6 +26,7 @@ EXPOSE 7333
RUN mkdir -p /data/filerldb2
VOLUME /data
WORKDIR /data
RUN chmod +x /entrypoint.sh

31
docker/Dockerfile.go_build_large → docker/Dockerfile.rocksdb_large

@@ -1,20 +1,37 @@
FROM amd64/golang:1.17-alpine as builder
RUN apk add git g++ fuse
FROM golang:1.18-buster as builder
RUN apt-get update
RUN apt-get install -y build-essential libsnappy-dev zlib1g-dev libbz2-dev libgflags-dev liblz4-dev libzstd-dev
ENV ROCKSDB_VERSION v7.2.2
# build RocksDB
RUN cd /tmp && \
git clone https://github.com/facebook/rocksdb.git /tmp/rocksdb --depth 1 --single-branch --branch $ROCKSDB_VERSION && \
cd rocksdb && \
PORTABLE=1 make static_lib && \
make install-static
ENV CGO_CFLAGS "-I/tmp/rocksdb/include"
ENV CGO_LDFLAGS "-L/tmp/rocksdb -lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy -llz4 -lzstd"
# build SeaweedFS
RUN mkdir -p /go/src/github.com/chrislusf/
RUN git clone https://github.com/chrislusf/seaweedfs /go/src/github.com/chrislusf/seaweedfs
ARG BRANCH=${BRANCH:-master}
RUN cd /go/src/github.com/chrislusf/seaweedfs && git checkout $BRANCH
RUN cd /go/src/github.com/chrislusf/seaweedfs/weed \
&& export LDFLAGS="-X github.com/chrislusf/seaweedfs/weed/util.COMMIT=$(git rev-parse --short HEAD)" \
&& CGO_ENABLED=0 go install -tags 5BytesOffset -ldflags "-extldflags -static ${LDFLAGS}"
&& go install -tags "5BytesOffset rocksdb" -ldflags "-extldflags -static ${LDFLAGS}"
FROM alpine AS final
LABEL author="Chris Lu"
COPY --from=builder /go/bin/weed /usr/bin/
RUN mkdir -p /etc/seaweedfs
COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/filer.toml /etc/seaweedfs/filer.toml
COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/filer_rocksdb.toml /etc/seaweedfs/filer.toml
COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/entrypoint.sh /entrypoint.sh
RUN apk add fuse # for weed mount
RUN apk add fuse snappy gflags
# volume server gprc port
EXPOSE 18080
@@ -33,10 +50,12 @@ EXPOSE 8333
# webdav server http port
EXPOSE 7333
RUN mkdir -p /data/filerldb2
RUN mkdir -p /data/filer_rocksdb
VOLUME /data
WORKDIR /data
RUN chmod +x /entrypoint.sh
ENTRYPOINT ["/entrypoint.sh"]

2
docker/Dockerfile.s3tests

@@ -28,4 +28,4 @@ ENV \
S3TEST_CONF="/s3test.conf"
ENTRYPOINT ["/bin/bash", "-c"]
CMD ["sleep 10 && exec ./virtualenv/bin/nosetests ${NOSETESTS_OPTIONS-} ${NOSETESTS_ATTR:+-a $NOSETESTS_ATTR} ${NOSETESTS_EXCLUDE:+-e $NOSETESTS_EXCLUDE}"]
CMD ["sleep 30 && exec ./virtualenv/bin/nosetests ${NOSETESTS_OPTIONS-} ${NOSETESTS_ATTR:+-a $NOSETESTS_ATTR} ${NOSETESTS_EXCLUDE:+-e $NOSETESTS_EXCLUDE}"]

31
docker/Makefile

@@ -7,12 +7,21 @@ gen: dev
binary:
export SWCOMMIT=$(shell git rev-parse --short HEAD)
export SWLDFLAGS="-X github.com/chrislusf/seaweedfs/weed/util.COMMIT=$(SWCOMMIT)"
cd ../weed; CGO_ENABLED=0 GOOS=linux go build -ldflags "-extldflags -static $(SWLDFLAGS)"; mv weed ../docker/
cd ../weed; CGO_ENABLED=0 GOOS=linux go build -tags "$(tags)" -ldflags "-extldflags -static $(SWLDFLAGS)"; mv weed ../docker/
build: binary
docker build --no-cache -t chrislusf/seaweedfs:local -f Dockerfile.local .
rm ./weed
go_build: # make go_build tags=elastic,ydb,gocdk,hdfs,5BytesOffset
docker build --build-arg TAGS=$(tags) --no-cache -t chrislusf/seaweedfs:go_build -f Dockerfile.go_build .
go_build_large_disk:
docker build --build-arg TAGS=large_disk --no-cache -t chrislusf/seaweedfs:large_disk -f Dockerfile.go_build .
build_rocksdb:
docker build --no-cache -t chrislusf/seaweedfs:rocksdb -f Dockerfile.rocksdb_large .
s3tests_build:
docker build --no-cache -t chrislusf/ceph-s3-tests:local -f Dockerfile.s3tests .
@@ -40,18 +49,38 @@ dev_registry: build
dev_replicate: build
docker-compose -f compose/local-replicate-compose.yml -p seaweedfs up
dev_auditlog: build
docker-compose -f compose/local-auditlog-compose.yml -p seaweedfs up
dev_nextcloud: build
docker-compose -f compose/local-nextcloud-compose.yml -p seaweedfs up
cluster: build
docker-compose -f compose/local-cluster-compose.yml -p seaweedfs up
2clusters: build
docker-compose -f compose/local-clusters-compose.yml -p seaweedfs up
2mount: build
docker-compose -f compose/local-sync-mount-compose.yml -p seaweedfs up
hashicorp_raft: build
docker-compose -f compose/local-hashicorp-raft-compose.yml -p seaweedfs up
s3tests: build s3tests_build
docker-compose -f compose/local-s3tests-compose.yml -p seaweedfs up
filer_etcd: build
docker stack deploy -c compose/swarm-etcd.yml fs
test_etcd: build
docker-compose -f compose/test-etcd-filer.yml -p seaweedfs up
test_ydb: tags = ydb
test_ydb: build
export
docker-compose -f compose/test-ydb-filer.yml -p seaweedfs up
clean:
rm ./weed
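A brief usage sketch for the new tag plumbing (the tag list below is only an example): the `tags` make variable is passed through to `go build`/`docker build`, and `test_ydb: tags = ydb` is a GNU make target-specific variable, so the ydb build tag also applies to its `build` prerequisite:

```
# build a local image with extra filer backends compiled in
make go_build tags=elastic,ydb,gocdk,hdfs,5BytesOffset

# build with the ydb tag implied, then start the YDB filer test stack
make test_ydb
```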

5
docker/README.md

@@ -37,3 +37,8 @@ docker buildx build --pull --push --platform linux/386,linux/amd64,linux/arm64,l
docker buildx stop $BUILDER
```
## MinIO debugging
```
mc config host add local http://127.0.0.1:9000 some_access_key1 some_secret_key1
mc admin trace --all --verbose local
```

4
docker/compose/fluent.json

@@ -0,0 +1,4 @@
{
"fluent_port": 24224,
"fluent_host": "fluent"
}

36
docker/compose/local-auditlog-compose.yml

@@ -0,0 +1,36 @@
version: '2'
services:
s3:
image: chrislusf/seaweedfs:local
ports:
- 8333:8333
- 9333:9333
- 19333:19333
- 8084:8080
- 18084:18080
- 8888:8888
- 18888:18888
- 8000:8000
command: "server -ip=s3 -filer -s3 -s3.config=/etc/seaweedfs/s3.json -s3.port=8000 -s3.auditLogConfig=/etc/seaweedfs/fluent.json -volume.max=0 -master.volumeSizeLimitMB=8 -volume.preStopSeconds=1"
volumes:
- ./fluent.json:/etc/seaweedfs/fluent.json
- ./s3.json:/etc/seaweedfs/s3.json
depends_on:
- fluent
fluent:
image: fluent/fluentd:v1.14
ports:
- 24224:24224
#s3tests:
# image: chrislusf/ceph-s3-tests:local
# volumes:
# - ./s3tests.conf:/opt/s3-tests/s3tests.conf
# environment:
# S3TEST_CONF: "s3tests.conf"
# NOSETESTS_OPTIONS: "--verbose --logging-level=ERROR --with-xunit --failure-detail s3tests_boto3.functional.test_s3"
# NOSETESTS_ATTR: "!tagging,!fails_on_aws,!encryption,!bucket-policy,!versioning,!fails_on_rgw,!bucket-policy,!fails_with_subdomain,!policy_status,!object-lock,!lifecycle,!cors,!user-policy"
# NOSETESTS_EXCLUDE: "(get_bucket_encryption|put_bucket_encryption|bucket_list_delimiter_basic|bucket_listv2_delimiter_basic|bucket_listv2_encoding_basic|bucket_list_encoding_basic|bucket_list_delimiter_prefix|bucket_listv2_delimiter_prefix_ends_with_delimiter|bucket_list_delimiter_prefix_ends_with_delimiter|bucket_list_delimiter_alt|bucket_listv2_delimiter_alt|bucket_list_delimiter_prefix_underscore|bucket_list_delimiter_percentage|bucket_listv2_delimiter_percentage|bucket_list_delimiter_whitespace|bucket_listv2_delimiter_whitespace|bucket_list_delimiter_dot|bucket_listv2_delimiter_dot|bucket_list_delimiter_unreadable|bucket_listv2_delimiter_unreadable|bucket_listv2_fetchowner_defaultempty|bucket_listv2_fetchowner_empty|bucket_list_prefix_delimiter_alt|bucket_listv2_prefix_delimiter_alt|bucket_list_prefix_delimiter_prefix_not_exist|bucket_listv2_prefix_delimiter_prefix_not_exist|bucket_list_prefix_delimiter_delimiter_not_exist|bucket_listv2_prefix_delimiter_delimiter_not_exist|bucket_list_prefix_delimiter_prefix_delimiter_not_exist|bucket_listv2_prefix_delimiter_prefix_delimiter_not_exist|bucket_list_maxkeys_none|bucket_listv2_maxkeys_none|bucket_list_maxkeys_invalid|bucket_listv2_continuationtoken_empty|bucket_list_return_data|bucket_list_objects_anonymous|bucket_listv2_objects_anonymous|bucket_notexist|bucketv2_notexist|bucket_delete_nonempty|bucket_concurrent_set_canned_acl|object_write_to_nonexist_bucket|object_requestid_matches_header_on_error|object_set_get_metadata_none_to_good|object_set_get_metadata_none_to_empty|object_set_get_metadata_overwrite_to_empty|post_object_anonymous_request|post_object_authenticated_request|post_object_authenticated_no_content_type|post_object_authenticated_request_bad_access_key|post_object_set_success_code|post_object_set_invalid_success_code|post_object_upload_larger_than_chunk|post_object_set_key_from_filename|post_object_ignored_header|post_object_case_insensitive_condition_fields|post_object_escaped_field_values|post_object_success_redirect_action|post_object_invalid_signature|post_object_invalid_access_key|post_object_missing_policy_condition|post_object_user_specified_header|post_object_request_missing_policy_specified_field|post_object_expired_policy|post_object_invalid_request_field_value|get_object_ifunmodifiedsince_good|put_object_ifmatch_failed|object_raw_get_bucket_gone|object_delete_key_bucket_gone|object_raw_get_bucket_acl|object_raw_get_object_acl|object_raw_response_headers|object_raw_authenticated_bucket_gone|object_raw_get_x_amz_expires_out_max_range|object_raw_get_x_amz_expires_out_positive_range|object_anon_put_write_access|object_raw_put_authenticated_expired|bucket_create_exists|bucket_create_naming_bad_short_one|bucket_create_naming_bad_short_two|bucket_get_location|bucket_acl_default|bucket_acl_canned|bucket_acl_canned_publicreadwrite|bucket_acl_canned_authenticatedread|object_acl_default|object_acl_canned_during_create|object_acl_canned|object_acl_canned_publicreadwrite|object_acl_canned_authenticatedread|object_acl_canned_bucketownerread|object_acl_canned_bucketownerfullcontrol|object_acl_full_control_verify_attributes|bucket_acl_canned_private_to_private|bucket_acl_grant_nonexist_user|bucket_acl_no_grants|bucket_acl_grant_email_not_exist|bucket_acl_revoke_all|bucket_recreate_not_overriding|object_copy_verify_contenttype|object_copy_to_itself_with_metadata|object_copy_not_owned_bucket|object_copy_not_owned_object_bucket|object_copy_retaining_metadata|object_copy_replacing_metadata|multipart_upload_empty|multipart_copy_invalid_range|multipart_copy_special_names|multipart_upload_resend_part|multipart_upload_size_too_small|abort_multipart_upload_not_found|multipart_upload_missing_part|multipart_upload_incorrect_etag|100_continue|ranged_request_invalid_range|ranged_request_empty_object|access_bucket)"
# depends_on:
# - s3
# - fluent

29
docker/compose/local-cluster-compose.yml

@@ -6,25 +6,37 @@ services:
ports:
- 9333:9333
- 19333:19333
command: "master -ip=master0 -port=9333 -peers=master0:9333,master1:9334,master2:9335 -mdir=/data/m1"
command: "-v=1 master -volumeSizeLimitMB 100 -resumeState=false -ip=master0 -port=9333 -peers=master0:9333,master1:9334,master2:9335 -mdir=/tmp"
environment:
WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
WEED_MASTER_VOLUME_GROWTH_COPY_2: 2
WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
master1:
image: chrislusf/seaweedfs:local
ports:
- 9334:9334
- 19334:19334
command: "master -ip=master1 -port=9334 -peers=master0:9333,master1:9334,master2:9335 -mdir=/data/m2"
command: "-v=1 master -volumeSizeLimitMB 100 -resumeState=false -ip=master1 -port=9334 -peers=master0:9333,master1:9334,master2:9335 -mdir=/tmp"
environment:
WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
WEED_MASTER_VOLUME_GROWTH_COPY_2: 2
WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
master2:
image: chrislusf/seaweedfs:local
ports:
- 9335:9335
- 19335:19335
command: "master -ip=master2 -port=9335 -peers=master0:9333,master1:9334,master2:9335 -mdir=/data/m3"
command: "-v=1 master -volumeSizeLimitMB 100 -resumeState=false -ip=master2 -port=9335 -peers=master0:9333,master1:9334,master2:9335 -mdir=/tmp"
environment:
WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
WEED_MASTER_VOLUME_GROWTH_COPY_2: 2
WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
volume1:
image: chrislusf/seaweedfs:local
ports:
- 8080:8080
- 18080:18080
command: 'volume -mserver="master0:9333,master1:9334,master2:9335" -port=8080 -ip=volume1 -publicUrl=localhost:8080 -preStopSeconds=1 -disk=ssd1'
command: 'volume -dataCenter=dc1 -rack=v1 -mserver="master0:9333,master1:9334,master2:9335" -port=8080 -ip=volume1 -publicUrl=localhost:8080 -preStopSeconds=1'
depends_on:
- master0
- master1
@@ -34,7 +46,7 @@ services:
ports:
- 8082:8082
- 18082:18082
command: 'volume -mserver="master0:9333,master1:9334,master2:9335" -port=8082 -ip=volume2 -publicUrl=localhost:8082 -preStopSeconds=1 -disk=ssd1'
command: 'volume -dataCenter=dc2 -rack=v2 -mserver="master0:9333,master1:9334,master2:9335" -port=8082 -ip=volume2 -publicUrl=localhost:8082 -preStopSeconds=1'
depends_on:
- master0
- master1
@@ -44,7 +56,7 @@ services:
ports:
- 8083:8083
- 18083:18083
command: 'volume -mserver="master0:9333,master1:9334,master2:9335" -port=8083 -ip=volume3 -publicUrl=localhost:8083 -preStopSeconds=1'
command: 'volume -dataCenter=dc3 -rack=v3 -mserver="master0:9333,master1:9334,master2:9335" -port=8083 -ip=volume3 -publicUrl=localhost:8083 -preStopSeconds=1'
depends_on:
- master0
- master1
@@ -54,7 +66,8 @@ services:
ports:
- 8888:8888
- 18888:18888
command: 'filer -master="master0:9333,master1:9334,master2:9335"'
- 8111:8111
command: 'filer -defaultReplicaPlacement=100 -iam -master="master0:9333,master1:9334,master2:9335"'
depends_on:
- master0
- master1
@@ -65,7 +78,7 @@ services:
image: chrislusf/seaweedfs:local
ports:
- 8333:8333
command: 's3 -filer="filer:8888"'
command: '-v=9 s3 -filer="filer:8888"'
depends_on:
- master0
- master1

89
docker/compose/local-hashicorp-raft-compose.yml

@@ -0,0 +1,89 @@
version: '2'
services:
master0:
image: chrislusf/seaweedfs:local
ports:
- 9333:9333
- 19333:19333
command: "-v=4 master -volumeSizeLimitMB 100 -raftHashicorp -ip=master0 -port=9333 -peers=master1:9334,master2:9335 -mdir=/data"
volumes:
- ./master/0:/data
environment:
WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
WEED_MASTER_VOLUME_GROWTH_COPY_2: 2
WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
master1:
image: chrislusf/seaweedfs:local
ports:
- 9334:9334
- 19334:19334
command: "-v=4 master -volumeSizeLimitMB 100 -raftHashicorp -ip=master1 -port=9334 -peers=master0:9333,master2:9335 -mdir=/data"
volumes:
- ./master/1:/data
environment:
WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
WEED_MASTER_VOLUME_GROWTH_COPY_2: 2
WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
master2:
image: chrislusf/seaweedfs:local
ports:
- 9335:9335
- 19335:19335
command: "-v=4 master -volumeSizeLimitMB 100 -raftHashicorp -ip=master2 -port=9335 -peers=master0:9333,master1:9334 -mdir=/data"
volumes:
- ./master/2:/data
environment:
WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
WEED_MASTER_VOLUME_GROWTH_COPY_2: 2
WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
volume1:
image: chrislusf/seaweedfs:local
ports:
- 8080:8080
- 18080:18080
command: 'volume -dataCenter=dc1 -rack=v1 -mserver="master0:9333,master1:9334,master2:9335" -port=8080 -ip=volume1 -publicUrl=localhost:8080 -preStopSeconds=1'
depends_on:
- master0
- master1
volume2:
image: chrislusf/seaweedfs:local
ports:
- 8082:8082
- 18082:18082
command: 'volume -dataCenter=dc2 -rack=v2 -mserver="master0:9333,master1:9334,master2:9335" -port=8082 -ip=volume2 -publicUrl=localhost:8082 -preStopSeconds=1'
depends_on:
- master0
- master1
volume3:
image: chrislusf/seaweedfs:local
ports:
- 8083:8083
- 18083:18083
command: 'volume -dataCenter=dc3 -rack=v3 -mserver="master0:9333,master1:9334,master2:9335" -port=8083 -ip=volume3 -publicUrl=localhost:8083 -preStopSeconds=1'
depends_on:
- master0
- master1
filer:
image: chrislusf/seaweedfs:local
ports:
- 8888:8888
- 18888:18888
- 8111:8111
command: 'filer -defaultReplicaPlacement=100 -iam -master="master0:9333,master1:9334,master2:9335"'
depends_on:
- master0
- master1
- volume1
- volume2
s3:
image: chrislusf/seaweedfs:local
ports:
- 8333:8333
command: '-v=9 s3 -ip.bind="s3" -filer="filer:8888"'
depends_on:
- master0
- master1
- volume1
- volume2
- filer

44
docker/compose/local-nextcloud-compose.yml

@@ -0,0 +1,44 @@
version: '2'
services:
master:
image: chrislusf/seaweedfs:local
ports:
- 9333:9333
- 19333:19333
command: "master -ip=master"
volume:
image: chrislusf/seaweedfs:local
ports:
- 8080:8080
- 18080:18080
command: "volume -mserver=master:9333 -port=8080 -ip=volume"
depends_on:
- master
s3:
image: chrislusf/seaweedfs:local
ports:
- 8888:8888
- 18888:18888
- 8333:8333
command: '-v 9 filer -master="master:9333" -s3'
depends_on:
- master
- volume
nextcloud:
image: nextcloud:23.0.5-apache
environment:
- OBJECTSTORE_S3_HOST=s3
- OBJECTSTORE_S3_BUCKET=nextcloud
- OBJECTSTORE_S3_KEY=some_access_key1
- OBJECTSTORE_S3_SECRET=some_secret_key1
- OBJECTSTORE_S3_PORT=8333
- OBJECTSTORE_S3_SSL=false
- OBJECTSTORE_S3_USEPATH_STYLE=true
- SQLITE_DATABASE=nextcloud
- NEXTCLOUD_ADMIN_USER=admin
- NEXTCLOUD_ADMIN_PASSWORD=admin
ports:
- 80:80
depends_on:
- s3

4
docker/compose/local-s3tests-compose.yml

@@ -24,7 +24,7 @@ services:
- 8888:8888
- 18888:18888
- 8000:8000
command: 'filer -master="master:9333" -s3 -s3.config=/etc/seaweedfs/s3.json -s3.port=8000'
command: 'filer -master="master:9333" -s3 -s3.config=/etc/seaweedfs/s3.json -s3.port=8000 -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=false'
volumes:
- ./s3.json:/etc/seaweedfs/s3.json
depends_on:
@@ -38,7 +38,7 @@ services:
S3TEST_CONF: "s3tests.conf"
NOSETESTS_OPTIONS: "--verbose --logging-level=ERROR --with-xunit --failure-detail s3tests_boto3.functional.test_s3"
NOSETESTS_ATTR: "!tagging,!fails_on_aws,!encryption,!bucket-policy,!versioning,!fails_on_rgw,!bucket-policy,!fails_with_subdomain,!policy_status,!object-lock,!lifecycle,!cors,!user-policy"
NOSETESTS_EXCLUDE: "(bucket_list_delimiter_basic|bucket_listv2_delimiter_basic|bucket_listv2_encoding_basic|bucket_list_encoding_basic|bucket_list_delimiter_prefix|bucket_listv2_delimiter_prefix_ends_with_delimiter|bucket_list_delimiter_prefix_ends_with_delimiter|bucket_list_delimiter_alt|bucket_listv2_delimiter_alt|bucket_list_delimiter_prefix_underscore|bucket_list_delimiter_percentage|bucket_listv2_delimiter_percentage|bucket_list_delimiter_whitespace|bucket_listv2_delimiter_whitespace|bucket_list_delimiter_dot|bucket_listv2_delimiter_dot|bucket_list_delimiter_unreadable|bucket_listv2_delimiter_unreadable|bucket_listv2_fetchowner_defaultempty|bucket_listv2_fetchowner_empty|bucket_list_prefix_delimiter_alt|bucket_listv2_prefix_delimiter_alt|bucket_list_prefix_delimiter_prefix_not_exist|bucket_listv2_prefix_delimiter_prefix_not_exist|bucket_list_prefix_delimiter_delimiter_not_exist|bucket_listv2_prefix_delimiter_delimiter_not_exist|bucket_list_prefix_delimiter_prefix_delimiter_not_exist|bucket_listv2_prefix_delimiter_prefix_delimiter_not_exist|bucket_list_maxkeys_none|bucket_listv2_maxkeys_none|bucket_list_maxkeys_invalid|bucket_listv2_continuationtoken_empty|bucket_list_return_data|bucket_list_objects_anonymous|bucket_listv2_objects_anonymous|bucket_notexist|bucketv2_notexist|bucket_delete_nonempty|bucket_concurrent_set_canned_acl|object_write_to_nonexist_bucket|object_requestid_matches_header_on_error|object_set_get_metadata_none_to_good|object_set_get_metadata_none_to_empty|object_set_get_metadata_overwrite_to_empty|post_object_anonymous_request|post_object_authenticated_request|post_object_authenticated_no_content_type|post_object_authenticated_request_bad_access_key|post_object_set_success_code|post_object_set_invalid_success_code|post_object_upload_larger_than_chunk|post_object_set_key_from_filename|post_object_ignored_header|post_object_case_insensitive_condition_fields|post_object_escaped_field_values|post_object_success_redirect_action|post_object_invalid_signature|post_object_invalid_access_key|post_object_missing_policy_condition|post_object_user_specified_header|post_object_request_missing_policy_specified_field|post_object_expired_policy|post_object_invalid_request_field_value|get_object_ifunmodifiedsince_good|put_object_ifmatch_failed|object_raw_get_bucket_gone|object_delete_key_bucket_gone|object_raw_get_bucket_acl|object_raw_get_object_acl|object_raw_response_headers|object_raw_authenticated_bucket_gone|object_raw_get_x_amz_expires_out_max_range|object_raw_get_x_amz_expires_out_positive_range|object_anon_put_write_access|object_raw_put_authenticated_expired|bucket_create_exists|bucket_create_naming_bad_short_one|bucket_create_naming_bad_short_two|bucket_get_location|bucket_acl_default|bucket_acl_canned|bucket_acl_canned_publicreadwrite|bucket_acl_canned_authenticatedread|object_acl_default|object_acl_canned_during_create|object_acl_canned|object_acl_canned_publicreadwrite|object_acl_canned_authenticatedread|object_acl_canned_bucketownerread|object_acl_canned_bucketownerfullcontrol|object_acl_full_control_verify_attributes|bucket_acl_canned_private_to_private|bucket_acl_grant_nonexist_user|bucket_acl_no_grants|bucket_acl_grant_email_not_exist|bucket_acl_revoke_all|bucket_recreate_not_overriding|object_copy_verify_contenttype|object_copy_to_itself_with_metadata|object_copy_not_owned_bucket|object_copy_not_owned_object_bucket|object_copy_retaining_metadata|object_copy_replacing_metadata|multipart_upload_empty|multipart_copy_invalid_range|multipart_copy_special_names|multipart_
upload_resend_part|multipart_upload_size_too_small|abort_multipart_upload_not_found|multipart_upload_missing_part|multipart_upload_incorrect_etag|100_continue|ranged_request_invalid_range|ranged_request_empty_object|access_bucket)"
NOSETESTS_EXCLUDE: "(get_bucket_encryption|delete_bucket_encryption|put_bucket_encryption|bucket_list_delimiter_basic|bucket_listv2_delimiter_basic|bucket_listv2_encoding_basic|bucket_list_encoding_basic|bucket_list_delimiter_prefix|bucket_listv2_delimiter_prefix_ends_with_delimiter|bucket_list_delimiter_prefix_ends_with_delimiter|bucket_list_delimiter_alt|bucket_listv2_delimiter_alt|bucket_list_delimiter_prefix_underscore|bucket_list_delimiter_percentage|bucket_listv2_delimiter_percentage|bucket_list_delimiter_whitespace|bucket_listv2_delimiter_whitespace|bucket_list_delimiter_dot|bucket_listv2_delimiter_dot|bucket_list_delimiter_unreadable|bucket_listv2_delimiter_unreadable|bucket_listv2_fetchowner_defaultempty|bucket_listv2_fetchowner_empty|bucket_list_prefix_delimiter_alt|bucket_listv2_prefix_delimiter_alt|bucket_list_prefix_delimiter_prefix_not_exist|bucket_listv2_prefix_delimiter_prefix_not_exist|bucket_list_prefix_delimiter_delimiter_not_exist|bucket_listv2_prefix_delimiter_delimiter_not_exist|bucket_list_prefix_delimiter_prefix_delimiter_not_exist|bucket_listv2_prefix_delimiter_prefix_delimiter_not_exist|bucket_list_maxkeys_none|bucket_listv2_maxkeys_none|bucket_list_maxkeys_invalid|bucket_listv2_continuationtoken_empty|bucket_list_return_data|bucket_list_objects_anonymous|bucket_listv2_objects_anonymous|bucket_concurrent_set_canned_acl|object_write_to_nonexist_bucket|object_requestid_matches_header_on_error|object_set_get_metadata_none_to_good|object_set_get_metadata_none_to_empty|object_set_get_metadata_overwrite_to_empty|post_object_anonymous_request|post_object_authenticated_request|post_object_authenticated_no_content_type|post_object_authenticated_request_bad_access_key|post_object_set_success_code|post_object_set_invalid_success_code|post_object_upload_larger_than_chunk|post_object_set_key_from_filename|post_object_ignored_header|post_object_case_insensitive_condition_fields|post_object_escaped_field_values|post_object_success_redirect_action|post_object_invalid_signature|post_object_invalid_access_key|post_object_missing_policy_condition|post_object_user_specified_header|post_object_request_missing_policy_specified_field|post_object_expired_policy|post_object_invalid_request_field_value|get_object_ifunmodifiedsince_good|put_object_ifmatch_failed|object_raw_get_bucket_gone|object_delete_key_bucket_gone|object_raw_get_bucket_acl|object_raw_get_object_acl|object_raw_response_headers|object_raw_authenticated_bucket_gone|object_raw_get_x_amz_expires_out_max_range|object_raw_get_x_amz_expires_out_positive_range|object_anon_put_write_access|object_raw_put_authenticated_expired|bucket_create_exists|bucket_create_naming_bad_short_one|bucket_create_naming_bad_short_two|bucket_get_location|bucket_acl_default|bucket_acl_canned|bucket_acl_canned_publicreadwrite|bucket_acl_canned_authenticatedread|object_acl_default|object_acl_canned_during_create|object_acl_canned|object_acl_canned_publicreadwrite|object_acl_canned_authenticatedread|object_acl_canned_bucketownerread|object_acl_canned_bucketownerfullcontrol|object_acl_full_control_verify_attributes|bucket_acl_canned_private_to_private|bucket_acl_grant_nonexist_user|bucket_acl_no_grants|bucket_acl_grant_email_not_exist|bucket_acl_revoke_all|bucket_recreate_not_overriding|object_copy_verify_contenttype|object_copy_to_itself_with_metadata|object_copy_not_owned_bucket|object_copy_not_owned_object_bucket|object_copy_retaining_metadata|object_copy_replacing_metadata|multipart_upload_empty|multipart_copy_invalid_range|multipart_copy_special_name
s|multipart_upload_resend_part|multipart_upload_size_too_small|abort_multipart_upload_not_found|multipart_upload_missing_part|100_continue|ranged_request_invalid_range|ranged_request_empty_object|access_bucket|list_multipart_upload_owner|multipart_upload_small)"
depends_on:
- master
- volume

21
docker/compose/local-sync-mount-compose.yml

@@ -0,0 +1,21 @@
version: '3.9'
services:
node1:
image: chrislusf/seaweedfs:local
command: "server -master -volume -filer"
mount1:
image: chrislusf/seaweedfs:local
privileged: true
command: "mount -filer=node1:8888 -dir=/mnt -dirAutoCreate"
node2:
image: chrislusf/seaweedfs:local
ports:
- 7888:8888
command: "server -master -volume -filer"
mount2:
image: chrislusf/seaweedfs:local
privileged: true
command: "mount -filer=node2:8888 -dir=/mnt -dirAutoCreate"
sync:
image: chrislusf/seaweedfs:local
command: "-v=4 filer.sync -a=node1:8888 -b=node2:8888 -a.debug -b.debug"

1
docker/compose/master-cloud.toml

@@ -28,3 +28,4 @@ sleep_minutes = 17 # sleep minutes between each script execution
region = "us-east-2"
bucket = "volume_bucket" # an existing bucket
endpoint = "http://server2:8333"
storage_class = "STANDARD_IA"

61
docker/compose/test-etcd-filer.yml

@@ -0,0 +1,61 @@
version: '2'
services:
etcd:
image: quay.io/coreos/etcd:v3.5.4
command: "etcd --advertise-client-urls http://etcd:2379 --listen-client-urls http://0.0.0.0:2379"
ports:
- 2379:2379
master:
image: chrislusf/seaweedfs:local
ports:
- 9333:9333
- 19333:19333
command: "master -ip=master -volumeSizeLimitMB=1024"
volume:
image: chrislusf/seaweedfs:local
ports:
- 8080:8080
- 18080:18080
command: "volume -mserver=master:9333 -port=8080 -ip=volume -max=0 -preStopSeconds=1"
depends_on:
- master
s3:
image: chrislusf/seaweedfs:local
ports:
- 8888:8888
- 18888:18888
- 8333:8333
command: '-v 9 filer -master="master:9333" -s3 -s3.config=/etc/seaweedfs/s3.json -s3.port=8333'
environment:
WEED_LEVELDB2_ENABLED: 'false'
WEED_ETCD_ENABLED: 'true'
WEED_ETCD_SERVERS: "http://etcd:2379"
volumes:
- ./s3.json:/etc/seaweedfs/s3.json
depends_on:
- etcd
- master
- volume
registry:
image: registry:2
environment:
REGISTRY_HTTP_ADDR: "0.0.0.0:5001" # seaweedfs s3
REGISTRY_LOG_LEVEL: "debug"
REGISTRY_STORAGE: "s3"
REGISTRY_STORAGE_S3_REGION: "us-east-1"
REGISTRY_STORAGE_S3_REGIONENDPOINT: "http://s3:8333"
REGISTRY_STORAGE_S3_BUCKET: "registry"
REGISTRY_STORAGE_S3_ACCESSKEY: "some_access_key1"
REGISTRY_STORAGE_S3_SECRETKEY: "some_secret_key1"
REGISTRY_STORAGE_S3_V4AUTH: "true"
REGISTRY_STORAGE_S3_SECURE: "false"
REGISTRY_STORAGE_S3_SKIPVERIFY: "true"
REGISTRY_STORAGE_S3_ROOTDIRECTORY: "/"
REGISTRY_STORAGE_DELETE_ENABLED: "true"
REGISTRY_STORAGE_REDIRECT_DISABLE: "true"
REGISTRY_VALIDATION_DISABLED: "true"
ports:
- 5001:5001
depends_on:
- s3

35
docker/compose/test-ydb-filer.yml

@@ -0,0 +1,35 @@
version: '2'
services:
ydb:
image: cr.yandex/yc/yandex-docker-local-ydb
ports:
- 2135:2135
- 8765:8765
- 2136:2136
environment:
- YDB_DEFAULT_LOG_LEVEL=DEBUG
- GRPC_TLS_PORT=2135
- GRPC_PORT=2136
- MON_PORT=8765
s3:
image: chrislusf/seaweedfs:local
ports:
- 9333:9333
- 19333:19333
- 8888:8888
- 8000:8000
- 18888:18888
command: "server -ip=s3 -filer -master.volumeSizeLimitMB=16 -volume.max=0 -volume -volume.preStopSeconds=1 -s3 -s3.config=/etc/seaweedfs/s3.json -s3.port=8000 -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=false"
volumes:
- ./s3.json:/etc/seaweedfs/s3.json
environment:
WEED_LEVELDB2_ENABLED: "false"
WEED_YDB_ENABLED: "true"
WEED_YDB_DSN: "grpc://ydb:2136/?database=local"
WEED_YDB_PREFIX: "seaweedfs"
YDB_ANONYMOUS_CREDENTIALS: 1
WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
depends_on:
- ydb

2
docker/compose/tls.env

@@ -12,3 +12,5 @@ WEED_GRPC_MASTER_ALLOWED_COMMONNAMES="volume01.dev,master01.dev,filer01.dev,clie
WEED_GRPC_VOLUME_ALLOWED_COMMONNAMES="volume01.dev,master01.dev,filer01.dev,client01.dev"
WEED_GRPC_FILER_ALLOWED_COMMONNAMES="volume01.dev,master01.dev,filer01.dev,client01.dev"
WEED_GRPC_CLIENT_ALLOWED_COMMONNAMES="volume01.dev,master01.dev,filer01.dev,client01.dev"
#GRPC_GO_LOG_SEVERITY_LEVEL=info
#GRPC_GO_LOG_VERBOSITY_LEVEL=2

26
docker/entrypoint.sh

@@ -24,7 +24,7 @@ case "$1" in
'master')
ARGS="-mdir=/data -volumePreallocate -volumeSizeLimitMB=1024"
shift
exec /usr/bin/weed master $ARGS $@
exec /usr/bin/weed -logtostderr=true master $ARGS $@
;;
'volume')
@@ -33,7 +33,7 @@ case "$1" in
ARGS="-dir=/data"
fi
shift
exec /usr/bin/weed volume $ARGS $@
exec /usr/bin/weed -logtostderr=true volume $ARGS $@
;;
'server')
@@ -42,31 +42,27 @@ case "$1" in
ARGS="-dir=/data -master.volumePreallocate -master.volumeSizeLimitMB=1024"
fi
shift
exec /usr/bin/weed server $ARGS $@
exec /usr/bin/weed -logtostderr=true server $ARGS $@
;;
'filer')
ARGS=""
shift
exec /usr/bin/weed filer $ARGS $@
exec /usr/bin/weed -logtostderr=true filer $ARGS $@
;;
's3')
ARGS="-domainName=$S3_DOMAIN_NAME -key.file=$S3_KEY_FILE -cert.file=$S3_CERT_FILE"
shift
exec /usr/bin/weed s3 $ARGS $@
exec /usr/bin/weed -logtostderr=true s3 $ARGS $@
;;
'cronjob')
MASTER=${WEED_MASTER-localhost:9333}
FIX_REPLICATION_CRON_SCHEDULE=${CRON_SCHEDULE-*/7 * * * * *}
echo "$FIX_REPLICATION_CRON_SCHEDULE" 'echo "lock; volume.fix.replication; unlock" | weed shell -master='$MASTER > /crontab
BALANCING_CRON_SCHEDULE=${CRON_SCHEDULE-25 * * * * *}
echo "$BALANCING_CRON_SCHEDULE" 'echo "lock; volume.balance -collection ALL_COLLECTIONS -force; unlock" | weed shell -master='$MASTER >> /crontab
echo "Running Crontab:"
cat /crontab
exec supercronic /crontab
;;
'shell')
ARGS="-cluster=$SHELL_CLUSTER -filer=$SHELL_FILER -filerGroup=$SHELL_FILER_GROUP -master=$SHELL_MASTER -options=$SHELL_OPTIONS"
shift
exec echo "$@" | /usr/bin/weed -logtostderr=true shell $ARGS
;;
*)
exec /usr/bin/weed $@
;;
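A usage sketch for the new `shell` entry (the network name, image tag, and shell command are illustrative; `SHELL_MASTER` and friends are the environment variables read above):

```
# run a one-off weed shell command against a running compose cluster
docker run --rm --network seaweedfs_default \
  -e SHELL_MASTER=master:9333 -e SHELL_FILER=filer:8888 \
  chrislusf/seaweedfs:local shell "volume.list"
```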

3
docker/filer_rocksdb.toml

@@ -0,0 +1,3 @@
[rocksdb]
enabled = true
dir = "/data/filer_rocksdb"

1
docker/prometheus/prometheus.yml

@@ -8,6 +8,7 @@ scrape_configs:
static_configs:
- targets:
- 'prometheus:9090'
- 'master:9324'
- 'volume:9325'
- 'filer:9326'
- 's3:9327'

19
docker/seaweedfs-compose.yml

@@ -6,14 +6,15 @@ services:
ports:
- 9333:9333
- 19333:19333
command: "master -ip=master"
- 9324:9324
command: "master -ip=master -ip.bind=0.0.0.0 -metricsPort=9324"
volume:
image: chrislusf/seaweedfs # use a remote image
ports:
- 8080:8080
- 18080:18080
- 9325:9325
command: 'volume -mserver="master:9333" -port=8080 -metricsPort=9325'
command: 'volume -mserver="master:9333" -ip.bind=0.0.0.0 -port=8080 -metricsPort=9325'
depends_on:
- master
filer:
@@ -22,28 +23,18 @@ services:
- 8888:8888
- 18888:18888
- 9326:9326
command: 'filer -master="master:9333" -metricsPort=9326'
command: 'filer -master="master:9333" -ip.bind=0.0.0.0 -metricsPort=9326'
tty: true
stdin_open: true
depends_on:
- master
- volume
cronjob:
image: chrislusf/seaweedfs # use a remote image
command: 'cronjob'
environment:
# Run re-replication every 2 minutes
CRON_SCHEDULE: '*/2 * * * * *' # Default: '*/5 * * * * *'
WEED_MASTER: master:9333 # Default: localhost:9333
depends_on:
- master
- volume
s3:
image: chrislusf/seaweedfs # use a remote image
ports:
- 8333:8333
- 9327:9327
command: 's3 -filer="filer:8888" -metricsPort=9327'
command: 's3 -filer="filer:8888" -ip.bind=0.0.0.0 -metricsPort=9327'
depends_on:
- master
- volume
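As a quick check of the monitoring wiring (a sketch; host ports as published above, and the /metrics path assumed to follow the usual Prometheus convention), each `-metricsPort` exposes an endpoint that the prometheus.yml targets above scrape:

```
curl http://localhost:9324/metrics   # master
curl http://localhost:9325/metrics   # volume server
curl http://localhost:9326/metrics   # filer
curl http://localhost:9327/metrics   # s3 gateway
```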

2
docker/seaweedfs.sql

@@ -1,6 +1,6 @@
CREATE DATABASE IF NOT EXISTS seaweedfs;
CREATE USER IF NOT EXISTS 'seaweedfs'@'%' IDENTIFIED BY 'secret';
GRANT ALL PRIVILEGES ON seaweedfs_fast.* TO 'seaweedfs'@'%';
GRANT ALL PRIVILEGES ON seaweedfs.* TO 'seaweedfs'@'%';
FLUSH PRIVILEGES;
USE seaweedfs;
CREATE TABLE IF NOT EXISTS filemeta (

286
go.mod

@@ -1,27 +1,24 @@
module github.com/chrislusf/seaweedfs
go 1.17
go 1.18
require (
cloud.google.com/go v0.58.0 // indirect
cloud.google.com/go/pubsub v1.3.1
cloud.google.com/go/storage v1.9.0
cloud.google.com/go v0.102.0 // indirect
cloud.google.com/go/pubsub v1.22.2
cloud.google.com/go/storage v1.22.1
github.com/Azure/azure-pipeline-go v0.2.3
github.com/Azure/azure-storage-blob-go v0.14.0
github.com/BurntSushi/toml v0.3.1 // indirect
github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798 // indirect
github.com/OneOfOne/xxhash v1.2.2
github.com/Shopify/sarama v1.23.1
github.com/aws/aws-sdk-go v1.35.3
github.com/Azure/azure-storage-blob-go v0.15.0
github.com/OneOfOne/xxhash v1.2.8
github.com/Shopify/sarama v1.34.1
github.com/aws/aws-sdk-go v1.44.37
github.com/beorn7/perks v1.0.1 // indirect
github.com/buraksezer/consistent v0.0.0-20191006190839-693edf70fd72
github.com/bwmarrin/snowflake v0.3.0
github.com/cespare/xxhash v1.1.0
github.com/cespare/xxhash/v2 v2.1.1 // indirect
github.com/chrislusf/raft v1.0.7
github.com/colinmarc/hdfs/v2 v2.2.0
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/chrislusf/raft v1.0.9
github.com/coreos/go-semver v0.3.0 // indirect
github.com/coreos/go-systemd/v22 v22.0.0 // indirect
github.com/coreos/go-systemd/v22 v22.3.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/disintegration/imaging v1.6.2
@@ -34,91 +31,77 @@ require (
github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect
github.com/facebookgo/stats v0.0.0-20151006221625-1b76add642e4
github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 // indirect
github.com/fclairamb/ftpserverlib v0.8.0
github.com/frankban/quicktest v1.7.2 // indirect
github.com/fsnotify/fsnotify v1.4.9 // indirect
github.com/fclairamb/ftpserverlib v0.18.0
github.com/fsnotify/fsnotify v1.5.4 // indirect
github.com/go-errors/errors v1.1.1 // indirect
github.com/go-redis/redis/v8 v8.4.4
github.com/go-sql-driver/mysql v1.5.0
github.com/go-stack/stack v1.8.0 // indirect
github.com/go-redis/redis/v8 v8.11.5
github.com/go-redsync/redsync/v4 v4.5.0
github.com/go-sql-driver/mysql v1.6.0
github.com/go-stack/stack v1.8.1 // indirect
github.com/go-zookeeper/zk v1.0.2 // indirect
github.com/gocql/gocql v0.0.0-20210707082121-9a3953d1826d
github.com/golang-jwt/jwt v3.2.1+incompatible
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e
github.com/golang/protobuf v1.4.3
github.com/golang-jwt/jwt v3.2.2+incompatible
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2
github.com/golang/snappy v0.0.4 // indirect
github.com/google/btree v1.0.0
github.com/google/go-cmp v0.5.5 // indirect
github.com/google/uuid v1.2.0
github.com/google/wire v0.4.0 // indirect
github.com/googleapis/gax-go v2.0.2+incompatible // indirect
github.com/googleapis/gax-go/v2 v2.0.5 // indirect
github.com/gorilla/mux v1.7.4
github.com/gorilla/websocket v1.4.1 // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.1.0
github.com/google/btree v1.1.2
github.com/google/go-cmp v0.5.8 // indirect
github.com/google/uuid v1.3.0
github.com/google/wire v0.5.0 // indirect
github.com/googleapis/gax-go/v2 v2.4.0 // indirect
github.com/gorilla/mux v1.8.0
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect
github.com/hashicorp/errwrap v1.0.0 // indirect github.com/hashicorp/errwrap v1.0.0 // indirect
github.com/hashicorp/go-multierror v1.0.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-uuid v1.0.2 // indirect github.com/hashicorp/go-uuid v1.0.2 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect
github.com/jcmturner/gofork v1.0.0 // indirect github.com/jcmturner/gofork v1.0.0 // indirect
github.com/jcmturner/gokrb5/v8 v8.4.1
github.com/jinzhu/copier v0.2.8
github.com/jcmturner/gokrb5/v8 v8.4.2 // indirect
github.com/jinzhu/copier v0.3.5
github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/json-iterator/go v1.1.11
github.com/jstemmer/go-junit-report v0.9.1 // indirect
github.com/karlseguin/ccache/v2 v2.0.7
github.com/json-iterator/go v1.1.12
github.com/karlseguin/ccache/v2 v2.0.8
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/klauspost/compress v1.10.9 // indirect
github.com/klauspost/cpuid v1.2.1 // indirect
github.com/klauspost/reedsolomon v1.9.2
github.com/konsorten/go-windows-terminal-sequences v1.0.3 // indirect
github.com/klauspost/compress v1.15.6 // indirect
github.com/klauspost/reedsolomon v1.9.16
github.com/kurin/blazer v0.5.3
github.com/lib/pq v1.10.0
github.com/magiconair/properties v1.8.1 // indirect
github.com/mailru/easyjson v0.7.1 // indirect
github.com/mattn/go-ieproxy v0.0.1 // indirect
github.com/mattn/go-isatty v0.0.12 // indirect
github.com/lib/pq v1.10.6
github.com/linxGnu/grocksdb v1.7.3
github.com/magiconair/properties v1.8.6 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-ieproxy v0.0.3 // indirect
github.com/mattn/go-isatty v0.0.14 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
github.com/mitchellh/mapstructure v1.1.2 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.1 // indirect
github.com/nats-io/jwt v1.0.1 // indirect
github.com/nats-io/nats.go v1.10.0 // indirect
github.com/nats-io/nkeys v0.2.0 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
github.com/olivere/elastic/v7 v7.0.19
github.com/pelletier/go-toml v1.7.0 // indirect
github.com/peterh/liner v1.1.0
github.com/pierrec/lz4 v2.2.7+incompatible // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/olivere/elastic/v7 v7.0.32
github.com/pelletier/go-toml v1.9.5 // indirect
github.com/peterh/liner v1.2.2
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/posener/complete v1.2.3
github.com/pquerna/cachecontrol v0.1.0
github.com/prometheus/client_golang v1.11.0
github.com/prometheus/client_golang v1.12.2
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.26.0 // indirect
github.com/prometheus/procfs v0.6.0 // indirect
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 // indirect
github.com/prometheus/common v0.32.1 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
github.com/seaweedfs/fuse v1.2.0
github.com/seaweedfs/goexif v1.0.2
github.com/sirupsen/logrus v1.6.0 // indirect
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e
github.com/seaweedfs/goexif v2.0.0+incompatible
github.com/sirupsen/logrus v1.8.1 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/spf13/afero v1.6.0 // indirect
github.com/spf13/cast v1.3.0 // indirect
github.com/spf13/afero v1.8.2 // indirect
github.com/spf13/cast v1.5.0 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/spf13/viper v1.4.0
github.com/streadway/amqp v0.0.0-20200108173154-1c71cc93ed71
github.com/stretchr/testify v1.7.0
github.com/spf13/viper v1.12.0
github.com/streadway/amqp v1.0.0
github.com/stretchr/testify v1.7.3
github.com/stvp/tempredis v0.0.0-20181119212430-b82af8480203
github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965
github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c
github.com/tidwall/gjson v1.8.1
github.com/tidwall/match v1.0.3
github.com/tidwall/pretty v1.1.0 // indirect
github.com/tikv/client-go v0.0.0-20210412055529-d811a08025fa
github.com/tikv/client-go/v2 v2.0.0-alpha.0.20210824090536-16d902a3c7e5 // indirect
github.com/tidwall/gjson v1.14.1
github.com/tidwall/match v1.1.1
github.com/tidwall/pretty v1.2.0 // indirect
github.com/tsuna/gohbase v0.0.0-20201125011725-348991136365
github.com/tylertreat/BoomFilters v0.0.0-20210315201527-1a82519a3e43
github.com/valyala/bytebufferpool v1.0.0
@ -126,56 +109,129 @@ require (
github.com/viant/ptrie v0.3.0 github.com/viant/ptrie v0.3.0
github.com/viant/toolbox v0.33.2 // indirect github.com/viant/toolbox v0.33.2 // indirect
github.com/xdg-go/pbkdf2 v1.0.0 // indirect github.com/xdg-go/pbkdf2 v1.0.0 // indirect
github.com/xdg-go/scram v1.0.2 // indirect
github.com/xdg-go/stringprep v1.0.2 // indirect
github.com/xdg-go/scram v1.1.1 // indirect
github.com/xdg-go/stringprep v1.0.3 // indirect
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect
go.etcd.io/etcd v3.3.25+incompatible
go.mongodb.org/mongo-driver v1.7.0
go.opencensus.io v0.22.4 // indirect
go.opentelemetry.io/otel v0.15.0 // indirect
gocloud.dev v0.20.0
gocloud.dev/pubsub/natspubsub v0.20.0
gocloud.dev/pubsub/rabbitpubsub v0.20.0
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 // indirect
go.etcd.io/etcd/client/v3 v3.5.4
go.mongodb.org/mongo-driver v1.9.1
go.opencensus.io v0.23.0 // indirect
gocloud.dev v0.25.0
gocloud.dev/pubsub/natspubsub v0.25.0
gocloud.dev/pubsub/rabbitpubsub v0.25.0
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 // indirect
golang.org/x/exp v0.0.0-20220414153411-bcd21879b8fd
golang.org/x/image v0.0.0-20200119044424-58c23975cae1
golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d // indirect
golang.org/x/sys v0.0.0-20210817142637-7d9622a276b7
golang.org/x/text v0.3.6 // indirect
golang.org/x/tools v0.1.4
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
google.golang.org/api v0.26.0
google.golang.org/appengine v1.6.6 // indirect
google.golang.org/genproto v0.0.0-20200608115520-7c474a2e3482 // indirect
google.golang.org/grpc v1.29.1
google.golang.org/protobuf v1.26.0-rc.1
golang.org/x/net v0.0.0-20220607020251-c690dde0001d
golang.org/x/oauth2 v0.0.0-20220524215830-622c5d57e401 // indirect
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a
golang.org/x/text v0.3.7 // indirect
golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df // indirect
google.golang.org/api v0.83.0
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220602131408-e326c6e8e9c8 // indirect
google.golang.org/grpc v1.47.0
google.golang.org/protobuf v1.28.0
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/jcmturner/aescts.v1 v1.0.1 // indirect
gopkg.in/jcmturner/dnsutils.v1 v1.0.1 // indirect
gopkg.in/jcmturner/goidentity.v3 v3.0.0 // indirect
gopkg.in/jcmturner/gokrb5.v7 v7.3.0 // indirect
gopkg.in/jcmturner/rpc.v1 v1.1.0 // indirect
modernc.org/b v1.0.0 // indirect
modernc.org/cc/v3 v3.33.5 // indirect
modernc.org/ccgo/v3 v3.9.4 // indirect
modernc.org/libc v1.9.5 // indirect
modernc.org/mathutil v1.2.2 // indirect
modernc.org/memory v1.0.4 // indirect
modernc.org/cc/v3 v3.36.0 // indirect
modernc.org/ccgo/v3 v3.16.6 // indirect
modernc.org/libc v1.16.7 // indirect
modernc.org/mathutil v1.4.1 // indirect
modernc.org/memory v1.1.1 // indirect
modernc.org/opt v0.1.1 // indirect
modernc.org/sqlite v1.10.7
modernc.org/strutil v1.1.0 // indirect
modernc.org/sqlite v1.17.2
modernc.org/strutil v1.1.2
modernc.org/token v1.0.0 // indirect
)

require (
github.com/Jille/raft-grpc-transport v1.2.0
github.com/arangodb/go-driver v1.3.2
github.com/fluent/fluent-logger-golang v1.9.0
github.com/hanwen/go-fuse/v2 v2.1.1-0.20220531082602-17a864ed5940
github.com/hashicorp/raft v1.3.9
github.com/hashicorp/raft-boltdb v0.0.0-20220329195025-15018e9b97e0
github.com/tikv/client-go/v2 v2.0.1
github.com/ydb-platform/ydb-go-sdk-auth-environ v0.1.2
github.com/ydb-platform/ydb-go-sdk/v3 v3.27.0
google.golang.org/grpc/security/advancedtls v0.0.0-20220622233350-5cdb09fa29c1
)
require (
cloud.google.com/go/compute v1.6.1 // indirect
cloud.google.com/go/iam v0.3.0 // indirect
github.com/arangodb/go-velocypack v0.0.0-20200318135517-5af53c29c67e // indirect
github.com/armon/go-metrics v0.3.10 // indirect
github.com/aws/aws-sdk-go-v2 v1.16.2 // indirect
github.com/aws/aws-sdk-go-v2/config v1.15.3 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.11.2 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.3 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.9 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.3 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.10 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.3 // indirect
github.com/aws/aws-sdk-go-v2/service/sns v1.17.4 // indirect
github.com/aws/aws-sdk-go-v2/service/sqs v1.18.3 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.11.3 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.16.3 // indirect
github.com/aws/smithy-go v1.11.2 // indirect
github.com/benbjohnson/clock v1.1.0 // indirect
github.com/boltdb/bolt v1.3.1 // indirect
github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 // indirect
github.com/d4l3k/messagediff v1.2.1 // indirect
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 // indirect
github.com/fatih/color v1.13.0 // indirect
github.com/fclairamb/go-log v0.3.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/googleapis/go-type-adapters v1.0.0 // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.1.0 // indirect
github.com/hashicorp/go-hclog v1.2.0 // indirect
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
github.com/hashicorp/go-msgpack v1.1.5 // indirect
github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/jcmturner/aescts/v2 v2.0.0 // indirect
github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect
github.com/jcmturner/goidentity/v6 v6.0.1 // indirect
github.com/jcmturner/rpc/v2 v2.0.2 // indirect
github.com/jcmturner/rpc/v2 v2.0.3 // indirect
github.com/jonboulle/clockwork v0.2.2 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/klauspost/cpuid/v2 v2.0.6 // indirect
github.com/mattn/go-colorable v0.1.12 // indirect
github.com/mattn/go-runewidth v0.0.7 // indirect
github.com/mattn/go-sqlite3 v2.0.1+incompatible // indirect
github.com/nats-io/nats.go v1.13.1-0.20220121202836-972a071d373d // indirect
github.com/nats-io/nkeys v0.3.0 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/pelletier/go-toml/v2 v2.0.1 // indirect
github.com/philhofer/fwd v1.1.1 // indirect
github.com/pierrec/lz4/v4 v4.1.14 // indirect
github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c // indirect
github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00 // indirect
github.com/pingcap/kvproto v0.0.0-20220106070556-3fa8fa04f898 // indirect
github.com/pingcap/log v0.0.0-20211215031037-e024ba4eb0ee // indirect
github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/subosito/gotenv v1.3.0 // indirect
github.com/tikv/pd/client v0.0.0-20220216070739-26c668271201 // indirect
github.com/tinylib/msgp v1.1.6 // indirect
github.com/twmb/murmur3 v1.1.3 // indirect
github.com/yandex-cloud/go-genproto v0.0.0-20211115083454-9ca41db5ed9e // indirect
github.com/ydb-platform/ydb-go-genproto v0.0.0-20220531094121-36ca6bddb9f7 // indirect
github.com/ydb-platform/ydb-go-yc v0.8.3 // indirect
github.com/ydb-platform/ydb-go-yc-metadata v0.5.2 // indirect
go.etcd.io/etcd/api/v3 v3.5.4 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.4 // indirect
go.uber.org/atomic v1.9.0 // indirect
go.uber.org/multierr v1.8.0 // indirect
go.uber.org/zap v1.21.0 // indirect
golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57 // indirect
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f // indirect
gopkg.in/ini.v1 v1.66.4 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
lukechampine.com/uint128 v1.1.1 // indirect
)
// replace github.com/seaweedfs/fuse => /Users/chris/go/src/github.com/seaweedfs/fuse
// replace github.com/chrislusf/raft => /Users/chris/go/src/github.com/chrislusf/raft
replace go.etcd.io/etcd => go.etcd.io/etcd v0.5.0-alpha.5.0.20200425165423-262c93980547
// replace github.com/chrislusf/raft => /Users/chrislu/go/src/github.com/chrislusf/raft

1679
go.sum
File diff suppressed because it is too large

4
k8s/helm_charts2/Chart.yaml

@@ -1,5 +1,5 @@
apiVersion: v1
description: SeaweedFS
name: seaweedfs
appVersion: "2.65"
version: "2.65"
appVersion: "3.12"
version: "3.12"

15
k8s/helm_charts2/templates/_helpers.tpl

@@ -113,21 +113,6 @@ Inject extra environment vars in the format key:value, if populated
{{- end -}}
{{- end -}}
{{/* Return the proper cronjob image */}}
{{- define "cronjob.image" -}}
{{- if .Values.cronjob.imageOverride -}}
{{- $imageOverride := .Values.cronjob.imageOverride -}}
{{- printf "%s" $imageOverride -}}
{{- else -}}
{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}}
{{- $repositoryName := .Values.image.repository | toString -}}
{{- $name := .Values.global.imageName | toString -}}
{{- $tag := .Chart.AppVersion | toString -}}
{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}}
{{- end -}}
{{- end -}}
{{/* check if any PVC exists */}}
{{- define "volume.pvc_exists" -}}
{{- if or (or (eq .Values.volume.data.type "persistentVolumeClaim") (and (eq .Values.volume.idx.type "persistentVolumeClaim") .Values.volume.dir_idx )) (eq .Values.volume.logs.type "persistentVolumeClaim") -}}

58
k8s/helm_charts2/templates/cronjob.yaml

@@ -1,58 +0,0 @@
{{- if .Values.cronjob }}
{{- if .Values.cronjob.enabled }}
apiVersion: batch/v1beta1
kind: CronJob
metadata:
name: {{ include "seaweedfs.fullname" . }}-cronjob
spec:
schedule: "{{ .Values.cronjob.schedule }}"
startingDeadlineSeconds: 200
concurrencyPolicy: Forbid
failedJobsHistoryLimit: 2
successfulJobsHistoryLimit: 2
jobTemplate:
spec:
backoffLimit: 2
template:
spec:
{{- if .Values.cronjob.nodeSelector }}
nodeSelector:
{{ tpl .Values.cronjob.nodeSelector . | indent 12 | trim }}
{{- end }}
{{- if .Values.cronjob.tolerations }}
tolerations:
{{ tpl .Values.cronjob.tolerations . | nindent 12 | trim }}
{{- end }}
restartPolicy: OnFailure
containers:
- name: shell
image: {{ template "cronjob.image" . }}
imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }}
resources:
{{- toYaml .Values.cronjob.resources| nindent 16 }}
command:
- sh
- -c
- |
set -ex
echo -e "lock\n\
volume.balance -force \
{{ if .Values.volume.dataCenter }} -dataCenter {{ .Values.volume.dataCenter }}{{ end }}\
{{ if .Values.cronjob.collection }} -collection {{ .Values.cronjob.collection }}{{ end }}\n\
{{- if .Values.cronjob.enableFixReplication }}
volume.fix.replication -collectionPattern={{ .Values.cronjob.collectionPattern }} \n\
{{- end }}
unlock\n" | \
/usr/bin/weed shell \
{{- if .Values.cronjob.master }}
-master {{ .Values.cronjob.master }} \
{{- else }}
-master {{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc:{{ .Values.master.port }} \
{{- end }}
{{- if .Values.cronjob.filer }}
-filer {{ .Values.cronjob.filer }}
{{- else }}
-filer {{ template "seaweedfs.name" . }}-filer.{{ .Release.Namespace }}.svc:{{ .Values.filer.port }}
{{- end }}
{{- end }}
{{- end }}

2
k8s/helm_charts2/templates/filer-servicemonitor.yaml

@@ -1,4 +1,5 @@
{{- if .Values.filer.metricsPort }}
{{- if .Values.global.monitoring.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
@@ -16,3 +17,4 @@ spec:
app: {{ template "seaweedfs.name" . }}
component: filer
{{- end }}
{{- end }}

40
k8s/helm_charts2/templates/filer-statefulset.yaml

@@ -133,11 +133,6 @@ spec:
-encryptVolumeData \
{{- end }}
-ip=${POD_IP} \
{{- if .Values.filer.enable_peers }}
{{- if gt (.Values.filer.replicas | int) 1 }}
-peers=$(echo -n "{{ range $index := until (.Values.filer.replicas | int) }}${SEAWEEDFS_FULLNAME}-filer-{{ $index }}.${SEAWEEDFS_FULLNAME}-filer:{{ $.Values.filer.port }}{{ if lt $index (sub ($.Values.filer.replicas | int) 1) }},{{ end }}{{ end }}" | sed "s/$HOSTNAME.${SEAWEEDFS_FULLNAME}-filer:{{ $.Values.filer.port }}//" | sed 's/,$//; 's/^,//'; s/,,/,/;' ) \
{{- end }}
{{- end }}
{{- if .Values.filer.s3.enabled }}
-s3 \
-s3.port={{ .Values.filer.s3.port }} \
@@ -154,15 +149,21 @@ spec:
{{- if .Values.filer.s3.enableAuth }}
-s3.config=/etc/sw/seaweedfs_s3_config \
{{- end }}
{{- if .Values.filer.s3.auditLogConfig }}
-s3.auditLogConfig=/etc/sw/filer_s3_auditLogConfig.json \
{{- end }}
{{- end }}
-master={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}
{{- if or (.Values.global.enableSecurity) (.Values.filer.extraVolumeMounts) }}
volumeMounts:
- name: seaweedfs-filer-log-volume
mountPath: "/logs/"
- mountPath: /etc/sw
name: config-users
readOnly: true
{{- if .Values.filer.enablePVC }}
- name: data-filer
mountPath: /data
{{- end }}
{{- if .Values.global.enableSecurity }}
- name: security-config
readOnly: true
@@ -185,7 +186,6 @@ spec:
mountPath: /usr/local/share/ca-certificates/client/
{{- end }}
{{ tpl .Values.filer.extraVolumeMounts . | nindent 12 | trim }}
{{- end }}
ports:
- containerPort: {{ .Values.filer.port }}
name: swfs-filer
@@ -252,16 +252,18 @@ spec:
nodeSelector:
{{ tpl .Values.filer.nodeSelector . | indent 8 | trim }}
{{- end }}
{{/* volumeClaimTemplates:*/}}
{{/* - metadata:*/}}
{{/* name: data-{{ .Release.Namespace }}*/}}
{{/* spec:*/}}
{{/* accessModes:*/}}
{{/* - ReadWriteOnce*/}}
{{/* resources:*/}}
{{/* requests:*/}}
{{/* storage: {{ .Values.filer.storage }}*/}}
{{/* {{- if .Values.filer.storageClass }}*/}}
{{/* storageClassName: {{ .Values.filer.storageClass }}*/}}
{{/* {{- end }}*/}}
{{- if .Values.filer.enablePVC }}
volumeClaimTemplates:
- metadata:
name: data-filer
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ .Values.filer.storage }}
{{- if .Values.filer.storageClass }}
storageClassName: {{ .Values.filer.storageClass }}
{{- end }}
{{- end }}
{{- end }}

90
k8s/helm_charts2/templates/ingress.yaml

@@ -1,59 +1,67 @@
{{- if .Values.filer.ingress.enabled }}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion }}
apiVersion: networking.k8s.io/v1beta1
{{- else }}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: ingress-{{ template "seaweedfs.name" . }}-filer
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/auth-type: "basic"
nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret"
nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Filer'
nginx.ingress.kubernetes.io/service-upstream: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/use-regex: "true"
nginx.ingress.kubernetes.io/enable-rewrite-log: "true"
nginx.ingress.kubernetes.io/ssl-redirect: "false"
nginx.ingress.kubernetes.io/force-ssl-redirect: "false"
nginx.ingress.kubernetes.io/configuration-snippet: |
sub_filter '<head>' '<head> <base href="/sw-filer/">'; #add base url
sub_filter '="/' '="./'; #make absolute paths to relative
sub_filter '=/' '=./';
sub_filter '/seaweedfsstatic' './seaweedfsstatic';
sub_filter_once off;
name: ingress-{{ template "seaweedfs.name" . }}-filer
namespace: {{ .Release.Namespace }}
annotations:
{{ omit .Values.filer.ingress.annotations "kubernetes.io/ingress.class" | toYaml | nindent 4 }}
spec:
rules:
- http:
paths:
- path: /sw-filer/?(.*)
backend:
serviceName: {{ template "seaweedfs.name" . }}-filer
servicePort: {{ .Values.filer.port }}
ingressClassName: {{ .Values.filer.ingress.className | quote }}
rules:
- http:
paths:
- path: /sw-filer/?(.*)
pathType: ImplementationSpecific
backend:
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }}
service:
name: {{ template "seaweedfs.name" . }}-filer
port:
number: {{ .Values.filer.port }}
#name:
{{- else }}
serviceName: {{ template "seaweedfs.name" . }}-filer
servicePort: {{ .Values.filer.port }}
{{- end }}
{{- end }}
---
{{- if .Values.master.ingress.enabled }}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion }}
apiVersion: networking.k8s.io/v1beta1
{{- else }}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: ingress-{{ template "seaweedfs.name" . }}-master
namespace: {{ .Release.Namespace }}
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/auth-type: "basic"
nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret"
nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Master'
nginx.ingress.kubernetes.io/service-upstream: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/use-regex: "true"
nginx.ingress.kubernetes.io/enable-rewrite-log: "true"
nginx.ingress.kubernetes.io/ssl-redirect: "false"
nginx.ingress.kubernetes.io/force-ssl-redirect: "false"
nginx.ingress.kubernetes.io/configuration-snippet: |
sub_filter '<head>' '<head> <base href="/sw-master/">'; #add base url
sub_filter '="/' '="./'; #make absolute paths to relative
sub_filter '=/' '=./';
sub_filter '/seaweedfsstatic' './seaweedfsstatic';
sub_filter_once off;
{{ omit .Values.master.ingress.annotations "kubernetes.io/ingress.class" | toYaml | nindent 4 }}
spec:
ingressClassName: {{ .Values.master.ingress.className | quote }}
rules:
- http:
paths:
- path: /sw-master/?(.*)
pathType: ImplementationSpecific
backend:
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }}
service:
name: {{ template "seaweedfs.name" . }}-master
port:
number: {{ .Values.master.port }}
#name:
{{- else }}
serviceName: {{ template "seaweedfs.name" . }}-master
servicePort: {{ .Values.master.port }}
{{- end }}
{{- end }}

8
k8s/helm_charts2/templates/s3-deployment.yaml

@@ -10,7 +10,6 @@ metadata:
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
spec:
serviceName: {{ template "seaweedfs.name" . }}-s3
replicas: {{ .Values.s3.replicas }}
selector:
matchLabels:
@@ -93,6 +92,9 @@ spec:
{{- if .Values.s3.enableAuth }}
-config=/etc/sw/seaweedfs_s3_config \
{{- end }}
{{- if .Values.s3.auditLogConfig }}
-auditLogConfig=/etc/sw/s3_auditLogConfig.json \
{{- end }}
-filer={{ template "seaweedfs.name" . }}-filer-client:{{ .Values.filer.port }}
volumeMounts:
- name: logs
@@ -127,7 +129,7 @@ spec:
name: swfs-s3
readinessProbe:
httpGet:
path: /
path: /status
port: {{ .Values.s3.port }}
scheme: HTTP
initialDelaySeconds: 15
@@ -137,7 +139,7 @@ spec:
timeoutSeconds: 10
livenessProbe:
httpGet:
path: /
path: /status
port: {{ .Values.s3.port }}
scheme: HTTP
initialDelaySeconds: 20

2
k8s/helm_charts2/templates/s3-servicemonitor.yaml

@@ -1,4 +1,5 @@
{{- if .Values.s3.metricsPort }}
{{- if .Values.global.monitoring.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
@@ -16,3 +17,4 @@ spec:
app: {{ template "seaweedfs.name" . }}
component: s3
{{- end }}
{{- end }}

8
k8s/helm_charts2/templates/seaweedfs-s3-secret.yaml

@@ -18,4 +18,12 @@ stringData:
read_access_key_id: {{ $access_key_read }}
read_secret_access_key: {{ $secret_key_read }}
seaweedfs_s3_config: '{"identities":[{"name":"anvAdmin","credentials":[{"accessKey":"{{ $access_key_admin }}","secretKey":"{{ $secret_key_admin }}"}],"actions":["Admin","Read","Write"]},{"name":"anvReadOnly","credentials":[{"accessKey":"{{ $access_key_read }}","secretKey":"{{ $secret_key_read }}"}],"actions":["Read"]}]}'
{{- if .Values.filer.s3.auditLogConfig }}
filer_s3_auditLogConfig.json: |
{{ toJson .Values.filer.s3.auditLogConfig | nindent 4 }}
{{- end }}
{{- if .Values.s3.auditLogConfig }}
s3_auditLogConfig.json: |
{{ toJson .Values.s3.auditLogConfig | nindent 4 }}
{{- end }}
{{- end }}

4
k8s/helm_charts2/templates/service-account.yaml

@@ -1,7 +1,7 @@
#hack for delete pod master after migration
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: seaweefds-rw-cr
rules:
@@ -16,7 +16,7 @@ metadata:
namespace: {{ .Release.Namespace }}
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:serviceaccount:seaweefds-rw-sa:default
subjects:

2
k8s/helm_charts2/templates/volume-servicemonitor.yaml

@@ -1,4 +1,5 @@
{{- if .Values.volume.metricsPort }}
{{- if .Values.global.monitoring.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
@@ -16,3 +17,4 @@ spec:
app: {{ template "seaweedfs.name" . }}
component: volume
{{- end }}
{{- end }}

2
k8s/helm_charts2/templates/volume-statefulset.yaml

@@ -40,7 +40,7 @@ spec:
imagePullSecrets:
- name: {{ .Values.global.imagePullSecrets }}
{{- end }}
terminationGracePeriodSeconds: 10
terminationGracePeriodSeconds: 150
{{- if .Values.volume.priorityClassName }}
priorityClassName: {{ .Values.volume.priorityClassName | quote }}
{{- end }}

72
k8s/helm_charts2/values.yaml

@@ -41,8 +41,7 @@ master:
grpcPort: 19333
ipBind: "0.0.0.0"
volumePreallocate: false
#Master stops directing writes to oversized volumes
volumeSizeLimitMB: 30000
volumeSizeLimitMB: 1000
loggingOverrideLevel: null
#number of seconds between heartbeats, default 5
pulseSeconds: null
@@ -109,6 +108,26 @@ master:
# ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
priorityClassName: ""
ingress:
enabled: false
className: "nginx"
annotations:
nginx.ingress.kubernetes.io/auth-type: "basic"
nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret"
nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Master'
nginx.ingress.kubernetes.io/service-upstream: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/use-regex: "true"
nginx.ingress.kubernetes.io/enable-rewrite-log: "true"
nginx.ingress.kubernetes.io/ssl-redirect: "false"
nginx.ingress.kubernetes.io/force-ssl-redirect: "false"
nginx.ingress.kubernetes.io/configuration-snippet: |
sub_filter '<head>' '<head> <base href="/sw-master/">'; #add base url
sub_filter '="/' '="./'; #make absolute paths to relative
sub_filter '=/' '=./';
sub_filter '/seaweedfsstatic' './seaweedfsstatic';
sub_filter_once off;
extraEnvironmentVars:
WEED_MASTER_VOLUME_GROWTH_COPY_1: 7
WEED_MASTER_VOLUME_GROWTH_COPY_2: 6
@@ -161,7 +180,7 @@ volume:
# Directories to store data files. dir[,dir]... (default "/tmp")
dir: "/data"
# Directories to store index files. dir[,dir]... (default "/tmp")
# Directories to store index files. dir[,dir]... (default is the same as "dir")
dir_idx: null
# Maximum numbers of volumes, count[,count]...
@@ -246,8 +265,6 @@ filer:
maxMB: null
# encrypt data on volume servers
encryptVolumeData: false
# enable peers sync metadata, for leveldb (localdb for filer but with sync across)
enable_peers: false
# Whether proxy or redirect to volume server during file GET request
redirectOnRead: false
@@ -255,12 +272,12 @@ filer:
# Limit sub dir listing size (default 100000)
dirListLimit: 100000
# Turn off directory listing
disableDirListing: false
# Disable http request, only gRpc operations are allowed
disableHttp: false
# enablePVC will create a pvc for filer for data persistence.
enablePVC: false
# storage and storageClass are the settings for configuring stateful
# storage for the master pods. storage should be set to the disk size of
# the attached volume. storageClass is the class of storage which defaults
@@ -311,6 +328,26 @@ filer:
# ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
priorityClassName: ""
ingress:
enabled: false
className: "nginx"
annotations:
nginx.ingress.kubernetes.io/auth-type: "basic"
nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret"
nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Filer'
nginx.ingress.kubernetes.io/service-upstream: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/use-regex: "true"
nginx.ingress.kubernetes.io/enable-rewrite-log: "true"
nginx.ingress.kubernetes.io/ssl-redirect: "false"
nginx.ingress.kubernetes.io/force-ssl-redirect: "false"
nginx.ingress.kubernetes.io/configuration-snippet: |
sub_filter '<head>' '<head> <base href="/sw-filer/">'; #add base url
sub_filter '="/' '="./'; #make absolute paths to relative
sub_filter '=/' '=./';
sub_filter '/seaweedfsstatic' './seaweedfsstatic';
sub_filter_once off;
# extraEnvVars is a list of extra environment variables to set with the stateful set.
extraEnvironmentVars:
WEED_MYSQL_ENABLED: "true"
@@ -323,6 +360,7 @@ filer:
WEED_MYSQL_CONNECTION_MAX_LIFETIME_SECONDS: "600"
# enable usage of memsql as filer backend
WEED_MYSQL_INTERPOLATEPARAMS: "true"
# if you want to use leveldb2, you should enable "enablePVC", or you may lose your data.
WEED_LEVELDB2_ENABLED: "false"
# with http DELETE, by default the filer would check whether a folder is empty.
# recursive_delete will delete all sub folders and files, similar to "rm -Rf"
@@ -340,6 +378,7 @@ filer:
# enable user & permission to s3 (need to inject to all services)
enableAuth: false
skipAuthSecretCreation: false
auditLogConfig: {}
s3:
enabled: false
@@ -356,6 +395,7 @@ s3:
# enable user & permission to s3 (need to inject to all services)
enableAuth: false
skipAuthSecretCreation: false
auditLogConfig: {}
# Suffix of the host name, {bucket}.{domainName}
domainName: ""
@@ -391,22 +431,6 @@ s3:
size: ""
storageClass: ""
cronjob:
enabled: true
master: "seaweedfs-master:9333"
filer: "seaweedfs-filer-client:8888"
tolerations: ""
nodeSelector: |
sw-backend: "true"
replication:
enable: true
collectionPattern: ""
schedule: "*/7 * * * *"
resources: null
# balance all volumes among volume servers
# ALL|EACH_COLLECTION|<collection_name>
collection: ""
certificates:
commonName: "SeaweedFS CA"

BIN
note/SeaweedFS_Gateway_RemoteObjectStore.png

After: Width: 1017 | Height: 633 | Size: 127 KiB

8
other/java/client/pom.xml

@@ -5,7 +5,7 @@
<groupId>com.github.chrislusf</groupId>
<artifactId>seaweedfs-client</artifactId>
<version>1.6.7</version>
<version>2.85</version>
<parent>
<groupId>org.sonatype.oss</groupId>
@@ -14,7 +14,7 @@
</parent>
<properties>
<protobuf.version>3.9.1</protobuf.version>
<protobuf.version>3.16.1</protobuf.version>
<!-- follow https://github.com/grpc/grpc-java -->
<grpc.version>1.23.0</grpc.version>
<guava.version>30.0-jre</guava.version>
@@ -60,7 +60,7 @@
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpmime</artifactId>
<version>4.5.6</version>
<version>4.5.13</version>
</dependency>
<dependency>
<groupId>junit</groupId>
@@ -135,7 +135,7 @@
<plugin>
<groupId>org.sonatype.plugins</groupId>
<artifactId>nexus-staging-maven-plugin</artifactId>
<version>1.6.7</version>
<version>1.6.8</version>
<extensions>true</extensions>
<configuration>
<serverId>ossrh</serverId>

6
other/java/client/pom.xml.deploy

@@ -5,7 +5,7 @@
<groupId>com.github.chrislusf</groupId>
<artifactId>seaweedfs-client</artifactId>
<version>1.6.7</version>
<version>2.85</version>
<parent>
<groupId>org.sonatype.oss</groupId>
@@ -60,7 +60,7 @@
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpmime</artifactId>
<version>4.5.6</version>
<version>4.5.13</version>
</dependency>
<dependency>
<groupId>junit</groupId>
@@ -130,7 +130,7 @@
<plugin>
<groupId>org.sonatype.plugins</groupId>
<artifactId>nexus-staging-maven-plugin</artifactId>
<version>1.6.7</version>
<version>1.6.8</version>
<extensions>true</extensions>
<configuration>
<serverId>ossrh</serverId>

4
other/java/client/pom_debug.xml

@@ -5,7 +5,7 @@
<groupId>com.github.chrislusf</groupId>
<artifactId>seaweedfs-client</artifactId>
<version>1.6.7</version>
<version>2.85</version>
<parent>
<groupId>org.sonatype.oss</groupId>
@@ -60,7 +60,7 @@
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpmime</artifactId>
<version>4.5.6</version>
<version>4.5.13</version>
</dependency>
<dependency>
<groupId>junit</groupId>

2
other/java/client/src/main/java/seaweedfs/client/ChunkCache.java

@@ -15,7 +15,7 @@ public class ChunkCache {
}
this.cache = CacheBuilder.newBuilder()
.maximumSize(maxEntries)
.expireAfterAccess(1, TimeUnit.HOURS)
.expireAfterWrite(1, TimeUnit.HOURS)
.build();
}
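The cache change above swaps Guava's `expireAfterAccess` for `expireAfterWrite`: an entry now ages out a fixed hour after insertion, so a frequently read chunk cannot be served stale indefinitely just because reads keep resetting its timer. A minimal sketch of the difference, using the real Guava `CacheBuilder` API:

```java
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

import java.util.concurrent.TimeUnit;

public class ExpiryDemo {
    public static void main(String[] args) {
        // expireAfterWrite measures lifetime from insertion, so even
        // hot entries are refreshed after an hour; expireAfterAccess
        // would keep extending the TTL on every read.
        Cache<String, byte[]> cache = CacheBuilder.newBuilder()
                .maximumSize(1000)
                .expireAfterWrite(1, TimeUnit.HOURS)
                .build();
        cache.put("chunkId", new byte[]{1, 2, 3});
        byte[] data = cache.getIfPresent("chunkId"); // reads do not extend the TTL
    }
}
```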

53
other/java/client/src/main/java/seaweedfs/client/FilerClient.java

@@ -4,7 +4,6 @@ import com.google.common.base.Strings;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
@@ -15,7 +14,11 @@ public class FilerClient extends FilerGrpcClient {
private static final Logger LOG = LoggerFactory.getLogger(FilerClient.class);
public FilerClient(String host, int grpcPort) {
super(host, grpcPort);
super(host, grpcPort-10000, grpcPort);
}
public FilerClient(String host, int port, int grpcPort) {
super(host, port, grpcPort);
}
public static String toFileId(FilerProto.FileId fid) {
@@ -108,9 +111,9 @@ public class FilerClient extends FilerGrpcClient {
if ("/".equals(path)) {
return true;
}
File pathFile = new File(path);
String parent = pathFile.getParent().replace('\\','/');
String name = pathFile.getName();
String[] dirAndName = SeaweedUtil.toDirAndName(path);
String parent = dirAndName[0];
String name = dirAndName[1];
mkdirs(parent, mode, uid, gid, userName, groupNames);
@@ -129,35 +132,32 @@ public class FilerClient extends FilerGrpcClient {
public boolean mv(String oldPath, String newPath) {
File oldPathFile = new File(oldPath);
String oldParent = oldPathFile.getParent().replace('\\','/');
String oldName = oldPathFile.getName();
String[] oldDirAndName = SeaweedUtil.toDirAndName(oldPath);
String oldParent = oldDirAndName[0];
String oldName = oldDirAndName[1];
File newPathFile = new File(newPath);
String newParent = newPathFile.getParent().replace('\\','/');
String newName = newPathFile.getName();
String[] newDirAndName = SeaweedUtil.toDirAndName(newPath);
String newParent = newDirAndName[0];
String newName = newDirAndName[1];
return atomicRenameEntry(oldParent, oldName, newParent, newName);
}
public boolean exists(String path){
File pathFile = new File(path);
String parent = pathFile.getParent();
String entryName = pathFile.getName();
if(parent == null) {
parent = path;
entryName ="";
}
return lookupEntry(parent, entryName) != null;
String[] dirAndName = SeaweedUtil.toDirAndName(path);
String parent = dirAndName[0];
String entryName = dirAndName[1];
return lookupEntry(parent, entryName) != null;
}
public boolean rm(String path, boolean isRecursive, boolean ignoreRecusiveError) {
File pathFile = new File(path);
String parent = pathFile.getParent().replace('\\','/');
String name = pathFile.getName();
String[] dirAndName = SeaweedUtil.toDirAndName(path);
String parent = dirAndName[0];
String name = dirAndName[1];
return deleteEntry(
parent,
@@ -168,17 +168,19 @@ public class FilerClient extends FilerGrpcClient {
}
public boolean touch(String path, int mode) {
String currentUser = System.getProperty("user.name");
long now = System.currentTimeMillis() / 1000L;
return touch(path, now, mode, 0, 0, currentUser, new String[]{});
}
public boolean touch(String path, long modifiedTimeSecond, int mode, int uid, int gid, String userName, String[] groupNames) {
File pathFile = new File(path);
String parent = pathFile.getParent().replace('\\','/');
String name = pathFile.getName();
String[] dirAndName = SeaweedUtil.toDirAndName(path);
String parent = dirAndName[0];
String name = dirAndName[1];
FilerProto.Entry entry = lookupEntry(parent, name);
if (entry == null) {
@@ -366,6 +368,7 @@ public class FilerClient extends FilerGrpcClient {
.setPathPrefix(prefix)
.setClientName(clientName)
.setSinceNs(sinceNs)
.setClientId(this.randomClientId)
.build()
);
}
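With the constructor change above, the two-argument `FilerClient(host, grpcPort)` now derives the filer's HTTP port by subtracting 10000, following SeaweedFS's convention that the gRPC port is the HTTP port plus 10000, while the new three-argument form passes both ports explicitly. A usage sketch, assuming a filer listening on HTTP 8888 and gRPC 18888 (ports illustrative):

```java
// Both clients point at the same hypothetical filer.
FilerClient byConvention = new FilerClient("localhost", 18888);        // HTTP port inferred as 8888
FilerClient explicitPorts = new FilerClient("localhost", 8888, 18888); // HTTP and gRPC given separately
```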

11
other/java/client/src/main/java/seaweedfs/client/FilerGrpcClient.java

@@ -11,6 +11,7 @@ import org.slf4j.LoggerFactory;
import javax.net.ssl.SSLException;
import java.util.HashMap;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.TimeUnit;
public class FilerGrpcClient {
@@ -30,6 +31,7 @@ public class FilerGrpcClient {
public final int VOLUME_SERVER_ACCESS_PUBLIC_URL = 1;
public final int VOLUME_SERVER_ACCESS_FILER_PROXY = 2;
public final Map<String, FilerProto.Locations> vidLocations = new HashMap<>();
protected int randomClientId;
private final ManagedChannel channel;
private final SeaweedFilerGrpc.SeaweedFilerBlockingStub blockingStub;
private final SeaweedFilerGrpc.SeaweedFilerStub asyncStub;
@@ -40,11 +42,11 @@ public class FilerGrpcClient {
private int volumeServerAccess = VOLUME_SERVER_ACCESS_DIRECT;
private String filerAddress;
public FilerGrpcClient(String host, int grpcPort) {
this(host, grpcPort, sslContext);
public FilerGrpcClient(String host, int port, int grpcPort) {
this(host, port, grpcPort, sslContext);
}
public FilerGrpcClient(String host, int grpcPort, SslContext sslContext) {
public FilerGrpcClient(String host, int port, int grpcPort, SslContext sslContext) {
this(sslContext == null ?
ManagedChannelBuilder.forAddress(host, grpcPort).usePlaintext()
@@ -54,7 +56,7 @@ public class FilerGrpcClient {
.negotiationType(NegotiationType.TLS)
.sslContext(sslContext));
filerAddress = String.format("%s:%d", host, grpcPort - 10000);
filerAddress = SeaweedUtil.joinHostPort(host, port);
FilerProto.GetFilerConfigurationResponse filerConfigurationResponse =
this.getBlockingStub().getFilerConfiguration(
@@ -62,6 +64,7 @@ public class FilerGrpcClient {
cipher = filerConfigurationResponse.getCipher();
collection = filerConfigurationResponse.getCollection();
replication = filerConfigurationResponse.getReplication();
randomClientId = new Random().nextInt();
}

109
other/java/client/src/main/java/seaweedfs/client/ReadChunks.java

@@ -0,0 +1,109 @@
package seaweedfs.client;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
public class ReadChunks {
public static List<SeaweedRead.VisibleInterval> readResolvedChunks(List<FilerProto.FileChunk> chunkList) throws IOException {
List<Point> points = new ArrayList<>(chunkList.size() * 2);
for (FilerProto.FileChunk chunk : chunkList) {
points.add(new Point(chunk.getOffset(), chunk, true));
points.add(new Point(chunk.getOffset() + chunk.getSize(), chunk, false));
}
Collections.sort(points, new Comparator<Point>() {
@Override
public int compare(Point a, Point b) {
if (a.x != b.x) {
return Long.compare(a.x, b.x); // avoid int overflow on large offsets
}
if (a.ts != b.ts) {
return Long.compare(a.ts, b.ts); // older writes sort first
}
if (!a.isStart) {
return -1; // stop events come before start events at the same point
}
return 1;
}
});
long prevX = 0;
List<SeaweedRead.VisibleInterval> visibles = new ArrayList<>();
ArrayList<Point> queue = new ArrayList<>();
for (Point point : points) {
if (point.isStart) {
if (queue.size() > 0) {
int lastIndex = queue.size() - 1;
Point lastPoint = queue.get(lastIndex);
if (point.x != prevX && lastPoint.ts < point.ts) {
addToVisibles(visibles, prevX, lastPoint, point);
prevX = point.x;
}
}
// insert into queue
for (int i = queue.size(); i >= 0; i--) {
if (i == 0 || queue.get(i - 1).ts <= point.ts) {
if (i == queue.size()) {
prevX = point.x;
}
queue.add(i, point);
break;
}
}
} else {
int lastIndex = queue.size() - 1;
int index = lastIndex;
Point startPoint = null;
for (; index >= 0; index--) {
startPoint = queue.get(index);
if (startPoint.ts == point.ts) {
queue.remove(index);
break;
}
}
if (index == lastIndex && startPoint != null) {
addToVisibles(visibles, prevX, startPoint, point);
prevX = point.x;
}
}
}
return visibles;
}
private static void addToVisibles(List<SeaweedRead.VisibleInterval> visibles, long prevX, Point startPoint, Point point) {
if (prevX < point.x) {
FilerProto.FileChunk chunk = startPoint.chunk;
visibles.add(new SeaweedRead.VisibleInterval(
prevX,
point.x,
chunk.getFileId(),
chunk.getMtime(),
prevX - chunk.getOffset(),
chunk.getOffset() == prevX && chunk.getSize() == prevX - startPoint.x,
chunk.getCipherKey().toByteArray(),
chunk.getIsCompressed()
));
}
}
static class Point {
long x;
long ts;
FilerProto.FileChunk chunk;
boolean isStart;
public Point(long x, FilerProto.FileChunk chunk, boolean isStart) {
this.x = x;
this.ts = chunk.getMtime();
this.chunk = chunk;
this.isStart = isStart;
}
}
}
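`readResolvedChunks` is a sweep-line pass: every chunk contributes a start and a stop event, events are sorted by offset with ties broken by mtime, and a queue ordered by mtime tracks which write is "on top" at the current offset. This replaces the quadratic merge removed from `SeaweedRead` below. A worked sketch, not part of the commit; it assumes it lives in the `seaweedfs.client` package, since `VisibleInterval`'s fields are not public:

```java
package seaweedfs.client;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public class ReadChunksDemo {
    public static void main(String[] args) throws IOException {
        // Chunk "aaa" covers [0, 100) at mtime 1000; a newer write "bbb"
        // covers [50, 150) at mtime 2000 and wins wherever they overlap.
        List<FilerProto.FileChunk> chunks = new ArrayList<>();
        chunks.add(FilerProto.FileChunk.newBuilder()
                .setFileId("aaa").setOffset(0).setSize(100).setMtime(1000).build());
        chunks.add(FilerProto.FileChunk.newBuilder()
                .setFileId("bbb").setOffset(50).setSize(100).setMtime(2000).build());

        // Expected output: aaa: [0, 50) then bbb: [50, 150)
        for (SeaweedRead.VisibleInterval v : ReadChunks.readResolvedChunks(chunks)) {
            System.out.println(v.fileId + ": [" + v.start + ", " + v.stop + ")");
        }
    }
}
```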

6
other/java/client/src/main/java/seaweedfs/client/RemoteUtil.java

@@ -14,10 +14,10 @@ public class RemoteUtil {
String dir = SeaweedOutputStream.getParentDirectory(fullpath);
String name = SeaweedOutputStream.getFileName(fullpath);
final FilerProto.DownloadToLocalResponse downloadToLocalResponse = filerClient.getBlockingStub()
.downloadToLocal(FilerProto.DownloadToLocalRequest.newBuilder()
final FilerProto.CacheRemoteObjectToLocalClusterResponse response = filerClient.getBlockingStub()
.cacheRemoteObjectToLocalCluster(FilerProto.CacheRemoteObjectToLocalClusterRequest.newBuilder()
.setDirectory(dir).setName(name).build());
return downloadToLocalResponse.getEntry();
return response.getEntry();
}
}

2
other/java/client/src/main/java/seaweedfs/client/SeaweedCipher.java

@@ -36,7 +36,7 @@ public class SeaweedCipher {
byte[] encryptedText = AES_cipherInstance.doFinal(clearTextbytes, offset, length);
byte[] iv = AES_cipherInstance.getIV();
byte[] message = new byte[GCM_NONCE_LENGTH + clearTextbytes.length + GCM_TAG_LENGTH];
byte[] message = new byte[GCM_NONCE_LENGTH + length + GCM_TAG_LENGTH];
System.arraycopy(iv, 0, message, 0, GCM_NONCE_LENGTH);
System.arraycopy(encryptedText, 0, message, GCM_NONCE_LENGTH, encryptedText.length);
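The fix above sizes the output array from `length`, the number of bytes actually passed to `doFinal`, rather than the whole source buffer, which mattered whenever only a sub-range of `clearTextbytes` was encrypted. A small arithmetic sketch; the 12-byte nonce and 16-byte tag sizes are assumed values standing in for the class's `GCM_NONCE_LENGTH` and `GCM_TAG_LENGTH` constants:

```java
public class CipherSizingDemo {
    // Assumed to match SeaweedCipher's constants: 12-byte GCM nonce, 16-byte tag.
    static final int GCM_NONCE_LENGTH = 12;
    static final int GCM_TAG_LENGTH = 16;

    public static void main(String[] args) {
        byte[] clearTextbytes = new byte[1000]; // large source buffer
        int length = 10;                        // only 10 bytes are encrypted

        // Old sizing used the whole buffer; new sizing matches doFinal's output.
        int oldSize = GCM_NONCE_LENGTH + clearTextbytes.length + GCM_TAG_LENGTH; // 1028 (wrong)
        int newSize = GCM_NONCE_LENGTH + length + GCM_TAG_LENGTH;                // 38 (correct)
        System.out.println(oldSize + " vs " + newSize);
    }
}
```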

90
other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java

@@ -226,96 +226,8 @@ public class SeaweedRead {
chunkList = FileChunkManifest.resolveChunkManifest(filerClient, chunkList);
FilerProto.FileChunk[] chunks = chunkList.toArray(new FilerProto.FileChunk[0]);
Arrays.sort(chunks, new Comparator<FilerProto.FileChunk>() {
@Override
public int compare(FilerProto.FileChunk a, FilerProto.FileChunk b) {
// if just a.getMtime() - b.getMtime(), it will overflow!
if (a.getMtime() < b.getMtime()) {
return -1;
} else if (a.getMtime() > b.getMtime()) {
return 1;
}
return 0;
}
});
List<VisibleInterval> visibles = new ArrayList<>();
for (FilerProto.FileChunk chunk : chunks) {
List<VisibleInterval> newVisibles = new ArrayList<>();
visibles = mergeIntoVisibles(visibles, newVisibles, chunk);
}
return visibles;
}
private static List<VisibleInterval> mergeIntoVisibles(List<VisibleInterval> visibles,
List<VisibleInterval> newVisibles,
FilerProto.FileChunk chunk) {
VisibleInterval newV = new VisibleInterval(
chunk.getOffset(),
chunk.getOffset() + chunk.getSize(),
chunk.getFileId(),
chunk.getMtime(),
0,
true,
chunk.getCipherKey().toByteArray(),
chunk.getIsCompressed()
);
// easy cases to speed up
if (visibles.size() == 0) {
visibles.add(newV);
return visibles;
}
if (visibles.get(visibles.size() - 1).stop <= chunk.getOffset()) {
visibles.add(newV);
return visibles;
}
for (VisibleInterval v : visibles) {
if (v.start < chunk.getOffset() && chunk.getOffset() < v.stop) {
newVisibles.add(new VisibleInterval(
v.start,
chunk.getOffset(),
v.fileId,
v.modifiedTime,
v.chunkOffset,
false,
v.cipherKey,
v.isCompressed
));
}
long chunkStop = chunk.getOffset() + chunk.getSize();
if (v.start < chunkStop && chunkStop < v.stop) {
newVisibles.add(new VisibleInterval(
chunkStop,
v.stop,
v.fileId,
v.modifiedTime,
v.chunkOffset + (chunkStop - v.start),
false,
v.cipherKey,
v.isCompressed
));
}
if (chunkStop <= v.start || v.stop <= chunk.getOffset()) {
newVisibles.add(v);
}
}
newVisibles.add(newV);
// keep everything sorted
for (int i = newVisibles.size() - 1; i >= 0; i--) {
if (i > 0 && newV.start < newVisibles.get(i - 1).start) {
newVisibles.set(i, newVisibles.get(i - 1));
} else {
newVisibles.set(i, newV);
break;
}
}
return ReadChunks.readResolvedChunks(chunkList);
return newVisibles;
}
public static String parseVolumeId(String fileId) {

26
other/java/client/src/main/java/seaweedfs/client/SeaweedUtil.java

@@ -27,4 +27,30 @@ public class SeaweedUtil {
public static CloseableHttpClient getClosableHttpClient() {
return httpClient;
}
public static String[] toDirAndName(String fullpath) {
if (fullpath == null) {
return new String[]{"/", ""};
}
if (fullpath.endsWith("/")) {
fullpath = fullpath.substring(0, fullpath.length() - 1);
}
if (fullpath.length() == 0) {
return new String[]{"/", ""};
}
int sep = fullpath.lastIndexOf("/");
String parent = sep == 0 ? "/" : fullpath.substring(0, sep);
String name = fullpath.substring(sep + 1);
return new String[]{parent, name};
}
public static String joinHostPort(String host, int port) {
if (host.startsWith("[") && host.endsWith("]")) {
return host + ":" + port;
}
if (host.indexOf(':') >= 0) {
return "[" + host + "]:" + port;
}
return host + ":" + port;
}
}
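The two helpers above replace `java.io.File`-based path parsing (which is platform-dependent and throws NPEs on root paths) and make host:port formatting IPv6-safe. Expected behavior, as an illustrative sketch derived from the code:

```java
// Illustrative expectations for the new helpers (values not from the commit).
String[] r1 = SeaweedUtil.toDirAndName("/buckets/a/b.txt"); // {"/buckets/a", "b.txt"}
String[] r2 = SeaweedUtil.toDirAndName("/top.txt");         // {"/", "top.txt"}
String[] r3 = SeaweedUtil.toDirAndName("/dir/");            // {"/", "dir"} (trailing slash stripped)
String[] r4 = SeaweedUtil.toDirAndName(null);               // {"/", ""}

String h1 = SeaweedUtil.joinHostPort("filer.local", 8888);  // "filer.local:8888"
String h2 = SeaweedUtil.joinHostPort("::1", 18888);         // "[::1]:18888" (IPv6 literal gets bracketed)
```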

30
other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java

@@ -29,11 +29,31 @@ public class SeaweedWrite {
final byte[] bytes,
final long bytesOffset, final long bytesLength,
final String path) throws IOException {
FilerProto.FileChunk.Builder chunkBuilder = writeChunk(
replication, filerClient, offset, bytes, bytesOffset, bytesLength, path);
synchronized (entry) {
entry.addChunks(chunkBuilder);
IOException lastException = null;
for (long waitTime = 1000L; waitTime < 10 * 1000; waitTime += waitTime / 2) {
try {
FilerProto.FileChunk.Builder chunkBuilder = writeChunk(
replication, filerClient, offset, bytes, bytesOffset, bytesLength, path);
lastException = null;
synchronized (entry) {
entry.addChunks(chunkBuilder);
}
break;
} catch (IOException ioe) {
LOG.debug("writeData:{}", ioe);
lastException = ioe;
}
try {
Thread.sleep(waitTime);
} catch (InterruptedException e) {
}
}
if (lastException != null) {
throw lastException;
}
}
public static FilerProto.FileChunk.Builder writeChunk(final String replication,
@@ -59,7 +79,7 @@ public class SeaweedWrite {
String fileId = response.getFileId();
String auth = response.getAuth();
String targetUrl = filerClient.getChunkUrl(fileId, response.getUrl(), response.getPublicUrl());
String targetUrl = filerClient.getChunkUrl(fileId, response.getLocation().getUrl(), response.getLocation().getPublicUrl());
ByteString cipherKeyString = com.google.protobuf.ByteString.EMPTY;
byte[] cipherKey = null;
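`writeData` now retries a failed chunk write with multiplicative backoff: it sleeps `waitTime` after each failure, grows the wait by half, and rethrows the last `IOException` once the next wait would reach 10 seconds. The schedule implied by the loop header, as a standalone sketch:

```java
// Backoff schedule implied by:
// for (long waitTime = 1000L; waitTime < 10 * 1000; waitTime += waitTime / 2)
long waitTime = 1000L;
int attempt = 0;
while (waitTime < 10_000L) {
    attempt++;
    System.out.println("attempt " + attempt + ", sleep " + waitTime + " ms on failure");
    waitTime += waitTime / 2; // 1000, 1500, 2250, 3375, 5062, 7593 -> then the loop exits
}
// six attempts in total before the last IOException is rethrown
```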

2
other/java/client/src/main/java/seaweedfs/client/VolumeIdCache.java

@@ -15,7 +15,7 @@ public class VolumeIdCache {
}
this.cache = CacheBuilder.newBuilder()
.maximumSize(maxEntries)
.expireAfterAccess(5, TimeUnit.MINUTES)
.expireAfterWrite(5, TimeUnit.MINUTES)
.build();
}

52
other/java/client/src/main/proto/filer.proto

@@ -30,6 +30,8 @@ service SeaweedFiler {
rpc AtomicRenameEntry (AtomicRenameEntryRequest) returns (AtomicRenameEntryResponse) {
}
rpc StreamRenameEntry (StreamRenameEntryRequest) returns (stream StreamRenameEntryResponse) {
}
rpc AssignVolume (AssignVolumeRequest) returns (AssignVolumeResponse) {
}
@@ -46,6 +48,9 @@ service SeaweedFiler {
rpc Statistics (StatisticsRequest) returns (StatisticsResponse) {
}
rpc Ping (PingRequest) returns (PingResponse) {
}
rpc GetFilerConfiguration (GetFilerConfigurationRequest) returns (GetFilerConfigurationResponse) {
}
@@ -67,7 +72,7 @@ service SeaweedFiler {
rpc KvPut (KvPutRequest) returns (KvPutResponse) {
}
rpc DownloadToLocal (DownloadToLocalRequest) returns (DownloadToLocalResponse) {
rpc CacheRemoteObjectToLocalCluster (CacheRemoteObjectToLocalClusterRequest) returns (CacheRemoteObjectToLocalClusterResponse) {
}
}
@@ -112,6 +117,7 @@ message Entry {
bytes content = 9; // if not empty, the file content
RemoteEntry remote_entry = 10;
int64 quota = 11; // for bucket only. Positive/Negative means enabled/disabled.
}
message FullEntry {
@@ -160,14 +166,13 @@ message FuseAttributes {
uint32 gid = 5;
int64 crtime = 6; // unix time in seconds
string mime = 7;
string replication = 8;
string collection = 9;
int32 ttl_sec = 10;
string user_name = 11; // for hdfs
repeated string group_name = 12; // for hdfs
string symlink_target = 13;
bytes md5 = 14;
string disk_type = 15;
uint32 rdev = 16;
uint64 inode = 17;
}
message CreateEntryRequest {
@@ -176,6 +181,7 @@ message CreateEntryRequest {
bool o_excl = 3;
bool is_from_other_cluster = 4;
repeated int32 signatures = 5;
bool skip_check_parent_directory = 6;
}
message CreateEntryResponse {
@@ -225,6 +231,18 @@ message AtomicRenameEntryRequest {
message AtomicRenameEntryResponse {
}
message StreamRenameEntryRequest {
string old_directory = 1;
string old_name = 2;
string new_directory = 3;
string new_name = 4;
repeated int32 signatures = 5;
}
message StreamRenameEntryResponse {
string directory = 1;
EventNotification event_notification = 2;
int64 ts_ns = 3;
}
message AssignVolumeRequest { message AssignVolumeRequest {
int32 count = 1; int32 count = 1;
string collection = 2; string collection = 2;
@ -233,18 +251,18 @@ message AssignVolumeRequest {
string data_center = 5; string data_center = 5;
string path = 6; string path = 6;
string rack = 7; string rack = 7;
string data_node = 9;
string disk_type = 8; string disk_type = 8;
} }
message AssignVolumeResponse {
string file_id = 1;
-string url = 2;
-string public_url = 3;
int32 count = 4;
string auth = 5;
string collection = 6;
string replication = 7;
string error = 8;
+Location location = 9;
}
message LookupVolumeRequest {

@@ -258,6 +276,7 @@ message Locations {
message Location {
string url = 1;
string public_url = 2;
+uint32 grpc_port = 3;
}

message LookupVolumeResponse {
map<string, Locations> locations_map = 1;
@@ -292,6 +311,16 @@ message StatisticsResponse {
uint64 file_count = 6;
}

+message PingRequest {
+string target = 1; // default to ping itself
+string target_type = 2;
+}
+message PingResponse {
+int64 start_time_ns = 1;
+int64 remote_time_ns = 2;
+int64 stop_time_ns = 3;
+}
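The three PingResponse timestamps support more than a liveness check: the round-trip time is stop_time_ns - start_time_ns, and, assuming roughly symmetric network delay, the clock offset between caller and target is approximately remote_time_ns - (start_time_ns + stop_time_ns) / 2 (the classic NTP-style estimate). A small sketch of that arithmetic over the generated Java types:

```java
import seaweedfs.client.FilerProto;

final class PingMath {
    // minimal sketch: derive latency and approximate clock skew from a
    // PingResponse, assuming network delay is symmetric in both directions
    static void report(FilerProto.PingResponse response) {
        long startNs = response.getStartTimeNs();   // caller clock, before the call
        long remoteNs = response.getRemoteTimeNs(); // remote clock, while serving it
        long stopNs = response.getStopTimeNs();     // caller clock, after the call

        long roundTripNs = stopNs - startNs;
        long offsetNs = remoteNs - (startNs + stopNs) / 2;

        System.out.printf("rtt=%d us, estimated clock offset=%d us%n",
                roundTripNs / 1_000, offsetNs / 1_000);
    }
}
```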
message GetFilerConfigurationRequest {
}

message GetFilerConfigurationResponse {

@@ -306,6 +335,7 @@ message GetFilerConfigurationResponse {
int32 metrics_interval_sec = 10;
string version = 11;
string cluster_id = 12;
+string filer_group = 13;
}
message SubscribeMetadataRequest {

@@ -313,6 +343,9 @@ message SubscribeMetadataRequest {
string path_prefix = 2;
int64 since_ns = 3;
int32 signature = 4;
+repeated string path_prefixes = 6;
+int32 client_id = 7;
+int64 until_ns = 8;
}
message SubscribeMetadataResponse {
string directory = 1;

@@ -381,6 +414,9 @@ message FilerConf {
bool fsync = 6;
uint32 volume_growth_count = 7;
bool read_only = 8;
+string data_center = 9;
+string rack = 10;
+string data_node = 11;
}
repeated PathConf locations = 2;
}
@@ -388,10 +424,10 @@ message FilerConf {

/////////////////////////
// Remote Storage related
/////////////////////////
-message DownloadToLocalRequest {
+message CacheRemoteObjectToLocalClusterRequest {
string directory = 1;
string name = 2;
}

-message DownloadToLocalResponse {
+message CacheRemoteObjectToLocalClusterResponse {
Entry entry = 1;
}
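The DownloadToLocal pair is renamed rather than removed: CacheRemoteObjectToLocalCluster says what the RPC actually does, namely pull the content of a remote-storage-mounted entry into the local cluster and return the refreshed Entry. A hedged sketch of calling it, reusing a blocking stub as in the stream-rename example above; the helper class, directory, and file name are illustrative:

```java
import seaweedfs.client.FilerProto;
import seaweedfs.client.SeaweedFilerGrpc;

final class CacheRemoteSketch {
    // illustrative: warm one remote-mounted object into local volume servers
    static FilerProto.Entry cacheLocally(SeaweedFilerGrpc.SeaweedFilerBlockingStub stub,
                                         String directory, String name) {
        FilerProto.CacheRemoteObjectToLocalClusterResponse response =
                stub.cacheRemoteObjectToLocalCluster(
                        FilerProto.CacheRemoteObjectToLocalClusterRequest.newBuilder()
                                .setDirectory(directory) // parent directory of the entry
                                .setName(name)           // entry name within that directory
                                .build());
        // the returned entry's chunks now point at data held in the local cluster
        return response.getEntry();
    }
}
```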

123
other/java/client/src/test/java/seaweedfs/client/SeaweedReadTest.java

@@ -6,6 +6,7 @@ import org.junit.Test;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
+import java.util.Random;

public class SeaweedReadTest {
@@ -13,17 +14,17 @@ public class SeaweedReadTest {
public void testNonOverlappingVisibleIntervals() throws IOException {
    List<FilerProto.FileChunk> chunks = new ArrayList<>();
    chunks.add(FilerProto.FileChunk.newBuilder()
-        .setFileId("aaa")
-        .setOffset(0)
-        .setSize(100)
-        .setMtime(1000)
-        .build());
+            .setFileId("aaa")
+            .setOffset(0)
+            .setSize(100)
+            .setMtime(1000)
+            .build());
    chunks.add(FilerProto.FileChunk.newBuilder()
-        .setFileId("bbb")
-        .setOffset(100)
-        .setSize(133)
-        .setMtime(2000)
-        .build());
+            .setFileId("bbb")
+            .setOffset(100)
+            .setSize(133)
+            .setMtime(2000)
+            .build());

    List<SeaweedRead.VisibleInterval> visibleIntervals = SeaweedRead.nonOverlappingVisibleIntervals(null, chunks);
    for (SeaweedRead.VisibleInterval visibleInterval : visibleIntervals) {
@@ -61,4 +62,106 @@ public class SeaweedReadTest {
}

+    @Test
+    public void testReadResolvedChunks() throws IOException {
+        List<FilerProto.FileChunk> chunks = new ArrayList<>();
+        chunks.add(FilerProto.FileChunk.newBuilder()
+                .setFileId("a")
+                .setOffset(0)
+                .setSize(100)
+                .setMtime(1)
+                .build());
+        chunks.add(FilerProto.FileChunk.newBuilder()
+                .setFileId("b")
+                .setOffset(50)
+                .setSize(100)
+                .setMtime(2)
+                .build());
+        chunks.add(FilerProto.FileChunk.newBuilder()
+                .setFileId("c")
+                .setOffset(200)
+                .setSize(50)
+                .setMtime(3)
+                .build());
+        chunks.add(FilerProto.FileChunk.newBuilder()
+                .setFileId("d")
+                .setOffset(250)
+                .setSize(50)
+                .setMtime(4)
+                .build());
+        chunks.add(FilerProto.FileChunk.newBuilder()
+                .setFileId("e")
+                .setOffset(175)
+                .setSize(100)
+                .setMtime(5)
+                .build());
+
+        List<SeaweedRead.VisibleInterval> visibleIntervals = ReadChunks.readResolvedChunks(chunks);
+        for (SeaweedRead.VisibleInterval visibleInterval : visibleIntervals) {
+            System.out.println("visible:" + visibleInterval);
+        }
+
+        Assert.assertEquals(4, visibleIntervals.size());
+
+        SeaweedRead.VisibleInterval visibleInterval = visibleIntervals.get(0);
+        Assert.assertEquals(visibleInterval.start, 0);
+        Assert.assertEquals(visibleInterval.stop, 50);
+        Assert.assertEquals(visibleInterval.modifiedTime, 1);
+        Assert.assertEquals(visibleInterval.fileId, "a");
+
+        visibleInterval = visibleIntervals.get(1);
+        Assert.assertEquals(visibleInterval.start, 50);
+        Assert.assertEquals(visibleInterval.stop, 150);
+        Assert.assertEquals(visibleInterval.modifiedTime, 2);
+        Assert.assertEquals(visibleInterval.fileId, "b");
+
+        visibleInterval = visibleIntervals.get(2);
+        Assert.assertEquals(visibleInterval.start, 175);
+        Assert.assertEquals(visibleInterval.stop, 275);
+        Assert.assertEquals(visibleInterval.modifiedTime, 5);
+        Assert.assertEquals(visibleInterval.fileId, "e");
+
+        visibleInterval = visibleIntervals.get(3);
+        Assert.assertEquals(visibleInterval.start, 275);
+        Assert.assertEquals(visibleInterval.stop, 300);
+        Assert.assertEquals(visibleInterval.modifiedTime, 4);
+        Assert.assertEquals(visibleInterval.fileId, "d");
+    }
+
+    @Test
+    public void testRandomizedReadResolvedChunks() throws IOException {
+        Random random = new Random();
+        int limit = 1024 * 1024;
+        long[] array = new long[limit];
+        List<FilerProto.FileChunk> chunks = new ArrayList<>();
+        for (long ts = 0; ts < 1024; ts++) {
+            int x = random.nextInt(limit);
+            int y = random.nextInt(limit);
+            int size = Math.min(Math.abs(x - y), 1024);
+            chunks.add(randomWrite(array, Math.min(x, y), size, ts));
+        }
+
+        List<SeaweedRead.VisibleInterval> visibleIntervals = ReadChunks.readResolvedChunks(chunks);
+        for (SeaweedRead.VisibleInterval visibleInterval : visibleIntervals) {
+            System.out.println("visible:" + visibleInterval);
+            for (int i = (int) visibleInterval.start; i < visibleInterval.stop; i++) {
+                Assert.assertEquals(array[i], visibleInterval.modifiedTime);
+            }
+        }
+    }
+
+    private FilerProto.FileChunk randomWrite(long[] array, int start, int size, long ts) {
+        for (int i = start; i < start + size; i++) {
+            array[i] = ts;
+        }
+        return FilerProto.FileChunk.newBuilder()
+                .setFileId("")
+                .setOffset(start)
+                .setSize(size)
+                .setMtime(ts)
+                .build();
+    }
}

6
other/java/examples/pom.xml

@@ -11,19 +11,19 @@
<dependency>
<groupId>com.github.chrislusf</groupId>
<artifactId>seaweedfs-client</artifactId>
-<version>1.6.7</version>
+<version>2.85</version>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>com.github.chrislusf</groupId>
<artifactId>seaweedfs-hadoop2-client</artifactId>
-<version>1.6.7</version>
+<version>2.85</version>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
-<version>2.9.2</version>
+<version>2.10.1</version>
<scope>compile</scope>
</dependency>
</dependencies>

2
other/java/hdfs-over-ftp/pom.xml

@@ -36,7 +36,7 @@
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
-<version>3.2.1</version>
+<version>3.2.3</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>

4
other/java/hdfs2/dependency-reduced-pom.xml

@@ -86,7 +86,7 @@
<plugin>
<groupId>org.sonatype.plugins</groupId>
<artifactId>nexus-staging-maven-plugin</artifactId>
-<version>1.6.7</version>
+<version>1.6.8</version>
<extensions>true</extensions>
<configuration>
<serverId>ossrh</serverId>

@@ -301,7 +301,7 @@
</snapshotRepository>
</distributionManagement>
<properties>
-<seaweedfs.client.version>1.6.7</seaweedfs.client.version>
+<seaweedfs.client.version>2.85</seaweedfs.client.version>
<hadoop.version>2.9.2</hadoop.version>
</properties>
</project>

6
other/java/hdfs2/pom.xml

@@ -5,8 +5,8 @@
<modelVersion>4.0.0</modelVersion>

<properties>
-<seaweedfs.client.version>1.6.7</seaweedfs.client.version>
-<hadoop.version>2.9.2</hadoop.version>
+<seaweedfs.client.version>2.85</seaweedfs.client.version>
+<hadoop.version>2.10.1</hadoop.version>
</properties>

<groupId>com.github.chrislusf</groupId>

@@ -105,7 +105,7 @@
<plugin>
<groupId>org.sonatype.plugins</groupId>
<artifactId>nexus-staging-maven-plugin</artifactId>
-<version>1.6.7</version>
+<version>1.6.8</version>
<extensions>true</extensions>
<configuration>
<serverId>ossrh</serverId>

8
other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystem.java

@@ -23,6 +23,7 @@ public class SeaweedFileSystem extends FileSystem {
public static final String FS_SEAWEED_FILER_HOST = "fs.seaweed.filer.host";
public static final String FS_SEAWEED_FILER_PORT = "fs.seaweed.filer.port";
+public static final String FS_SEAWEED_FILER_PORT_GRPC = "fs.seaweed.filer.port.grpc";
public static final int FS_SEAWEED_DEFAULT_PORT = 8888;
public static final String FS_SEAWEED_BUFFER_SIZE = "fs.seaweed.buffer.size";
public static final String FS_SEAWEED_REPLICATION = "fs.seaweed.replication";

@@ -50,9 +51,6 @@ public class SeaweedFileSystem extends FileSystem {
// get host information from uri (overrides info in conf)
String host = uri.getHost();
host = (host == null) ? conf.get(FS_SEAWEED_FILER_HOST, "localhost") : host;
-if (host == null) {
-    throw new IOException("Invalid host specified");
-}
conf.set(FS_SEAWEED_FILER_HOST, host);

// get port information from uri, (overrides info in conf)

@@ -60,10 +58,12 @@ public class SeaweedFileSystem extends FileSystem {
port = (port == -1) ? FS_SEAWEED_DEFAULT_PORT : port;
conf.setInt(FS_SEAWEED_FILER_PORT, port);

+int grpcPort = conf.getInt(FS_SEAWEED_FILER_PORT_GRPC, port + 10000);
+
setConf(conf);
this.uri = uri;

-seaweedFileSystemStore = new SeaweedFileSystemStore(host, port, conf);
+seaweedFileSystemStore = new SeaweedFileSystemStore(host, port, grpcPort, conf);
}

5
other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java

@@ -27,9 +27,8 @@ public class SeaweedFileSystemStore {
private FilerClient filerClient;
private Configuration conf;

-public SeaweedFileSystemStore(String host, int port, Configuration conf) {
-    int grpcPort = 10000 + port;
-    filerClient = new FilerClient(host, grpcPort);
+public SeaweedFileSystemStore(String host, int port, int grpcPort, Configuration conf) {
+    filerClient = new FilerClient(host, port, grpcPort);
    this.conf = conf;
    String volumeServerAccessMode = this.conf.get(FS_SEAWEED_VOLUME_SERVER_ACCESS, "direct");
    if (volumeServerAccessMode.equals("publicUrl")) {

4
other/java/hdfs3/dependency-reduced-pom.xml

@@ -86,7 +86,7 @@
<plugin>
<groupId>org.sonatype.plugins</groupId>
<artifactId>nexus-staging-maven-plugin</artifactId>
-<version>1.6.7</version>
+<version>1.6.8</version>
<extensions>true</extensions>
<configuration>
<serverId>ossrh</serverId>

@@ -309,7 +309,7 @@
</snapshotRepository>
</distributionManagement>
<properties>
-<seaweedfs.client.version>1.6.7</seaweedfs.client.version>
+<seaweedfs.client.version>2.85</seaweedfs.client.version>
<hadoop.version>3.1.1</hadoop.version>
</properties>
</project>

6
other/java/hdfs3/pom.xml

@@ -5,8 +5,8 @@
<modelVersion>4.0.0</modelVersion>

<properties>
-<seaweedfs.client.version>1.6.7</seaweedfs.client.version>
-<hadoop.version>3.1.1</hadoop.version>
+<seaweedfs.client.version>2.85</seaweedfs.client.version>
+<hadoop.version>3.2.3</hadoop.version>
</properties>

<groupId>com.github.chrislusf</groupId>

@@ -105,7 +105,7 @@
<plugin>
<groupId>org.sonatype.plugins</groupId>
<artifactId>nexus-staging-maven-plugin</artifactId>
-<version>1.6.7</version>
+<version>1.6.8</version>
<extensions>true</extensions>
<configuration>
<serverId>ossrh</serverId>

8
other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystem.java

@@ -23,6 +23,7 @@ public class SeaweedFileSystem extends FileSystem {
public static final String FS_SEAWEED_FILER_HOST = "fs.seaweed.filer.host";
public static final String FS_SEAWEED_FILER_PORT = "fs.seaweed.filer.port";
+public static final String FS_SEAWEED_FILER_PORT_GRPC = "fs.seaweed.filer.port.grpc";
public static final int FS_SEAWEED_DEFAULT_PORT = 8888;
public static final String FS_SEAWEED_BUFFER_SIZE = "fs.seaweed.buffer.size";
public static final String FS_SEAWEED_REPLICATION = "fs.seaweed.replication";

@@ -50,9 +51,6 @@ public class SeaweedFileSystem extends FileSystem {
// get host information from uri (overrides info in conf)
String host = uri.getHost();
host = (host == null) ? conf.get(FS_SEAWEED_FILER_HOST, "localhost") : host;
-if (host == null) {
-    throw new IOException("Invalid host specified");
-}
conf.set(FS_SEAWEED_FILER_HOST, host);

// get port information from uri, (overrides info in conf)

@@ -60,10 +58,12 @@ public class SeaweedFileSystem extends FileSystem {
port = (port == -1) ? FS_SEAWEED_DEFAULT_PORT : port;
conf.setInt(FS_SEAWEED_FILER_PORT, port);

+int grpcPort = conf.getInt(FS_SEAWEED_FILER_PORT_GRPC, port + 10000);
+
setConf(conf);
this.uri = uri;

-seaweedFileSystemStore = new SeaweedFileSystemStore(host, port, conf);
+seaweedFileSystemStore = new SeaweedFileSystemStore(host, port, grpcPort, conf);
}

5
other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java

@@ -27,9 +27,8 @@ public class SeaweedFileSystemStore {
private FilerClient filerClient;
private Configuration conf;

-public SeaweedFileSystemStore(String host, int port, Configuration conf) {
-    int grpcPort = 10000 + port;
-    filerClient = new FilerClient(host, grpcPort);
+public SeaweedFileSystemStore(String host, int port, int grpcPort, Configuration conf) {
+    filerClient = new FilerClient(host, port, grpcPort);
    this.conf = conf;
    String volumeServerAccessMode = this.conf.get(FS_SEAWEED_VOLUME_SERVER_ACCESS, "direct");
    if (volumeServerAccessMode.equals("publicUrl")) {
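Both HDFS clients previously hard-coded the filer gRPC port as HTTP port + 10000; the new fs.seaweed.filer.port.grpc key keeps that as the default but lets deployments with a remapped gRPC port override it, and the extra constructor argument threads the value down to FilerClient. A small configuration sketch with illustrative host and port values (the fs.seaweedfs.impl key follows the standard Hadoop fs.&lt;scheme&gt;.impl convention):

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SeaweedFsGrpcPortSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.seaweedfs.impl", "seaweed.hdfs.SeaweedFileSystem");
        conf.set("fs.seaweed.filer.host", "filer.example.com"); // illustrative host
        conf.setInt("fs.seaweed.filer.port", 8888);
        // only needed when gRPC is NOT on port + 10000 (18888 would be the default here)
        conf.setInt("fs.seaweed.filer.port.grpc", 28888);

        FileSystem fs = FileSystem.get(URI.create("seaweedfs://filer.example.com:8888/"), conf);
        System.out.println(fs.exists(new Path("/")));
    }
}
```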

16
other/metrics/grafana_seaweedfs.json

@@ -539,11 +539,12 @@
"step": 60
},
{
-"expr": "histogram_quantile(0.90, sum(rate(SeaweedFS_s3_request_seconds_bucket[1m])) by (le, type))",
+"expr": "histogram_quantile(0.90, sum(rate(SeaweedFS_s3_request_seconds_bucket[1m])) by (le, type, bucket))",
"format": "time_series",
"hide": false,
+"interval": "",
"intervalFactor": 2,
-"legendFormat": "{{type}}",
+"legendFormat": "{{bucket}} {{type}}",
"refId": "B",
"step": 60
}
@@ -645,11 +646,12 @@
"step": 60
},
{
-"expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_s3_request_seconds_bucket[1m])) by (le, type))",
+"expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_s3_request_seconds_bucket[1m])) by (le, type, bucket))",
"format": "time_series",
"hide": false,
+"interval": "",
"intervalFactor": 2,
-"legendFormat": "{{type}}",
+"legendFormat": "{{bucket}} {{type}}",
"refId": "B",
"step": 60
}
@@ -751,11 +753,11 @@
"step": 60
},
{
-"expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_s3_request_seconds_bucket[1m])) by (le, type))",
+"expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_s3_request_seconds_bucket[1m])) by (le, type, bucket))",
"format": "time_series",
"hide": false,
"intervalFactor": 2,
-"legendFormat": "{{type}}",
+"legendFormat": "{{bucket}} {{type}}",
"refId": "B",
"step": 60
}
@@ -864,7 +866,7 @@
"expr": "rate(SeaweedFS_s3_request_total[1m])",
"format": "time_series",
"intervalFactor": 2,
-"legendFormat": "{{type}}",
+"legendFormat": "{{bucket}} {{type}}",
"refId": "A",
"step": 30
}

1932
other/metrics/grafana_seaweedfs_heartbeat.json
File diff suppressed because it is too large

15
test/s3/basic/basic_test.go

@@ -2,14 +2,15 @@ package basic

import (
	"fmt"
+	"io"
+	"os"
+	"strings"
+	"testing"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
-	"io/ioutil"
-	"os"
-	"strings"
-	"testing"
)

var (
@@ -108,8 +109,8 @@ func TestListBucket(t *testing.T) {

func TestListObjectV2(t *testing.T) {
	listObj, err := svc.ListObjectsV2(&s3.ListObjectsV2Input{
-		Bucket: aws.String(Bucket),
-		Prefix: aws.String("foo"),
+		Bucket:    aws.String(Bucket),
+		Prefix:    aws.String("foo"),
		Delimiter: aws.String("/"),
	})
	if err != nil {
if err != nil { if err != nil {
@@ -169,7 +170,7 @@ func TestObjectOp(t *testing.T) {
	exitErrorf("Unable to get copy object, %v", err)
}

-data, err := ioutil.ReadAll(getObj.Body)
+data, err := io.ReadAll(getObj.Body)
if err != nil {
	exitErrorf("Unable to read object data, %v", err)
}

2
test/s3/compatibility/.gitignore

@@ -0,0 +1,2 @@
+/s3-tests
+/tmp

11
test/s3/compatibility/Dockerfile

@@ -0,0 +1,11 @@
+# the tests only support python 3.6, not newer
+FROM ubuntu:latest
+
+RUN apt-get update && DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt-get install -y git-core sudo tzdata
+
+RUN git clone https://github.com/ceph/s3-tests.git
+WORKDIR s3-tests
+
+# we pin a certain commit
+RUN git checkout 9a6a1e9f197fc9fb031b809d1e057635c2ff8d4e
+RUN ./bootstrap
13
test/s3/compatibility/README.md

@@ -0,0 +1,13 @@
+# Running S3 Compatibility tests against SeaweedFS
+
+This is using [the tests from Ceph](https://github.com/ceph/s3-tests).
+
+## Prerequisites
+
+- have Docker installed
+- this has been executed on Mac. On Linux, the hostname in `s3tests.conf` needs to be adjusted.
+
+## Running tests
+
+- `./prepare.sh` to build the docker image
+- `./run.sh` to execute all tests

5
test/s3/compatibility/prepare.sh

@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+
+set -ex
+
+docker build --progress=plain -t s3tests .

Some files were not shown because too many files changed in this diff
