
Merge branch 'master' into _product

# Conflicts:
#	weed/s3api/s3api_bucket_handlers.go
pull/5936/head
changlin.shi, 2 years ago
commit deb0eef8a8
Changed files (number of changed lines in parentheses):

  1. .github/workflows/binaries_dev.yml (12)
  2. .github/workflows/binaries_release0.yml (6)
  3. .github/workflows/binaries_release1.yml (6)
  4. .github/workflows/binaries_release2.yml (6)
  5. .github/workflows/binaries_release3.yml (6)
  6. .github/workflows/binaries_release4.yml (6)
  7. .github/workflows/codeql.yml (2)
  8. .github/workflows/container_dev.yml (8)
  9. .github/workflows/container_latest.yml (8)
  10. .github/workflows/container_release1.yml (8)
  11. .github/workflows/container_release2.yml (8)
  12. .github/workflows/container_release3.yml (8)
  13. .github/workflows/container_release4.yml (8)
  14. .github/workflows/container_release5.yml (8)
  15. .github/workflows/depsreview.yml (4)
  16. .github/workflows/e2e.yml (39)
  17. .github/workflows/go.yml (2)
  18. .github/workflows/helm_chart_release.yml (22)
  19. CODE_OF_CONDUCT.md (74)
  20. README.md (5)
  21. backers.md (1)
  22. docker/Dockerfile.go_build (2)
  23. docker/Dockerfile.rocksdb_dev_env (2)
  24. docker/Dockerfile.rocksdb_large (2)
  25. docker/seaweedfs.sql (14)
  26. go.mod (149)
  27. go.sum (770)
  28. helm/index.yaml (13)
  29. helm/seaweedfs-3.43.tgz (BIN)
  30. k8s/charts/seaweedfs/.helmignore (0)
  31. k8s/charts/seaweedfs/Chart.yaml (4)
  32. k8s/charts/seaweedfs/README.md (14)
  33. k8s/charts/seaweedfs/dashboards/seaweedfs-grafana-dashboard.json (0)
  34. k8s/charts/seaweedfs/templates/_helpers.tpl (0)
  35. k8s/charts/seaweedfs/templates/ca-cert.yaml (0)
  36. k8s/charts/seaweedfs/templates/cert-clusterissuer.yaml (0)
  37. k8s/charts/seaweedfs/templates/client-cert.yaml (0)
  38. k8s/charts/seaweedfs/templates/filer-cert.yaml (0)
  39. k8s/charts/seaweedfs/templates/filer-service-client.yaml (0)
  40. k8s/charts/seaweedfs/templates/filer-service.yaml (0)
  41. k8s/charts/seaweedfs/templates/filer-servicemonitor.yaml (0)
  42. k8s/charts/seaweedfs/templates/filer-statefulset.yaml (2)
  43. k8s/charts/seaweedfs/templates/ingress.yaml (0)
  44. k8s/charts/seaweedfs/templates/master-cert.yaml (0)
  45. k8s/charts/seaweedfs/templates/master-service.yaml (0)
  46. k8s/charts/seaweedfs/templates/master-servicemonitor.yaml (0)
  47. k8s/charts/seaweedfs/templates/master-statefulset.yaml (4)
  48. k8s/charts/seaweedfs/templates/s3-deployment.yaml (2)
  49. k8s/charts/seaweedfs/templates/s3-service.yaml (0)
  50. k8s/charts/seaweedfs/templates/s3-servicemonitor.yaml (0)
  51. k8s/charts/seaweedfs/templates/seaweedfs-grafana-dashboard.yaml (0)
  52. k8s/charts/seaweedfs/templates/seaweedfs-s3-secret.yaml (0)
  53. k8s/charts/seaweedfs/templates/secret-seaweedfs-db.yaml (0)
  54. k8s/charts/seaweedfs/templates/security-configmap.yaml (0)
  55. k8s/charts/seaweedfs/templates/service-account.yaml (0)
  56. k8s/charts/seaweedfs/templates/volume-cert.yaml (0)
  57. k8s/charts/seaweedfs/templates/volume-service.yaml (0)
  58. k8s/charts/seaweedfs/templates/volume-servicemonitor.yaml (0)
  59. k8s/charts/seaweedfs/templates/volume-statefulset.yaml (4)
  60. k8s/charts/seaweedfs/values.yaml (4)
  61. other/java/s3copier/pom.xml (4)
  62. other/java/s3copier/src/main/java/com/seaweedfs/s3/HighLevelMultipartUpload.java (15)
  63. unmaintained/load_test/load_test_meta_tail/load_test_meta_tail.go (3)
  64. weed/command/filer.go (1)
  65. weed/command/filer_cat.go (2)
  66. weed/command/filer_copy.go (20)
  67. weed/command/filer_meta_backup.go (2)
  68. weed/command/filer_remote_gateway.go (2)
  69. weed/command/filer_remote_sync.go (2)
  70. weed/command/filer_sync.go (4)
  71. weed/command/iam.go (2)
  72. weed/command/mount.go (4)
  73. weed/command/mount_std.go (3)
  74. weed/command/s3.go (2)
  75. weed/command/scaffold/filer.toml (30)
  76. weed/command/scaffold/replication.toml (1)
  77. weed/command/server.go (1)
  78. weed/command/webdav.go (2)
  79. weed/filer/configuration.go (2)
  80. weed/filer/filechunk_group.go (155)
  81. weed/filer/filechunk_group_test.go (36)
  82. weed/filer/filechunk_manifest.go (4)
  83. weed/filer/filechunk_section.go (134)
  84. weed/filer/filechunk_section_test.go (48)
  85. weed/filer/filechunks.go (218)
  86. weed/filer/filechunks_read.go (90)
  87. weed/filer/filechunks_read_test.go (86)
  88. weed/filer/filechunks_test.go (174)
  89. weed/filer/filer.go (9)
  90. weed/filer/filer_conf.go (2)
  91. weed/filer/filer_notify_append.go (2)
  92. weed/filer/interval_list.go (259)
  93. weed/filer/interval_list_test.go (327)
  94. weed/filer/meta_aggregator.go (16)
  95. weed/filer/mysql/mysql_sql_gen.go (14)
  96. weed/filer/mysql/mysql_store.go (4)
  97. weed/filer/mysql2/mysql2_store.go (4)
  98. weed/filer/reader_at.go (66)
  99. weed/filer/reader_at_test.go (72)
  100. weed/filer/reader_cache.go (11)

.github/workflows/binaries_dev.yml

@@ -38,13 +38,13 @@ jobs:
    steps:
      - name: Check out code into the Go module directory
-       uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v2
+       uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v2
      - name: Set BUILD_TIME env
        run: echo BUILD_TIME=$(date -u +%Y%m%d-%H%M) >> ${GITHUB_ENV}
      - name: Go Release Binaries Large Disk
-       uses: wangyoucao577/go-release-action@90da8ebfdc010a0e7d378419a76fd90230a05228 # v1.22
+       uses: wangyoucao577/go-release-action@efb9406897e4eff1aa2c7142223b096aec90f267 # v1.22
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
@@ -60,7 +60,7 @@ jobs:
          asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"
      - name: Go Release Binaries Normal Volume Size
-       uses: wangyoucao577/go-release-action@90da8ebfdc010a0e7d378419a76fd90230a05228 # v1.22
+       uses: wangyoucao577/go-release-action@efb9406897e4eff1aa2c7142223b096aec90f267 # v1.22
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
@@ -87,13 +87,13 @@ jobs:
    steps:
      - name: Check out code into the Go module directory
-       uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v2
+       uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v2
      - name: Set BUILD_TIME env
        run: echo BUILD_TIME=$(date -u +%Y%m%d-%H%M) >> ${GITHUB_ENV}
      - name: Go Release Binaries Large Disk
-       uses: wangyoucao577/go-release-action@90da8ebfdc010a0e7d378419a76fd90230a05228 # v1.22
+       uses: wangyoucao577/go-release-action@efb9406897e4eff1aa2c7142223b096aec90f267 # v1.22
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
@@ -109,7 +109,7 @@ jobs:
          asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"
      - name: Go Release Binaries Normal Volume Size
-       uses: wangyoucao577/go-release-action@90da8ebfdc010a0e7d378419a76fd90230a05228 # v1.22
+       uses: wangyoucao577/go-release-action@efb9406897e4eff1aa2c7142223b096aec90f267 # v1.22
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}

.github/workflows/binaries_release0.yml

@@ -28,9 +28,9 @@ jobs:
    # Steps represent a sequence of tasks that will be executed as part of the job
    steps:
      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
-     - uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v2
+     - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v2
      - name: Go Release Binaries Normal Volume Size
-       uses: wangyoucao577/go-release-action@90da8ebfdc010a0e7d378419a76fd90230a05228 # v1.22
+       uses: wangyoucao577/go-release-action@efb9406897e4eff1aa2c7142223b096aec90f267 # v1.22
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
@@ -44,7 +44,7 @@ jobs:
          binary_name: weed
          asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
      - name: Go Release Large Disk Binaries
-       uses: wangyoucao577/go-release-action@90da8ebfdc010a0e7d378419a76fd90230a05228 # v1.22
+       uses: wangyoucao577/go-release-action@efb9406897e4eff1aa2c7142223b096aec90f267 # v1.22
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}

.github/workflows/binaries_release1.yml

@@ -28,9 +28,9 @@ jobs:
    # Steps represent a sequence of tasks that will be executed as part of the job
    steps:
      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
-     - uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v2
+     - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v2
      - name: Go Release Binaries Normal Volume Size
-       uses: wangyoucao577/go-release-action@90da8ebfdc010a0e7d378419a76fd90230a05228 # v1.22
+       uses: wangyoucao577/go-release-action@efb9406897e4eff1aa2c7142223b096aec90f267 # v1.22
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
@@ -44,7 +44,7 @@ jobs:
          binary_name: weed
          asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
      - name: Go Release Large Disk Binaries
-       uses: wangyoucao577/go-release-action@90da8ebfdc010a0e7d378419a76fd90230a05228 # v1.22
+       uses: wangyoucao577/go-release-action@efb9406897e4eff1aa2c7142223b096aec90f267 # v1.22
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}

.github/workflows/binaries_release2.yml

@@ -28,9 +28,9 @@ jobs:
    # Steps represent a sequence of tasks that will be executed as part of the job
    steps:
      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
-     - uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v2
+     - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v2
      - name: Go Release Binaries Normal Volume Size
-       uses: wangyoucao577/go-release-action@90da8ebfdc010a0e7d378419a76fd90230a05228 # v1.22
+       uses: wangyoucao577/go-release-action@efb9406897e4eff1aa2c7142223b096aec90f267 # v1.22
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
@@ -44,7 +44,7 @@ jobs:
          binary_name: weed
          asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
      - name: Go Release Large Disk Binaries
-       uses: wangyoucao577/go-release-action@90da8ebfdc010a0e7d378419a76fd90230a05228 # v1.22
+       uses: wangyoucao577/go-release-action@efb9406897e4eff1aa2c7142223b096aec90f267 # v1.22
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}

.github/workflows/binaries_release3.yml

@@ -28,9 +28,9 @@ jobs:
    # Steps represent a sequence of tasks that will be executed as part of the job
    steps:
      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
-     - uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v2
+     - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v2
      - name: Go Release Binaries Normal Volume Size
-       uses: wangyoucao577/go-release-action@90da8ebfdc010a0e7d378419a76fd90230a05228 # v1.22
+       uses: wangyoucao577/go-release-action@efb9406897e4eff1aa2c7142223b096aec90f267 # v1.22
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
@@ -44,7 +44,7 @@ jobs:
          binary_name: weed
          asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
      - name: Go Release Large Disk Binaries
-       uses: wangyoucao577/go-release-action@90da8ebfdc010a0e7d378419a76fd90230a05228 # v1.22
+       uses: wangyoucao577/go-release-action@efb9406897e4eff1aa2c7142223b096aec90f267 # v1.22
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}

.github/workflows/binaries_release4.yml

@@ -28,9 +28,9 @@ jobs:
    # Steps represent a sequence of tasks that will be executed as part of the job
    steps:
      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
-     - uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v2
+     - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v2
      - name: Go Release Binaries Normal Volume Size
-       uses: wangyoucao577/go-release-action@90da8ebfdc010a0e7d378419a76fd90230a05228 # v1.22
+       uses: wangyoucao577/go-release-action@efb9406897e4eff1aa2c7142223b096aec90f267 # v1.22
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
@@ -45,7 +45,7 @@ jobs:
          binary_name: weed
          asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}_full"
      - name: Go Release Large Disk Binaries
-       uses: wangyoucao577/go-release-action@90da8ebfdc010a0e7d378419a76fd90230a05228 # v1.22
+       uses: wangyoucao577/go-release-action@efb9406897e4eff1aa2c7142223b096aec90f267 # v1.22
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}

.github/workflows/codeql.yml

@@ -18,7 +18,7 @@ jobs:
    steps:
      - name: Checkout repository
-       uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b
+       uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c
      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL

.github/workflows/container_dev.yml

@@ -16,11 +16,11 @@ jobs:
    steps:
      -
        name: Checkout
-       uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v2
+       uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v2
      -
        name: Docker meta
        id: docker_meta
-       uses: docker/metadata-action@57396166ad8aefe6098280995947635806a0e6ea # v3
+       uses: docker/metadata-action@507c2f2dc502c992ad446e3d7a5dfbe311567a96 # v3
        with:
          images: |
            chrislusf/seaweedfs
@@ -36,7 +36,7 @@ jobs:
        uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # v1
      -
        name: Set up Docker Buildx
-       uses: docker/setup-buildx-action@8c0edbc76e98fa90f69d9a2c020dcb50019dc325 # v1
+       uses: docker/setup-buildx-action@f03ac48505955848960e80bbb68046aa35c7b9e7 # v1
        with:
          buildkitd-flags: "--debug"
      -
@@ -56,7 +56,7 @@ jobs:
          password: ${{ secrets.GHCR_TOKEN }}
      -
        name: Build
-       uses: docker/build-push-action@c56af957549030174b10d6867f20e78cfd7debc5 # v2
+       uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671 # v2
        with:
          context: ./docker
          push: ${{ github.event_name != 'pull_request' }}

.github/workflows/container_latest.yml

@@ -17,11 +17,11 @@ jobs:
    steps:
      -
        name: Checkout
-       uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v2
+       uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v2
      -
        name: Docker meta
        id: docker_meta
-       uses: docker/metadata-action@57396166ad8aefe6098280995947635806a0e6ea # v3
+       uses: docker/metadata-action@507c2f2dc502c992ad446e3d7a5dfbe311567a96 # v3
        with:
          images: |
            chrislusf/seaweedfs
@@ -37,7 +37,7 @@ jobs:
        uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # v1
      -
        name: Set up Docker Buildx
-       uses: docker/setup-buildx-action@8c0edbc76e98fa90f69d9a2c020dcb50019dc325 # v1
+       uses: docker/setup-buildx-action@f03ac48505955848960e80bbb68046aa35c7b9e7 # v1
        with:
          buildkitd-flags: "--debug"
      -
@@ -57,7 +57,7 @@ jobs:
          password: ${{ secrets.GHCR_TOKEN }}
      -
        name: Build
-       uses: docker/build-push-action@c56af957549030174b10d6867f20e78cfd7debc5 # v2
+       uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671 # v2
        with:
          context: ./docker
          push: ${{ github.event_name != 'pull_request' }}

.github/workflows/container_release1.yml

@@ -16,11 +16,11 @@ jobs:
    steps:
      -
        name: Checkout
-       uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v2
+       uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v2
      -
        name: Docker meta
        id: docker_meta
-       uses: docker/metadata-action@57396166ad8aefe6098280995947635806a0e6ea # v3
+       uses: docker/metadata-action@507c2f2dc502c992ad446e3d7a5dfbe311567a96 # v3
        with:
          images: |
            chrislusf/seaweedfs
@@ -37,7 +37,7 @@ jobs:
        uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # v1
      -
        name: Set up Docker Buildx
-       uses: docker/setup-buildx-action@8c0edbc76e98fa90f69d9a2c020dcb50019dc325 # v1
+       uses: docker/setup-buildx-action@f03ac48505955848960e80bbb68046aa35c7b9e7 # v1
      -
        name: Login to Docker Hub
        if: github.event_name != 'pull_request'
@@ -47,7 +47,7 @@ jobs:
          password: ${{ secrets.DOCKER_PASSWORD }}
      -
        name: Build
-       uses: docker/build-push-action@c56af957549030174b10d6867f20e78cfd7debc5 # v2
+       uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671 # v2
        with:
          context: ./docker
          push: ${{ github.event_name != 'pull_request' }}

.github/workflows/container_release2.yml

@@ -17,11 +17,11 @@ jobs:
    steps:
      -
        name: Checkout
-       uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v2
+       uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v2
      -
        name: Docker meta
        id: docker_meta
-       uses: docker/metadata-action@57396166ad8aefe6098280995947635806a0e6ea # v3
+       uses: docker/metadata-action@507c2f2dc502c992ad446e3d7a5dfbe311567a96 # v3
        with:
          images: |
            chrislusf/seaweedfs
@@ -38,7 +38,7 @@ jobs:
        uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # v1
      -
        name: Set up Docker Buildx
-       uses: docker/setup-buildx-action@8c0edbc76e98fa90f69d9a2c020dcb50019dc325 # v1
+       uses: docker/setup-buildx-action@f03ac48505955848960e80bbb68046aa35c7b9e7 # v1
      -
        name: Login to Docker Hub
        if: github.event_name != 'pull_request'
@@ -48,7 +48,7 @@ jobs:
          password: ${{ secrets.DOCKER_PASSWORD }}
      -
        name: Build
-       uses: docker/build-push-action@c56af957549030174b10d6867f20e78cfd7debc5 # v2
+       uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671 # v2
        with:
          context: ./docker
          push: ${{ github.event_name != 'pull_request' }}

.github/workflows/container_release3.yml

@@ -17,11 +17,11 @@ jobs:
    steps:
      -
        name: Checkout
-       uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v2
+       uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v2
      -
        name: Docker meta
        id: docker_meta
-       uses: docker/metadata-action@57396166ad8aefe6098280995947635806a0e6ea # v3
+       uses: docker/metadata-action@507c2f2dc502c992ad446e3d7a5dfbe311567a96 # v3
        with:
          images: |
            chrislusf/seaweedfs
@@ -38,7 +38,7 @@ jobs:
        uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # v1
      -
        name: Set up Docker Buildx
-       uses: docker/setup-buildx-action@8c0edbc76e98fa90f69d9a2c020dcb50019dc325 # v1
+       uses: docker/setup-buildx-action@f03ac48505955848960e80bbb68046aa35c7b9e7 # v1
      -
        name: Login to Docker Hub
        if: github.event_name != 'pull_request'
@@ -48,7 +48,7 @@ jobs:
          password: ${{ secrets.DOCKER_PASSWORD }}
      -
        name: Build
-       uses: docker/build-push-action@c56af957549030174b10d6867f20e78cfd7debc5 # v2
+       uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671 # v2
        with:
          context: ./docker
          push: ${{ github.event_name != 'pull_request' }}

.github/workflows/container_release4.yml

@@ -16,11 +16,11 @@ jobs:
    steps:
      -
        name: Checkout
-       uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v2
+       uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v2
      -
        name: Docker meta
        id: docker_meta
-       uses: docker/metadata-action@57396166ad8aefe6098280995947635806a0e6ea # v3
+       uses: docker/metadata-action@507c2f2dc502c992ad446e3d7a5dfbe311567a96 # v3
        with:
          images: |
            chrislusf/seaweedfs
@@ -37,7 +37,7 @@ jobs:
        uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # v1
      -
        name: Set up Docker Buildx
-       uses: docker/setup-buildx-action@8c0edbc76e98fa90f69d9a2c020dcb50019dc325 # v1
+       uses: docker/setup-buildx-action@f03ac48505955848960e80bbb68046aa35c7b9e7 # v1
      -
        name: Login to Docker Hub
        if: github.event_name != 'pull_request'
@@ -47,7 +47,7 @@ jobs:
          password: ${{ secrets.DOCKER_PASSWORD }}
      -
        name: Build
-       uses: docker/build-push-action@c56af957549030174b10d6867f20e78cfd7debc5 # v2
+       uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671 # v2
        with:
          context: ./docker
          push: ${{ github.event_name != 'pull_request' }}

.github/workflows/container_release5.yml

@@ -16,11 +16,11 @@ jobs:
    steps:
      -
        name: Checkout
-       uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v2
+       uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v2
      -
        name: Docker meta
        id: docker_meta
-       uses: docker/metadata-action@57396166ad8aefe6098280995947635806a0e6ea # v3
+       uses: docker/metadata-action@507c2f2dc502c992ad446e3d7a5dfbe311567a96 # v3
        with:
          images: |
            chrislusf/seaweedfs
@@ -37,7 +37,7 @@ jobs:
        uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # v1
      -
        name: Set up Docker Buildx
-       uses: docker/setup-buildx-action@8c0edbc76e98fa90f69d9a2c020dcb50019dc325 # v1
+       uses: docker/setup-buildx-action@f03ac48505955848960e80bbb68046aa35c7b9e7 # v1
      -
        name: Login to Docker Hub
        if: github.event_name != 'pull_request'
@@ -47,7 +47,7 @@ jobs:
          password: ${{ secrets.DOCKER_PASSWORD }}
      -
        name: Build
-       uses: docker/build-push-action@c56af957549030174b10d6867f20e78cfd7debc5 # v2
+       uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671 # v2
        with:
          context: ./docker
          push: ${{ github.event_name != 'pull_request' }}

.github/workflows/depsreview.yml

@@ -9,6 +9,6 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: 'Checkout Repository'
-       uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b
+       uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c
      - name: 'Dependency Review'
-       uses: actions/dependency-review-action@0ff3da6f81b812d4ec3cf37a04e2308c7a723730
+       uses: actions/dependency-review-action@c090f4e553673e6e505ea70d6a95362ee12adb94

.github/workflows/e2e.yml

@@ -30,7 +30,7 @@ jobs:
        id: go
      - name: Check out code into the Go module directory
-       uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v2
+       uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v2
      - name: Install dependencies
        run: |
@@ -42,28 +42,43 @@ jobs:
        run: make build_e2e && docker compose -f ./compose/e2e-mount.yml up --wait
      - name: Run FIO
-       timeout-minutes: 5
+       timeout-minutes: 15
        run: |
          echo "Starting FIO at: $(date)"
          # Concurrent r/w
          echo 'Run randrw with size=16M bs=4k'
-         docker compose -f ./compose/e2e-mount.yml exec mount timeout -k5 40 fio --name=fiotest --filename=/mnt/seaweedfs/fiotest --size=16M --rw=randrw --bs=4k --direct=1 --numjobs=8 --ioengine=libaio --iodepth=32 --group_reporting --runtime=30 --time_based=1
-         echo 'Run randrw with size=16M bs=128k'
-         docker compose -f ./compose/e2e-mount.yml exec mount timeout -k5 40 fio --name=fiotest --filename=/mnt/seaweedfs/fiotest --size=16M --rw=randrw --bs=128k --direct=1 --numjobs=8 --ioengine=libaio --iodepth=32 --group_reporting --runtime=30 --time_based=1
-         echo 'Run randrw with size=16M bs=1m'
-         docker compose -f ./compose/e2e-mount.yml exec mount timeout -k5 40 fio --name=fiotest --filename=/mnt/seaweedfs/fiotest --size=16M --rw=randrw --bs=1m --direct=1 --numjobs=8 --ioengine=libaio --iodepth=32 --group_reporting --runtime=30 --time_based=1
+         docker compose -f ./compose/e2e-mount.yml exec mount timeout -k5 60 fio --name=fiotest --filename=/mnt/seaweedfs/fiotest --size=16M --rw=randrw --bs=4k --direct=1 --numjobs=8 --ioengine=libaio --iodepth=32 --group_reporting --runtime=30 --time_based=1
+         echo "Verify FIO at: $(date)"
          # Verified write
          echo 'Run randwrite with size=16M bs=4k'
-         docker compose -f ./compose/e2e-mount.yml exec mount timeout -k5 40 fio --name=fiotest --filename=/mnt/seaweedfs/fiotest --size=16M --rw=randwrite --bs=4k --direct=1 --numjobs=8 --ioengine=libaio --iodepth=32 --group_reporting --runtime=30 --time_based=1 --do_verify=0 --verify=crc32c --verify_backlog=1
+         docker compose -f ./compose/e2e-mount.yml exec mount timeout -k5 60 fio --name=fiotest --filename=/mnt/seaweedfs/fiotest --size=16M --rw=randwrite --bs=4k --direct=1 --numjobs=8 --ioengine=libaio --iodepth=32 --group_reporting --runtime=30 --time_based=1 --do_verify=0 --verify=crc32c --verify_backlog=1
+     - name: Run FIO
+       timeout-minutes: 15
+       run: |
+         echo "Starting FIO at: $(date)"
+         # Concurrent r/w
+         echo 'Run randrw with size=16M bs=128k'
+         docker compose -f ./compose/e2e-mount.yml exec mount timeout -k5 60 fio --name=fiotest --filename=/mnt/seaweedfs/fiotest --size=16M --rw=randrw --bs=128k --direct=1 --numjobs=8 --ioengine=libaio --iodepth=32 --group_reporting --runtime=30 --time_based=1
+         echo "Verify FIO at: $(date)"
+         # Verified write
          echo 'Run randwrite with size=16M bs=128k'
-         docker compose -f ./compose/e2e-mount.yml exec mount timeout -k5 40 fio --name=fiotest --filename=/mnt/seaweedfs/fiotest --size=16M --rw=randwrite --bs=128k --direct=1 --numjobs=8 --ioengine=libaio --iodepth=32 --group_reporting --runtime=30 --time_based=1 --do_verify=0 --verify=crc32c --verify_backlog=1
+         docker compose -f ./compose/e2e-mount.yml exec mount timeout -k5 60 fio --name=fiotest --filename=/mnt/seaweedfs/fiotest --size=16M --rw=randwrite --bs=128k --direct=1 --numjobs=8 --ioengine=libaio --iodepth=32 --group_reporting --runtime=30 --time_based=1 --do_verify=0 --verify=crc32c --verify_backlog=1
+     - name: Run FIO
+       timeout-minutes: 15
+       run: |
+         echo "Starting FIO at: $(date)"
+         # Concurrent r/w
+         echo 'Run randrw with size=16M bs=1m'
+         docker compose -f ./compose/e2e-mount.yml exec mount timeout -k5 60 fio --name=fiotest --filename=/mnt/seaweedfs/fiotest --size=16M --rw=randrw --bs=1m --direct=1 --numjobs=8 --ioengine=libaio --iodepth=32 --group_reporting --runtime=30 --time_based=1
+         echo "Verify FIO at: $(date)"
+         # Verified write
          echo 'Run randwrite with size=16M bs=1m'
-         docker compose -f ./compose/e2e-mount.yml exec mount timeout -k5 40 fio --name=fiotest --filename=/mnt/seaweedfs/fiotest --size=16M --rw=randwrite --bs=1m --direct=1 --numjobs=8 --ioengine=libaio --iodepth=32 --group_reporting --runtime=30 --time_based=1 --do_verify=0 --verify=crc32c --verify_backlog=1
+         docker compose -f ./compose/e2e-mount.yml exec mount timeout -k5 60 fio --name=fiotest --filename=/mnt/seaweedfs/fiotest --size=16M --rw=randwrite --bs=1m --direct=1 --numjobs=8 --ioengine=libaio --iodepth=32 --group_reporting --runtime=30 --time_based=1 --do_verify=0 --verify=crc32c --verify_backlog=1
      - name: Save logs
        if: always()

.github/workflows/go.yml

@@ -27,7 +27,7 @@ jobs:
        id: go
      - name: Check out code into the Go module directory
-       uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v2
+       uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v2
      - name: Get dependencies
        run: |

.github/workflows/helm_chart_release.yml

@@ -0,0 +1,22 @@
name: "helm: publish charts"
on:
  push:
    tags:
      - '*'

permissions:
  contents: write
  pages: write

jobs:
  release:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Publish Helm charts
        uses: stefanprodan/helm-gh-pages@master
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          charts_dir: k8s/charts
          target_dir: helm
          branch: master

CODE_OF_CONDUCT.md

@@ -0,0 +1,74 @@
# Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to make participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, gender identity and expression, level of experience,
nationality, personal appearance, race, religion, or sexual identity and
orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment
include:
- Using welcoming and inclusive language
- Being respectful of differing viewpoints and experiences
- Gracefully accepting constructive criticism
- Focusing on what is best for the community
- Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
- The use of sexualized language or imagery and unwelcome sexual attention or
advances
- Trolling, insulting/derogatory comments, and personal or political attacks
- Public or private harassment
- Publishing others' private information, such as a physical or electronic
address, without explicit permission
- Other conduct which could reasonably be considered inappropriate in a
professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at <enteremailhere>. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at [http://contributor-covenant.org/version/1/4][version]
[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/

README.md

@@ -3,7 +3,7 @@
 [![Slack](https://img.shields.io/badge/slack-purple)](https://join.slack.com/t/seaweedfs/shared_invite/enQtMzI4MTMwMjU2MzA3LTEyYzZmZWYzOGQ3MDJlZWMzYmI0OTE4OTJiZjJjODBmMzUxNmYwODg0YjY3MTNlMjBmZDQ1NzQ5NDJhZWI2ZmY)
 [![Twitter](https://img.shields.io/twitter/follow/seaweedfs.svg?style=social&label=Follow)](https://twitter.com/intent/follow?screen_name=seaweedfs)
-[![Build Status](https://img.shields.io/github/workflow/status/chrislusf/seaweedfs/Go)](https://github.com/seaweedfs/seaweedfs/actions/workflows/go.yml)
+[![Build Status](https://img.shields.io/github/actions/workflow/status/seaweedfs/seaweedfs/go.yml)](https://github.com/seaweedfs/seaweedfs/actions/workflows/go.yml)
 [![GoDoc](https://godoc.org/github.com/seaweedfs/seaweedfs/weed?status.svg)](https://godoc.org/github.com/seaweedfs/seaweedfs/weed)
 [![Wiki](https://img.shields.io/badge/docs-wiki-blue.svg)](https://github.com/seaweedfs/seaweedfs/wiki)
 [![Docker Pulls](https://img.shields.io/docker/pulls/chrislusf/seaweedfs?maxAge=4800)](https://hub.docker.com/r/chrislusf/seaweedfs/)
@@ -66,6 +66,7 @@ Table of Contents
 * [Compared to GlusterFS, Ceph](#compared-to-glusterfs-ceph)
 * [Compared to GlusterFS](#compared-to-glusterfs)
 * [Compared to Ceph](#compared-to-ceph)
+* [Compared to Minio](#compared-to-minio)
 * [Dev Plan](#dev-plan)
 * [Installation Guide](#installation-guide)
 * [Disk Related Topics](#disk-related-topics)
@@ -79,7 +80,7 @@ Table of Contents
 `docker run -p 8333:8333 chrislusf/seaweedfs server -s3`
 ## Quick Start with Single Binary ##
-* Download the latest binary from https://github.com/seaweedfs/seaweedfs/releases and unzip a single binary file `weed` or `weed.exe`
+* Download the latest binary from https://github.com/seaweedfs/seaweedfs/releases and unzip a single binary file `weed` or `weed.exe`. Or run `go install github.com/seaweedfs/seaweedfs/weed@latest`.
 * Run `weed server -dir=/some/data/dir -s3` to start one master, one volume server, one filer, and one S3 gateway.
 Also, to increase capacity, just add more volume servers by running `weed volume -dir="/some/data/dir2" -mserver="<master_host>:9333" -port=8081` locally, or on a different machine, or on thousands of machines. That is it!

backers.md

@@ -7,6 +7,7 @@
 - [Evercam Camera Management Software](https://evercam.io/)
 - [Spherical Elephant GmbH](https://www.sphericalelephant.com)
+- [WizardTales GmbH](https://www.wizardtales.com)
 - <h2 align="center">Backers</h2>

docker/Dockerfile.go_build

@@ -1,4 +1,4 @@
-FROM golang:1.19-alpine as builder
+FROM golang:1.20-alpine as builder
 RUN apk add git g++ fuse
 RUN mkdir -p /go/src/github.com/seaweedfs/
 RUN git clone https://github.com/seaweedfs/seaweedfs /go/src/github.com/seaweedfs/seaweedfs

docker/Dockerfile.rocksdb_dev_env

@@ -1,4 +1,4 @@
-FROM golang:1.19-buster as builder
+FROM golang:1.20-buster as builder
 RUN apt-get update
 RUN apt-get install -y build-essential libsnappy-dev zlib1g-dev libbz2-dev libgflags-dev liblz4-dev libzstd-dev

docker/Dockerfile.rocksdb_large

@@ -1,4 +1,4 @@
-FROM golang:1.19-buster as builder
+FROM golang:1.20-buster as builder
 RUN apt-get update
 RUN apt-get install -y build-essential libsnappy-dev zlib1g-dev libbz2-dev libgflags-dev liblz4-dev libzstd-dev

docker/seaweedfs.sql

@@ -3,10 +3,10 @@ CREATE USER IF NOT EXISTS 'seaweedfs'@'%' IDENTIFIED BY 'secret';
 GRANT ALL PRIVILEGES ON seaweedfs.* TO 'seaweedfs'@'%';
 FLUSH PRIVILEGES;
 USE seaweedfs;
-CREATE TABLE IF NOT EXISTS filemeta (
-  dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field',
-  name VARCHAR(1000) COMMENT 'directory or file name',
-  directory TEXT COMMENT 'full path to parent directory',
-  meta LONGBLOB,
-  PRIMARY KEY (dirhash, name)
-) DEFAULT CHARSET=utf8;
+CREATE TABLE IF NOT EXISTS `filemeta` (
+  `dirhash` BIGINT NOT NULL COMMENT 'first 64 bits of MD5 hash value of directory field',
+  `name` VARCHAR(766) NOT NULL COMMENT 'directory or file name',
+  `directory` TEXT NOT NULL COMMENT 'full path to parent directory',
+  `meta` LONGBLOB,
+  PRIMARY KEY (`dirhash`, `name`)
+) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
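The diff itself does not say why the column shrank, but a plausible reading of VARCHAR(766): utf8mb4 characters take up to 4 bytes, and InnoDB caps an index key at 3072 bytes, so the composite primary key just fits: 8 bytes for the BIGINT `dirhash` plus 766 × 4 = 3064 bytes for `name`, 3072 bytes in total.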

149
go.mod

@ -1,39 +1,39 @@
module github.com/seaweedfs/seaweedfs module github.com/seaweedfs/seaweedfs
go 1.19
go 1.20
require ( require (
cloud.google.com/go v0.105.0 // indirect
cloud.google.com/go v0.107.0 // indirect
cloud.google.com/go/pubsub v1.28.0 cloud.google.com/go/pubsub v1.28.0
cloud.google.com/go/storage v1.28.1
cloud.google.com/go/storage v1.29.0
github.com/Azure/azure-pipeline-go v0.2.3 github.com/Azure/azure-pipeline-go v0.2.3
github.com/Azure/azure-storage-blob-go v0.15.0 github.com/Azure/azure-storage-blob-go v0.15.0
github.com/Shopify/sarama v1.37.2
github.com/aws/aws-sdk-go v1.44.167
github.com/Shopify/sarama v1.38.1
github.com/aws/aws-sdk-go v1.44.189
github.com/beorn7/perks v1.0.1 // indirect github.com/beorn7/perks v1.0.1 // indirect
github.com/bwmarrin/snowflake v0.3.0 github.com/bwmarrin/snowflake v0.3.0
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/coreos/go-semver v0.3.0 // indirect github.com/coreos/go-semver v0.3.0 // indirect
github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/disintegration/imaging v1.6.2 github.com/disintegration/imaging v1.6.2
github.com/dustin/go-humanize v1.0.0
github.com/dustin/go-humanize v1.0.1
github.com/eapache/go-resiliency v1.3.0 // indirect github.com/eapache/go-resiliency v1.3.0 // indirect
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 // indirect
github.com/eapache/go-xerial-snappy v0.0.0-20230111030713-bf00bc1b83b6 // indirect
github.com/eapache/queue v1.1.0 // indirect github.com/eapache/queue v1.1.0 // indirect
github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a
github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c // indirect github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c // indirect
github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect
github.com/facebookgo/stats v0.0.0-20151006221625-1b76add642e4 github.com/facebookgo/stats v0.0.0-20151006221625-1b76add642e4
github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 // indirect github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 // indirect
github.com/fclairamb/ftpserverlib v0.20.0
github.com/fclairamb/ftpserverlib v0.21.0
github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/go-errors/errors v1.1.1 // indirect github.com/go-errors/errors v1.1.1 // indirect
github.com/go-redis/redis/v8 v8.11.5 github.com/go-redis/redis/v8 v8.11.5
github.com/go-redsync/redsync/v4 v4.7.1
github.com/go-redsync/redsync/v4 v4.8.1
github.com/go-sql-driver/mysql v1.7.0 github.com/go-sql-driver/mysql v1.7.0
github.com/go-zookeeper/zk v1.0.2 // indirect
github.com/go-zookeeper/zk v1.0.3 // indirect
github.com/gocql/gocql v0.0.0-20210707082121-9a3953d1826d github.com/gocql/gocql v0.0.0-20210707082121-9a3953d1826d
github.com/golang-jwt/jwt v3.2.2+incompatible github.com/golang-jwt/jwt v3.2.2+incompatible
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
@ -57,21 +57,20 @@ require (
github.com/json-iterator/go v1.1.12 github.com/json-iterator/go v1.1.12
github.com/karlseguin/ccache/v2 v2.0.8 github.com/karlseguin/ccache/v2 v2.0.8
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/klauspost/compress v1.15.11 // indirect
github.com/klauspost/reedsolomon v1.11.3
github.com/klauspost/compress v1.15.14 // indirect
github.com/klauspost/reedsolomon v1.11.7
github.com/kurin/blazer v0.5.3 github.com/kurin/blazer v0.5.3
github.com/lib/pq v1.10.7 github.com/lib/pq v1.10.7
github.com/linxGnu/grocksdb v1.7.10
github.com/magiconair/properties v1.8.6 // indirect
github.com/linxGnu/grocksdb v1.7.14
github.com/magiconair/properties v1.8.7 // indirect
github.com/mailru/easyjson v0.7.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-ieproxy v0.0.9 // indirect github.com/mattn/go-ieproxy v0.0.9 // indirect
github.com/mattn/go-isatty v0.0.16 // indirect
github.com/mattn/go-isatty v0.0.17 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/olivere/elastic/v7 v7.0.32 github.com/olivere/elastic/v7 v7.0.32
github.com/pelletier/go-toml v1.9.5 // indirect
github.com/peterh/liner v1.2.2 github.com/peterh/liner v1.2.2
github.com/pkg/errors v0.9.1 // indirect github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect
@ -83,13 +82,13 @@ require (
github.com/prometheus/procfs v0.9.0 github.com/prometheus/procfs v0.9.0
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
github.com/seaweedfs/goexif v2.0.0+incompatible
github.com/seaweedfs/goexif v1.0.3
github.com/seaweedfs/raft v1.1.0 github.com/seaweedfs/raft v1.1.0
github.com/sirupsen/logrus v1.9.0 // indirect github.com/sirupsen/logrus v1.9.0 // indirect
github.com/spf13/afero v1.9.2 // indirect
github.com/spf13/afero v1.9.3 // indirect
github.com/spf13/cast v1.5.0 // indirect github.com/spf13/cast v1.5.0 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/spf13/viper v1.14.0
github.com/spf13/viper v1.15.0
github.com/stretchr/testify v1.8.1 github.com/stretchr/testify v1.8.1
github.com/stvp/tempredis v0.0.0-20181119212430-b82af8480203 github.com/stvp/tempredis v0.0.0-20181119212430-b82af8480203
github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965 github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965
@ -103,78 +102,78 @@ require (
github.com/viant/ptrie v0.3.0 github.com/viant/ptrie v0.3.0
github.com/viant/toolbox v0.33.2 // indirect github.com/viant/toolbox v0.33.2 // indirect
github.com/xdg-go/pbkdf2 v1.0.0 // indirect github.com/xdg-go/pbkdf2 v1.0.0 // indirect
github.com/xdg-go/scram v1.1.1 // indirect
github.com/xdg-go/stringprep v1.0.3 // indirect
github.com/xdg-go/scram v1.1.2 // indirect
github.com/xdg-go/stringprep v1.0.4 // indirect
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect
go.etcd.io/etcd/client/v3 v3.5.6
go.etcd.io/etcd/client/v3 v3.5.7
go.mongodb.org/mongo-driver v1.11.1 go.mongodb.org/mongo-driver v1.11.1
go.opencensus.io v0.24.0 // indirect go.opencensus.io v0.24.0 // indirect
gocloud.dev v0.27.0
gocloud.dev/pubsub/natspubsub v0.27.0
gocloud.dev v0.28.0
gocloud.dev/pubsub/natspubsub v0.28.0
gocloud.dev/pubsub/rabbitpubsub v0.27.0 gocloud.dev/pubsub/rabbitpubsub v0.27.0
golang.org/x/crypto v0.1.0 // indirect
golang.org/x/exp v0.0.0-20220414153411-bcd21879b8fd
golang.org/x/crypto v0.3.0 // indirect
golang.org/x/exp v0.0.0-20221031165847-c99f073a8326
golang.org/x/image v0.0.0-20200119044424-58c23975cae1 golang.org/x/image v0.0.0-20200119044424-58c23975cae1
golang.org/x/net v0.4.0
golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 // indirect
golang.org/x/sys v0.3.0
golang.org/x/text v0.5.0 // indirect
golang.org/x/tools v0.4.0
golang.org/x/net v0.7.0
golang.org/x/oauth2 v0.2.0 // indirect
golang.org/x/sys v0.5.0
golang.org/x/text v0.7.0 // indirect
golang.org/x/tools v0.6.0
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
google.golang.org/api v0.105.0
google.golang.org/api v0.108.0
google.golang.org/appengine v1.6.7 // indirect google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20221206210731-b1a01be3a5f6 // indirect
google.golang.org/grpc v1.51.0
google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect
google.golang.org/grpc v1.52.0
google.golang.org/protobuf v1.28.1 google.golang.org/protobuf v1.28.1
gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect
modernc.org/b v1.0.0 // indirect modernc.org/b v1.0.0 // indirect
modernc.org/cc/v3 v3.40.0 // indirect modernc.org/cc/v3 v3.40.0 // indirect
modernc.org/ccgo/v3 v3.16.13 // indirect modernc.org/ccgo/v3 v3.16.13 // indirect
modernc.org/libc v1.21.5 // indirect
modernc.org/libc v1.22.2 // indirect
modernc.org/mathutil v1.5.0 // indirect modernc.org/mathutil v1.5.0 // indirect
modernc.org/memory v1.4.0 // indirect modernc.org/memory v1.4.0 // indirect
modernc.org/opt v0.1.3 // indirect modernc.org/opt v0.1.3 // indirect
modernc.org/sqlite v1.20.0
modernc.org/sqlite v1.20.4
modernc.org/strutil v1.1.3 modernc.org/strutil v1.1.3
modernc.org/token v1.0.1 // indirect modernc.org/token v1.0.1 // indirect
) )
require ( require (
github.com/Jille/raft-grpc-transport v1.3.0
github.com/Jille/raft-grpc-transport v1.4.0
github.com/arangodb/go-driver v1.4.1 github.com/arangodb/go-driver v1.4.1
github.com/armon/go-metrics v0.4.1 github.com/armon/go-metrics v0.4.1
github.com/fluent/fluent-logger-golang v1.9.0 github.com/fluent/fluent-logger-golang v1.9.0
github.com/google/flatbuffers v22.11.23+incompatible
github.com/google/flatbuffers/go v0.0.0-20230108230133-3b8644d32c50
github.com/hanwen/go-fuse/v2 v2.1.1-0.20220627082937-d01fda7edf17 github.com/hanwen/go-fuse/v2 v2.1.1-0.20220627082937-d01fda7edf17
github.com/hashicorp/raft v1.3.11 github.com/hashicorp/raft v1.3.11
github.com/hashicorp/raft-boltdb/v2 v2.2.2 github.com/hashicorp/raft-boltdb/v2 v2.2.2
github.com/rabbitmq/amqp091-go v1.5.0
github.com/schollz/progressbar/v3 v3.12.2
github.com/tikv/client-go/v2 v2.0.3
github.com/rabbitmq/amqp091-go v1.7.0
github.com/schollz/progressbar/v3 v3.13.0
github.com/tikv/client-go/v2 v2.0.5
github.com/ydb-platform/ydb-go-sdk-auth-environ v0.1.2 github.com/ydb-platform/ydb-go-sdk-auth-environ v0.1.2
github.com/ydb-platform/ydb-go-sdk/v3 v3.41.0
golang.org/x/sync v0.1.0
github.com/ydb-platform/ydb-go-sdk/v3 v3.42.10
google.golang.org/grpc/security/advancedtls v0.0.0-20220622233350-5cdb09fa29c1 google.golang.org/grpc/security/advancedtls v0.0.0-20220622233350-5cdb09fa29c1
) )
require ( require (
cloud.google.com/go/compute v1.13.0 // indirect
cloud.google.com/go/compute/metadata v0.2.2 // indirect
cloud.google.com/go/compute v1.14.0 // indirect
cloud.google.com/go/compute/metadata v0.2.3 // indirect
cloud.google.com/go/iam v0.8.0 // indirect cloud.google.com/go/iam v0.8.0 // indirect
github.com/arangodb/go-velocypack v0.0.0-20200318135517-5af53c29c67e // indirect github.com/arangodb/go-velocypack v0.0.0-20200318135517-5af53c29c67e // indirect
github.com/aws/aws-sdk-go-v2 v1.16.8 // indirect
github.com/aws/aws-sdk-go-v2/config v1.15.15 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.12.10 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.9 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.15 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.9 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.16 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.9 // indirect
github.com/aws/aws-sdk-go-v2/service/sns v1.17.10 // indirect
github.com/aws/aws-sdk-go-v2/service/sqs v1.19.1 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.11.13 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.16.10 // indirect
github.com/aws/smithy-go v1.12.0 // indirect
github.com/aws/aws-sdk-go-v2 v1.17.1 // indirect
github.com/aws/aws-sdk-go-v2/config v1.18.3 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.13.3 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.19 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.25 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.19 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.26 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.19 // indirect
github.com/aws/aws-sdk-go-v2/service/sns v1.18.6 // indirect
github.com/aws/aws-sdk-go-v2/service/sqs v1.19.15 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.11.25 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.8 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.17.5 // indirect
github.com/aws/smithy-go v1.13.4 // indirect
github.com/benbjohnson/clock v1.1.0 // indirect github.com/benbjohnson/clock v1.1.0 // indirect
github.com/boltdb/bolt v1.3.1 // indirect github.com/boltdb/bolt v1.3.1 // indirect
github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 // indirect github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 // indirect
@ -183,8 +182,8 @@ require (
github.com/fatih/color v1.13.0 // indirect github.com/fatih/color v1.13.0 // indirect
github.com/fclairamb/go-log v0.4.1 // indirect github.com/fclairamb/go-log v0.4.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.4.2 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.2.0 // indirect
github.com/golang-jwt/jwt/v4 v4.4.3 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.2.1 // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect
github.com/hashicorp/go-hclog v1.2.0 // indirect
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
@ -201,23 +200,24 @@ require (
github.com/mattn/go-sqlite3 v2.0.1+incompatible // indirect
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
github.com/montanaflynn/stats v0.6.6 // indirect
github.com/nats-io/nats.go v1.16.0 // indirect
github.com/nats-io/nats.go v1.20.0 // indirect
github.com/nats-io/nkeys v0.3.0 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/pelletier/go-toml/v2 v2.0.5 // indirect
github.com/pelletier/go-toml/v2 v2.0.6 // indirect
github.com/philhofer/fwd v1.1.1 // indirect
github.com/pierrec/lz4/v4 v4.1.17 // indirect
github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c // indirect
github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00 // indirect
github.com/pingcap/kvproto v0.0.0-20221129023506-621ec37aac7a // indirect
github.com/pingcap/log v1.1.1-0.20221015072633-39906604fb81 // indirect
github.com/pingcap/kvproto v0.0.0-20230201112839-2b853bed8125 // indirect
github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 // indirect
github.com/rivo/uniseg v0.4.3 // indirect
github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/stathat/consistent v1.0.0 // indirect
github.com/subosito/gotenv v1.4.1 // indirect
github.com/tikv/pd/client v0.0.0-20221031025758-80f0d8ca4d07 // indirect
github.com/subosito/gotenv v1.4.2 // indirect
github.com/tiancaiamao/gp v0.0.0-20221230034425-4025bc8a4d4a // indirect
github.com/tikv/pd v1.1.0-beta.0.20230202094356-18df271ce57f // indirect
github.com/tikv/pd/client v0.0.0-20230202094356-18df271ce57f // indirect
github.com/tinylib/msgp v1.1.6 // indirect
github.com/twmb/murmur3 v1.1.3 // indirect
github.com/yandex-cloud/go-genproto v0.0.0-20211115083454-9ca41db5ed9e // indirect
@ -225,13 +225,14 @@ require (
github.com/ydb-platform/ydb-go-yc v0.8.3 // indirect
github.com/ydb-platform/ydb-go-yc-metadata v0.5.2 // indirect
go.etcd.io/bbolt v1.3.6 // indirect
go.etcd.io/etcd/api/v3 v3.5.6 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.6 // indirect
go.etcd.io/etcd/api/v3 v3.5.7 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.7 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/multierr v1.8.0 // indirect
go.uber.org/zap v1.22.0 // indirect
golang.org/x/mod v0.7.0 // indirect
golang.org/x/term v0.3.0 // indirect
go.uber.org/zap v1.24.0 // indirect
golang.org/x/mod v0.8.0 // indirect
golang.org/x/sync v0.1.0 // indirect
golang.org/x/term v0.5.0 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect

770
go.sum
File diff suppressed because it is too large

13
helm/index.yaml

@ -0,0 +1,13 @@
apiVersion: v1
entries:
seaweedfs:
- apiVersion: v1
appVersion: "3.43"
created: "2023-02-21T01:05:06.654751634Z"
description: SeaweedFS
digest: b8b9071dd8624a06d47b865a0a0e64d1093c2f0406ede47f40019fe9ea7e82a5
name: seaweedfs
urls:
- https://seaweedfs.github.io/seaweedfs/helm/seaweedfs-3.43.tgz
version: "3.43"
generated: "2023-02-21T01:05:06.652866698Z"

BIN
helm/seaweedfs-3.43.tgz

0
k8s/helm_charts2/.helmignore → k8s/charts/seaweedfs/.helmignore

4
k8s/helm_charts2/Chart.yaml → k8s/charts/seaweedfs/Chart.yaml

@ -1,5 +1,5 @@
apiVersion: v1
description: SeaweedFS
name: seaweedfs
appVersion: "3.37"
version: "3.37"
appVersion: "3.43"
version: "3.43"

14
k8s/helm_charts2/README.md → k8s/charts/seaweedfs/README.md

@ -14,13 +14,13 @@ with ENV.
A running MySQL-compatible database is expected by default, as specified in the `values.yaml` at `filer.extraEnvironmentVars`.
This database should be pre-configured and initialized by running:
```sql
CREATE TABLE IF NOT EXISTS filemeta (
dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field',
name VARCHAR(1000) BINARY COMMENT 'directory or file name',
directory TEXT BINARY COMMENT 'full path to parent directory',
meta LONGBLOB,
PRIMARY KEY (dirhash, name)
) DEFAULT CHARSET=utf8;
CREATE TABLE IF NOT EXISTS `filemeta` (
`dirhash` BIGINT NOT NULL COMMENT 'first 64 bits of MD5 hash value of directory field',
`name` VARCHAR(766) NOT NULL COMMENT 'directory or file name',
`directory` TEXT NOT NULL COMMENT 'full path to parent directory',
`meta` LONGBLOB,
PRIMARY KEY (`dirhash`, `name`)
) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
```
An alternative database can also be configured (e.g. leveldb) following the instructions at `filer.extraEnvironmentVars`.
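As a minimal sketch (not part of this change), switching the filer to the embedded leveldb store could be done by overriding `filer.extraEnvironmentVars`; the `WEED_MYSQL_ENABLED` / `WEED_LEVELDB2_*` variable names are assumed here from SeaweedFS's `WEED_<SECTION>_<KEY>` convention and should be verified against the chart's default `values.yaml`:
```yaml
filer:
  extraEnvironmentVars:
    # Assumed variable names following the WEED_<SECTION>_<KEY> convention;
    # verify against the chart's default values.yaml before relying on them.
    WEED_MYSQL_ENABLED: "false"          # turn off the default MySQL store
    WEED_LEVELDB2_ENABLED: "true"        # use the embedded leveldb store instead
    WEED_LEVELDB2_DIR: "/data/filerldb2" # hypothetical on-disk location for the store
```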

0
k8s/helm_charts2/dashboards/seaweedfs-grafana-dashboard.json → k8s/charts/seaweedfs/dashboards/seaweedfs-grafana-dashboard.json

0
k8s/helm_charts2/templates/_helpers.tpl → k8s/charts/seaweedfs/templates/_helpers.tpl

0
k8s/helm_charts2/templates/ca-cert.yaml → k8s/charts/seaweedfs/templates/ca-cert.yaml

0
k8s/helm_charts2/templates/cert-clusterissuer.yaml → k8s/charts/seaweedfs/templates/cert-clusterissuer.yaml

0
k8s/helm_charts2/templates/client-cert.yaml → k8s/charts/seaweedfs/templates/client-cert.yaml

0
k8s/helm_charts2/templates/filer-cert.yaml → k8s/charts/seaweedfs/templates/filer-cert.yaml

0
k8s/helm_charts2/templates/filer-service-client.yaml → k8s/charts/seaweedfs/templates/filer-service-client.yaml

0
k8s/helm_charts2/templates/filer-service.yaml → k8s/charts/seaweedfs/templates/filer-service.yaml

0
k8s/helm_charts2/templates/filer-servicemonitor.yaml → k8s/charts/seaweedfs/templates/filer-servicemonitor.yaml

2
k8s/helm_charts2/templates/filer-statefulset.yaml → k8s/charts/seaweedfs/templates/filer-statefulset.yaml

@ -162,7 +162,7 @@ spec:
-s3.auditLogConfig=/etc/sw/filer_s3_auditLogConfig.json \
{{- end }}
{{- end }}
-master={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}
-master={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master.{{ $.Release.Namespace }}:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}
volumeMounts:
- name: seaweedfs-filer-log-volume
mountPath: "/logs/"

0
k8s/helm_charts2/templates/ingress.yaml → k8s/charts/seaweedfs/templates/ingress.yaml

0
k8s/helm_charts2/templates/master-cert.yaml → k8s/charts/seaweedfs/templates/master-cert.yaml

0
k8s/helm_charts2/templates/master-service.yaml → k8s/charts/seaweedfs/templates/master-service.yaml

0
k8s/helm_charts2/templates/master-servicemonitor.yaml → k8s/charts/seaweedfs/templates/master-servicemonitor.yaml

4
k8s/helm_charts2/templates/master-statefulset.yaml → k8s/charts/seaweedfs/templates/master-statefulset.yaml

@ -133,8 +133,8 @@ spec:
{{- if .Values.master.garbageThreshold }}
-garbageThreshold={{ .Values.master.garbageThreshold }} \
{{- end }}
-ip=${POD_NAME}.${SEAWEEDFS_FULLNAME}-master \
-peers={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}
-ip=${POD_NAME}.${SEAWEEDFS_FULLNAME}-master.{{ .Release.Namespace }} \
-peers={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master.{{ $.Release.Namespace }}:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}
volumeMounts:
- name : data-{{ .Release.Namespace }}
mountPath: /data

2
k8s/helm_charts2/templates/s3-deployment.yaml → k8s/charts/seaweedfs/templates/s3-deployment.yaml

@ -105,7 +105,7 @@ spec:
{{- if .Values.s3.auditLogConfig }}
-auditLogConfig=/etc/sw/s3_auditLogConfig.json \
{{- end }}
-filer={{ template "seaweedfs.name" . }}-filer-client:{{ .Values.filer.port }}
-filer={{ template "seaweedfs.name" . }}-filer-client.{{ .Release.Namespace }}:{{ .Values.filer.port }}
volumeMounts:
- name: logs
mountPath: "/logs/"

0
k8s/helm_charts2/templates/s3-service.yaml → k8s/charts/seaweedfs/templates/s3-service.yaml

0
k8s/helm_charts2/templates/s3-servicemonitor.yaml → k8s/charts/seaweedfs/templates/s3-servicemonitor.yaml

0
k8s/helm_charts2/templates/seaweedfs-grafana-dashboard.yaml → k8s/charts/seaweedfs/templates/seaweedfs-grafana-dashboard.yaml

0
k8s/helm_charts2/templates/seaweedfs-s3-secret.yaml → k8s/charts/seaweedfs/templates/seaweedfs-s3-secret.yaml

0
k8s/helm_charts2/templates/secret-seaweedfs-db.yaml → k8s/charts/seaweedfs/templates/secret-seaweedfs-db.yaml

0
k8s/helm_charts2/templates/security-configmap.yaml → k8s/charts/seaweedfs/templates/security-configmap.yaml

0
k8s/helm_charts2/templates/service-account.yaml → k8s/charts/seaweedfs/templates/service-account.yaml

0
k8s/helm_charts2/templates/volume-cert.yaml → k8s/charts/seaweedfs/templates/volume-cert.yaml

0
k8s/helm_charts2/templates/volume-service.yaml → k8s/charts/seaweedfs/templates/volume-service.yaml

0
k8s/helm_charts2/templates/volume-servicemonitor.yaml → k8s/charts/seaweedfs/templates/volume-servicemonitor.yaml

4
k8s/helm_charts2/templates/volume-statefulset.yaml → k8s/charts/seaweedfs/templates/volume-statefulset.yaml

@ -138,9 +138,9 @@ spec:
-fileSizeLimitMB={{ .Values.volume.fileSizeLimitMB }} \
{{- end }}
-minFreeSpacePercent={{ .Values.volume.minFreeSpacePercent }} \
-ip=${POD_NAME}.${SEAWEEDFS_FULLNAME}-volume \
-ip=${POD_NAME}.${SEAWEEDFS_FULLNAME}-volume.{{ .Release.Namespace }} \
-compactionMBps={{ .Values.volume.compactionMBps }} \
-mserver={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}
-mserver={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master.{{ $.Release.Namespace }}:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}
volumeMounts:
- name: data
mountPath: "{{ .Values.volume.dir }}/"

4
k8s/helm_charts2/values.yaml → k8s/charts/seaweedfs/values.yaml

@ -22,8 +22,8 @@ global:
replicationPlacment: "001"
extraEnvironmentVars:
WEED_CLUSTER_DEFAULT: "sw"
WEED_CLUSTER_SW_MASTER: "seaweedfs-master:9333"
WEED_CLUSTER_SW_FILER: "seaweedfs-filer-client:8888"
WEED_CLUSTER_SW_MASTER: "seaweedfs-master.seaweedfs:9333"
WEED_CLUSTER_SW_FILER: "seaweedfs-filer-client.seaweedfs:8888"
image:
registry: ""

4
other/java/s3copier/pom.xml

@ -5,6 +5,10 @@
<artifactId>copier</artifactId>
<packaging>jar</packaging>
<version>1.0-SNAPSHOT</version>
<properties>
<maven.compiler.source>18</maven.compiler.source>
<maven.compiler.target>18</maven.compiler.target>
</properties>
<name>copier</name>
<url>http://maven.apache.org</url>

15
other/java/s3copier/src/main/java/com/seaweedfs/s3/HighLevelMultipartUpload.java

@ -10,6 +10,8 @@ import com.amazonaws.client.builder.AwsClientBuilder;
import com.amazonaws.regions.Regions;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.CreateBucketRequest;
import com.amazonaws.services.s3.model.GetBucketLocationRequest;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import com.amazonaws.services.s3.transfer.Upload;
@ -42,6 +44,19 @@ public class HighLevelMultipartUpload {
.withS3Client(s3Client)
.build();
if (!s3Client.doesBucketExistV2(bucketName)) {
// Because the CreateBucketRequest object doesn't specify a region, the
// bucket is created in the region specified in the client.
s3Client.createBucket(new CreateBucketRequest(bucketName));
// Verify that the bucket was created by retrieving it and checking its location.
String bucketLocation = s3Client.getBucketLocation(new GetBucketLocationRequest(bucketName));
System.out.println("Bucket location: " + bucketLocation);
} else {
System.out.println("Bucket already exists");
}
// TransferManager processes all transfers asynchronously,
// so this call returns immediately.
Upload upload = tm.upload(bucketName, keyName, file);

3
unmaintained/load_test/load_test_meta_tail/load_test_meta_tail.go

@ -6,6 +6,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
"strconv"
@ -52,7 +53,7 @@ func main() {
}
func startGenerateMetadata() {
pb.WithFilerClient(false, pb.ServerAddress(*tailFiler), grpc.WithTransportCredentials(insecure.NewCredentials()), func(client filer_pb.SeaweedFilerClient) error {
pb.WithFilerClient(false, util.RandomInt32(), pb.ServerAddress(*tailFiler), grpc.WithTransportCredentials(insecure.NewCredentials()), func(client filer_pb.SeaweedFilerClient) error {
for i := 0; i < *n; i++ {
name := fmt.Sprintf("file%d", i)

1
weed/command/filer.go

@ -113,6 +113,7 @@ func init() {
filerWebDavOptions.tlsCertificate = cmdFiler.Flag.String("webdav.cert.file", "", "path to the TLS certificate file")
filerWebDavOptions.cacheDir = cmdFiler.Flag.String("webdav.cacheDir", os.TempDir(), "local cache directory for file chunks")
filerWebDavOptions.cacheSizeMB = cmdFiler.Flag.Int64("webdav.cacheCapacityMB", 0, "local cache capacity in MB")
filerWebDavOptions.filerRootPath = cmdFiler.Flag.String("webdav.filer.path", "/", "use this remote path from filer server")
// start iam on filer
filerStartIam = cmdFiler.Flag.Bool("iam", false, "whether to start IAM service")

2
weed/command/filer_cat.go

@ -96,7 +96,7 @@ func runFilerCat(cmd *Command, args []string) bool {
writer = f
}
pb.WithFilerClient(false, filerCat.filerAddress, filerCat.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
pb.WithFilerClient(false, util.RandomInt32(), filerCat.filerAddress, filerCat.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.LookupDirectoryEntryRequest{
Name: name,

20
weed/command/filer_copy.go

@ -159,6 +159,7 @@ func runCopy(cmd *Command, args []string) bool {
worker := FileCopyWorker{
options: &copy,
filerAddress: filerAddress,
signature: util.RandomInt32(),
}
if err := worker.copyFiles(fileCopyTaskChan); err != nil {
fmt.Fprintf(os.Stderr, "copy file error: %v\n", err)
@ -172,7 +173,7 @@ func runCopy(cmd *Command, args []string) bool {
}
func readFilerConfiguration(grpcDialOption grpc.DialOption, filerGrpcAddress pb.ServerAddress) (masters []string, collection, replication string, dirBuckets string, maxMB uint32, cipher bool, err error) {
err = pb.WithGrpcFilerClient(false, filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
err = pb.WithGrpcFilerClient(false, 0, filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
if err != nil {
return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err)
@ -225,6 +226,7 @@ func genFileCopyTask(fileOrDir string, destPath string, fileCopyTaskChan chan Fi
type FileCopyWorker struct {
options *CopyOptions
filerAddress pb.ServerAddress
signature int32
}
func (worker *FileCopyWorker) copyFiles(fileCopyTaskChan chan FileCopyTask) error {
@ -302,7 +304,7 @@ func (worker *FileCopyWorker) checkExistingFileFirst(task FileCopyTask, f *os.Fi
return
}
err = pb.WithGrpcFilerClient(false, worker.filerAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
err = pb.WithGrpcFilerClient(false, worker.signature, worker.filerAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.LookupDirectoryEntryRequest{
Directory: task.destinationUrlPath,
@ -365,10 +367,10 @@ func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) err
if flushErr != nil {
return flushErr
}
chunks = append(chunks, uploadResult.ToPbFileChunk(finalFileId, 0))
chunks = append(chunks, uploadResult.ToPbFileChunk(finalFileId, 0, time.Now().UnixNano()))
}
if err := pb.WithGrpcFilerClient(false, worker.filerAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
if err := pb.WithGrpcFilerClient(false, worker.signature, worker.filerAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.CreateEntryRequest{
Directory: task.destinationUrlPath,
Entry: &filer_pb.Entry{
@ -450,7 +452,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File,
uploadError = fmt.Errorf("upload %v result: %v\n", fileName, uploadResult.Error)
return
}
chunksChan <- uploadResult.ToPbFileChunk(fileId, i*chunkSize)
chunksChan <- uploadResult.ToPbFileChunk(fileId, i*chunkSize, time.Now().UnixNano())
fmt.Printf("uploaded %s-%d [%d,%d)\n", fileName, i+1, i*chunkSize, i*chunkSize+int64(uploadResult.Size))
}(i)
@ -479,7 +481,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File,
return fmt.Errorf("create manifest: %v", manifestErr)
}
if err := pb.WithGrpcFilerClient(false, worker.filerAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
if err := pb.WithGrpcFilerClient(false, worker.signature, worker.filerAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.CreateEntryRequest{
Directory: task.destinationUrlPath,
Entry: &filer_pb.Entry{
@ -530,7 +532,7 @@ func detectMimeType(f *os.File) string {
return mimeType
}
func (worker *FileCopyWorker) saveDataAsChunk(reader io.Reader, name string, offset int64) (chunk *filer_pb.FileChunk, err error) {
func (worker *FileCopyWorker) saveDataAsChunk(reader io.Reader, name string, offset int64, tsNs int64) (chunk *filer_pb.FileChunk, err error) {
finalFileId, uploadResult, flushErr, _ := operation.UploadWithRetry(
worker,
@ -561,7 +563,7 @@ func (worker *FileCopyWorker) saveDataAsChunk(reader io.Reader, name string, off
if uploadResult.Error != "" {
return nil, fmt.Errorf("upload result: %v", uploadResult.Error)
}
return uploadResult.ToPbFileChunk(finalFileId, offset), nil
return uploadResult.ToPbFileChunk(finalFileId, offset, tsNs), nil
}
var _ = filer_pb.FilerClient(&FileCopyWorker{})
@ -569,7 +571,7 @@ var _ = filer_pb.FilerClient(&FileCopyWorker{})
func (worker *FileCopyWorker) WithFilerClient(streamingMode bool, fn func(filer_pb.SeaweedFilerClient) error) (err error) {
filerGrpcAddress := worker.filerAddress.ToGrpcAddress()
err = pb.WithGrpcClient(streamingMode, func(grpcConnection *grpc.ClientConn) error {
err = pb.WithGrpcClient(streamingMode, worker.signature, func(grpcConnection *grpc.ClientConn) error {
client := filer_pb.NewSeaweedFilerClient(grpcConnection)
return fn(client)
}, filerGrpcAddress, false, worker.options.grpcDialOption)

2
weed/command/filer_meta_backup.go

@ -225,7 +225,7 @@ var _ = filer_pb.FilerClient(&FilerMetaBackupOptions{})
func (metaBackup *FilerMetaBackupOptions) WithFilerClient(streamingMode bool, fn func(filer_pb.SeaweedFilerClient) error) error {
return pb.WithFilerClient(streamingMode, pb.ServerAddress(*metaBackup.filerAddress), metaBackup.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
return pb.WithFilerClient(streamingMode, metaBackup.clientId, pb.ServerAddress(*metaBackup.filerAddress), metaBackup.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
return fn(client)
})

2
weed/command/filer_remote_gateway.go

@ -35,7 +35,7 @@ type RemoteGatewayOptions struct {
var _ = filer_pb.FilerClient(&RemoteGatewayOptions{})
func (option *RemoteGatewayOptions) WithFilerClient(streamingMode bool, fn func(filer_pb.SeaweedFilerClient) error) error {
return pb.WithFilerClient(streamingMode, pb.ServerAddress(*option.filerAddress), option.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
return pb.WithFilerClient(streamingMode, option.clientId, pb.ServerAddress(*option.filerAddress), option.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
return fn(client)
})
}

2
weed/command/filer_remote_sync.go

@ -26,7 +26,7 @@ type RemoteSyncOptions struct {
var _ = filer_pb.FilerClient(&RemoteSyncOptions{})
func (option *RemoteSyncOptions) WithFilerClient(streamingMode bool, fn func(filer_pb.SeaweedFilerClient) error) error {
return pb.WithFilerClient(streamingMode, pb.ServerAddress(*option.filerAddress), option.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
return pb.WithFilerClient(streamingMode, option.clientId, pb.ServerAddress(*option.filerAddress), option.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
return fn(client)
})
}

4
weed/command/filer_sync.go

@ -304,7 +304,7 @@ func getSignaturePrefixByPath(path string) string {
func getOffset(grpcDialOption grpc.DialOption, filer pb.ServerAddress, signaturePrefix string, signature int32) (lastOffsetTsNs int64, readErr error) {
readErr = pb.WithFilerClient(false, filer, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
readErr = pb.WithFilerClient(false, signature, filer, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
syncKey := []byte(signaturePrefix + "____")
util.Uint32toBytes(syncKey[len(signaturePrefix):len(signaturePrefix)+4], uint32(signature))
@ -330,7 +330,7 @@ func getOffset(grpcDialOption grpc.DialOption, filer pb.ServerAddress, signature
}
func setOffset(grpcDialOption grpc.DialOption, filer pb.ServerAddress, signaturePrefix string, signature int32, offsetTsNs int64) error {
return pb.WithFilerClient(false, filer, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
return pb.WithFilerClient(false, signature, filer, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
syncKey := []byte(signaturePrefix + "____")
util.Uint32toBytes(syncKey[len(signaturePrefix):len(signaturePrefix)+4], uint32(signature))

2
weed/command/iam.go

@ -50,7 +50,7 @@ func (iamopt *IamOptions) startIamServer() bool {
util.LoadConfiguration("security", false)
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
for {
err := pb.WithGrpcFilerClient(false, filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
err := pb.WithGrpcFilerClient(false, 0, filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
if err != nil {
return fmt.Errorf("get filer %s configuration: %v", filerAddress, err)

4
weed/command/mount.go

@ -53,9 +53,9 @@ func init() {
mountOptions.diskType = cmdMount.Flag.String("disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag")
mountOptions.ttlSec = cmdMount.Flag.Int("ttl", 0, "file ttl in seconds")
mountOptions.chunkSizeLimitMB = cmdMount.Flag.Int("chunkSizeLimitMB", 2, "local write buffer size, also chunk large files")
mountOptions.concurrentWriters = cmdMount.Flag.Int("concurrentWriters", 32, "limit concurrent goroutine writers if not 0")
mountOptions.concurrentWriters = cmdMount.Flag.Int("concurrentWriters", 32, "limit concurrent goroutine writers")
mountOptions.cacheDir = cmdMount.Flag.String("cacheDir", os.TempDir(), "local cache directory for file chunks and meta data")
mountOptions.cacheSizeMB = cmdMount.Flag.Int64("cacheCapacityMB", 0, "local file chunk cache capacity in MB")
mountOptions.cacheSizeMB = cmdMount.Flag.Int64("cacheCapacityMB", 0, "file chunk read cache capacity in MB")
mountOptions.dataCenter = cmdMount.Flag.String("dataCenter", "", "prefer to write to the data center")
mountOptions.allowOthers = cmdMount.Flag.Bool("allowOthers", true, "allows other users to access the file system")
mountOptions.umaskString = cmdMount.Flag.String("umask", "022", "octal umask, e.g., 022, 0111")

3
weed/command/mount_std.go

@ -256,7 +256,8 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
seaweedFileSystem.StartBackgroundTasks()
fmt.Printf("This is SeaweedFS version %s %s %s\n", util.Version(), runtime.GOOS, runtime.GOARCH)
glog.V(0).Infof("mounted %s%s to %v", *option.filer, mountRoot, dir)
glog.V(0).Infof("This is SeaweedFS version %s %s %s", util.Version(), runtime.GOOS, runtime.GOARCH)
server.Serve()

2
weed/command/s3.go

@ -163,7 +163,7 @@ func (s3opt *S3Options) startS3Server() bool {
var metricsIntervalSec int
for {
err := pb.WithGrpcFilerClient(false, filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
err := pb.WithGrpcFilerClient(false, 0, filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
if err != nil {
return fmt.Errorf("get filer %s configuration: %v", filerAddress, err)

30
weed/command/scaffold/filer.toml

@ -41,13 +41,13 @@ enabled = false
dbFile = "./filer.db" # sqlite db file
[mysql] # or memsql, tidb
# CREATE TABLE IF NOT EXISTS filemeta (
# dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field',
# name VARCHAR(1000) BINARY COMMENT 'directory or file name',
# directory TEXT BINARY COMMENT 'full path to parent directory',
# meta LONGBLOB,
# PRIMARY KEY (dirhash, name)
# ) DEFAULT CHARSET=utf8;
# CREATE TABLE IF NOT EXISTS `filemeta` (
# `dirhash` BIGINT NOT NULL COMMENT 'first 64 bits of MD5 hash value of directory field',
# `name` VARCHAR(766) NOT NULL COMMENT 'directory or file name',
# `directory` TEXT NOT NULL COMMENT 'full path to parent directory',
# `meta` LONGBLOB,
# PRIMARY KEY (`dirhash`, `name`)
# ) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
enabled = false
hostname = "localhost"
@ -61,18 +61,18 @@ connection_max_lifetime_seconds = 0
interpolateParams = false
# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
enableUpsert = true
upsertQuery = """INSERT INTO `%s` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)"""
upsertQuery = """INSERT INTO `%s` (`dirhash`,`name`,`directory`,`meta`) VALUES (?,?,?,?) AS `new` ON DUPLICATE KEY UPDATE `meta` = `new`.`meta`"""
[mysql2] # or memsql, tidb
enabled = false
createTable = """
CREATE TABLE IF NOT EXISTS `%s` (
dirhash BIGINT,
name VARCHAR(1000) BINARY,
directory TEXT BINARY,
meta LONGBLOB,
PRIMARY KEY (dirhash, name)
) DEFAULT CHARSET=utf8;
`dirhash` BIGINT NOT NULL,
`name` VARCHAR(766) NOT NULL,
`directory` TEXT NOT NULL,
`meta` LONGBLOB,
PRIMARY KEY (`dirhash`, `name`)
) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
"""
hostname = "localhost"
port = 3306
@ -85,7 +85,7 @@ connection_max_lifetime_seconds = 0
interpolateParams = false
# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
enableUpsert = true
upsertQuery = """INSERT INTO `%s` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)"""
upsertQuery = """INSERT INTO `%s` (`dirhash`,`name`,`directory`,`meta`) VALUES (?,?,?,?) AS `new` ON DUPLICATE KEY UPDATE `meta` = `new`.`meta`"""
[postgres] # or cockroachdb, YugabyteDB
# CREATE TABLE IF NOT EXISTS filemeta (

1
weed/command/scaffold/replication.toml

@ -68,6 +68,7 @@ is_incremental = false
enabled = false
b2_account_id = ""
b2_master_application_key = ""
b2_region = ""
bucket = "mybucket" # an existing bucket
directory = "/" # destination directory
is_incremental = false

1
weed/command/server.go

@ -156,6 +156,7 @@ func init() {
webdavOptions.tlsCertificate = cmdServer.Flag.String("webdav.cert.file", "", "path to the TLS certificate file")
webdavOptions.cacheDir = cmdServer.Flag.String("webdav.cacheDir", os.TempDir(), "local cache directory for file chunks")
webdavOptions.cacheSizeMB = cmdServer.Flag.Int64("webdav.cacheCapacityMB", 0, "local cache capacity in MB")
webdavOptions.filerRootPath = cmdServer.Flag.String("webdav.filer.path", "/", "use this remote path from filer server")
mqBrokerOptions.port = cmdServer.Flag.Int("mq.broker.port", 17777, "message queue broker gRPC listen port")

2
weed/command/webdav.go

@ -87,7 +87,7 @@ func (wo *WebDavOption) startWebDav() bool {
var cipher bool
// connect to filer
for {
err := pb.WithGrpcFilerClient(false, filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
err := pb.WithGrpcFilerClient(false, 0, filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
if err != nil {
return fmt.Errorf("get filer %s configuration: %v", filerAddress, err)

2
weed/filer/configuration.go

@ -33,7 +33,7 @@ func (f *Filer) LoadConfiguration(config *util.ViperProxy) (isFresh bool) {
if !hasDefaultStoreConfigured {
println()
println("Supported filer stores are:")
println("Supported filer stores are the following. If not found, check the full version.")
for _, store := range Stores {
println(" " + store.GetName())
}

155
weed/filer/filechunk_group.go

@ -0,0 +1,155 @@
package filer
import (
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util/chunk_cache"
"github.com/seaweedfs/seaweedfs/weed/wdclient"
"sync"
)
type ChunkGroup struct {
lookupFn wdclient.LookupFileIdFunctionType
chunkCache chunk_cache.ChunkCache
sections map[SectionIndex]*FileChunkSection
sectionsLock sync.RWMutex
readerCache *ReaderCache
}
func NewChunkGroup(lookupFn wdclient.LookupFileIdFunctionType, chunkCache chunk_cache.ChunkCache, chunks []*filer_pb.FileChunk) (*ChunkGroup, error) {
group := &ChunkGroup{
lookupFn: lookupFn,
chunkCache: chunkCache,
sections: make(map[SectionIndex]*FileChunkSection),
readerCache: NewReaderCache(32, chunkCache, lookupFn),
}
err := group.SetChunks(chunks)
return group, err
}
func (group *ChunkGroup) AddChunk(chunk *filer_pb.FileChunk) error {
group.sectionsLock.Lock()
defer group.sectionsLock.Unlock()
sectionIndexStart, sectionIndexStop := SectionIndex(chunk.Offset/SectionSize), SectionIndex((chunk.Offset+int64(chunk.Size))/SectionSize)
for si := sectionIndexStart; si < sectionIndexStop+1; si++ {
section, found := group.sections[si]
if !found {
section = NewFileChunkSection(si)
group.sections[si] = section
}
section.addChunk(chunk)
}
return nil
}
func (group *ChunkGroup) ReadDataAt(fileSize int64, buff []byte, offset int64) (n int, tsNs int64, err error) {
group.sectionsLock.RLock()
defer group.sectionsLock.RUnlock()
sectionIndexStart, sectionIndexStop := SectionIndex(offset/SectionSize), SectionIndex((offset+int64(len(buff)))/SectionSize)
for si := sectionIndexStart; si < sectionIndexStop+1; si++ {
section, found := group.sections[si]
rangeStart, rangeStop := max(offset, int64(si*SectionSize)), min(offset+int64(len(buff)), int64((si+1)*SectionSize))
if !found {
for i := rangeStart; i < rangeStop; i++ {
buff[i-offset] = 0
}
continue
}
xn, xTsNs, xErr := section.readDataAt(group, fileSize, buff[rangeStart-offset:rangeStop-offset], rangeStart)
if xErr != nil {
err = xErr
}
n += xn
tsNs = max(tsNs, xTsNs)
}
return
}
func (group *ChunkGroup) SetChunks(chunks []*filer_pb.FileChunk) error {
group.sectionsLock.RLock()
defer group.sectionsLock.RUnlock()
var dataChunks []*filer_pb.FileChunk
for _, chunk := range chunks {
if !chunk.IsChunkManifest {
dataChunks = append(dataChunks, chunk)
continue
}
resolvedChunks, err := ResolveOneChunkManifest(group.lookupFn, chunk)
if err != nil {
return err
}
dataChunks = append(dataChunks, resolvedChunks...)
}
sections := make(map[SectionIndex]*FileChunkSection)
for _, chunk := range dataChunks {
sectionIndexStart, sectionIndexStop := SectionIndex(chunk.Offset/SectionSize), SectionIndex((chunk.Offset+int64(chunk.Size))/SectionSize)
for si := sectionIndexStart; si < sectionIndexStop+1; si++ {
section, found := sections[si]
if !found {
section = NewFileChunkSection(si)
sections[si] = section
}
section.chunks = append(section.chunks, chunk)
}
}
group.sections = sections
return nil
}
const (
// see weedfs_file_lseek.go
SEEK_DATA uint32 = 3 // seek to next data after the offset
// SEEK_HOLE uint32 = 4 // seek to next hole after the offset
)
// FIXME: needs tests
func (group *ChunkGroup) SearchChunks(offset, fileSize int64, whence uint32) (found bool, out int64) {
group.sectionsLock.RLock()
defer group.sectionsLock.RUnlock()
return group.doSearchChunks(offset, fileSize, whence)
}
func (group *ChunkGroup) doSearchChunks(offset, fileSize int64, whence uint32) (found bool, out int64) {
sectionIndex, maxSectionIndex := SectionIndex(offset/SectionSize), SectionIndex(fileSize/SectionSize)
if whence == SEEK_DATA {
for si := sectionIndex; si < maxSectionIndex+1; si++ {
section, foundSection := group.sections[si]
if !foundSection {
continue
}
sectionStart := section.DataStartOffset(group, offset, fileSize)
if sectionStart == -1 {
continue
}
return true, sectionStart
}
return false, 0
} else {
// whence == SEEK_HOLE
for si := sectionIndex; si < maxSectionIndex; si++ {
section, foundSection := group.sections[si]
if !foundSection {
return true, offset
}
holeStart := section.NextStopOffset(group, offset, fileSize)
if holeStart%SectionSize == 0 {
continue
}
return true, holeStart
}
return true, fileSize
}
}

36
weed/filer/filechunk_group_test.go

@ -0,0 +1,36 @@
package filer
import (
"github.com/stretchr/testify/assert"
"testing"
)
func TestChunkGroup_doSearchChunks(t *testing.T) {
type fields struct {
sections map[SectionIndex]*FileChunkSection
}
type args struct {
offset int64
fileSize int64
whence uint32
}
tests := []struct {
name string
fields fields
args args
wantFound bool
wantOut int64
}{
// TODO: Add test cases.
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
group := &ChunkGroup{
sections: tt.fields.sections,
}
gotFound, gotOut := group.doSearchChunks(tt.args.offset, tt.args.fileSize, tt.args.whence)
assert.Equalf(t, tt.wantFound, gotFound, "doSearchChunks(%v, %v, %v)", tt.args.offset, tt.args.fileSize, tt.args.whence)
assert.Equalf(t, tt.wantOut, gotOut, "doSearchChunks(%v, %v, %v)", tt.args.offset, tt.args.fileSize, tt.args.whence)
})
}
}

4
weed/filer/filechunk_manifest.go

@ -264,7 +264,7 @@ func mergeIntoManifest(saveFunc SaveDataAsChunkFunctionType, dataChunks []*filer
}
}
manifestChunk, err = saveFunc(bytes.NewReader(data), "", 0)
manifestChunk, err = saveFunc(bytes.NewReader(data), "", 0, 0)
if err != nil {
return nil, err
}
@ -275,4 +275,4 @@ func mergeIntoManifest(saveFunc SaveDataAsChunkFunctionType, dataChunks []*filer
return
}
type SaveDataAsChunkFunctionType func(reader io.Reader, name string, offset int64) (chunk *filer_pb.FileChunk, err error)
type SaveDataAsChunkFunctionType func(reader io.Reader, name string, offset int64, tsNs int64) (chunk *filer_pb.FileChunk, err error)

134
weed/filer/filechunk_section.go

@ -0,0 +1,134 @@
package filer
import (
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"sync"
)
const SectionSize = 2 * 1024 * 1024 * 32 // 64MiB
type SectionIndex int64
type FileChunkSection struct {
sectionIndex SectionIndex
chunks []*filer_pb.FileChunk
visibleIntervals *IntervalList[*VisibleInterval]
chunkViews *IntervalList[*ChunkView]
reader *ChunkReadAt
lock sync.Mutex
}
func NewFileChunkSection(si SectionIndex) *FileChunkSection {
return &FileChunkSection{
sectionIndex: si,
}
}
func (section *FileChunkSection) addChunk(chunk *filer_pb.FileChunk) error {
section.lock.Lock()
defer section.lock.Unlock()
start, stop := max(int64(section.sectionIndex)*SectionSize, chunk.Offset), min(((int64(section.sectionIndex)+1)*SectionSize), chunk.Offset+int64(chunk.Size))
section.chunks = append(section.chunks, chunk)
if section.visibleIntervals == nil {
section.visibleIntervals = readResolvedChunks(section.chunks, int64(section.sectionIndex)*SectionSize, (int64(section.sectionIndex)+1)*SectionSize)
} else {
MergeIntoVisibles(section.visibleIntervals, start, stop, chunk)
garbageFileIds := FindGarbageChunks(section.visibleIntervals, start, stop)
removeGarbageChunks(section, garbageFileIds)
}
if section.chunkViews != nil {
MergeIntoChunkViews(section.chunkViews, start, stop, chunk)
}
return nil
}
func removeGarbageChunks(section *FileChunkSection, garbageFileIds map[string]struct{}) {
for i := 0; i < len(section.chunks); {
t := section.chunks[i]
length := len(section.chunks)
if _, found := garbageFileIds[t.FileId]; found {
if i < length-1 {
section.chunks[i] = section.chunks[length-1]
}
section.chunks = section.chunks[:length-1]
} else {
i++
}
}
}
func (section *FileChunkSection) setupForRead(group *ChunkGroup, fileSize int64) {
if section.visibleIntervals == nil {
section.visibleIntervals = readResolvedChunks(section.chunks, int64(section.sectionIndex)*SectionSize, (int64(section.sectionIndex)+1)*SectionSize)
section.chunks, _ = SeparateGarbageChunks(section.visibleIntervals, section.chunks)
if section.reader != nil {
_ = section.reader.Close()
section.reader = nil
}
}
if section.chunkViews == nil {
section.chunkViews = ViewFromVisibleIntervals(section.visibleIntervals, int64(section.sectionIndex)*SectionSize, (int64(section.sectionIndex)+1)*SectionSize)
}
if section.reader == nil {
section.reader = NewChunkReaderAtFromClient(group.readerCache, section.chunkViews, min(int64(section.sectionIndex+1)*SectionSize, fileSize))
}
section.reader.fileSize = fileSize
}
func (section *FileChunkSection) readDataAt(group *ChunkGroup, fileSize int64, buff []byte, offset int64) (n int, tsNs int64, err error) {
section.lock.Lock()
defer section.lock.Unlock()
section.setupForRead(group, fileSize)
return section.reader.ReadAtWithTime(buff, offset)
}
func (section *FileChunkSection) DataStartOffset(group *ChunkGroup, offset int64, fileSize int64) int64 {
section.lock.Lock()
defer section.lock.Unlock()
section.setupForRead(group, fileSize)
for x := section.visibleIntervals.Front(); x != nil; x = x.Next {
visible := x.Value
if visible.stop <= offset {
continue
}
if offset < visible.start {
return offset
}
return offset
}
return -1
}
func (section *FileChunkSection) NextStopOffset(group *ChunkGroup, offset int64, fileSize int64) int64 {
section.lock.Lock()
defer section.lock.Unlock()
section.setupForRead(group, fileSize)
isAfterOffset := false
for x := section.visibleIntervals.Front(); x != nil; x = x.Next {
visible := x.Value
if !isAfterOffset {
if visible.stop <= offset {
continue
}
isAfterOffset = true
}
if offset < visible.start {
return offset
}
// now visible.start <= offset
if offset < visible.stop {
offset = visible.stop
}
}
return offset
}

48
weed/filer/filechunk_section_test.go

@ -0,0 +1,48 @@
package filer
import (
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"testing"
)
func Test_removeGarbageChunks(t *testing.T) {
section := NewFileChunkSection(0)
section.chunks = append(section.chunks, &filer_pb.FileChunk{
FileId: "0",
Offset: 0,
Size: 1,
ModifiedTsNs: 0,
})
section.chunks = append(section.chunks, &filer_pb.FileChunk{
FileId: "1",
Offset: 1,
Size: 1,
ModifiedTsNs: 1,
})
section.chunks = append(section.chunks, &filer_pb.FileChunk{
FileId: "2",
Offset: 2,
Size: 1,
ModifiedTsNs: 2,
})
section.chunks = append(section.chunks, &filer_pb.FileChunk{
FileId: "3",
Offset: 3,
Size: 1,
ModifiedTsNs: 3,
})
section.chunks = append(section.chunks, &filer_pb.FileChunk{
FileId: "4",
Offset: 4,
Size: 1,
ModifiedTsNs: 4,
})
garbageFileIds := make(map[string]struct{})
garbageFileIds["0"] = struct{}{}
garbageFileIds["2"] = struct{}{}
garbageFileIds["4"] = struct{}{}
removeGarbageChunks(section, garbageFileIds)
if len(section.chunks) != 2 {
t.Errorf("remove chunk 2 failed")
}
}

218
weed/filer/filechunks.go

@ -4,7 +4,6 @@ import (
"bytes"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/wdclient"
"golang.org/x/exp/slices"
"math"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
@ -66,8 +65,15 @@ func CompactFileChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks
visibles, _ := NonOverlappingVisibleIntervals(lookupFileIdFn, chunks, 0, math.MaxInt64)
compacted, garbage = SeparateGarbageChunks(visibles, chunks)
return
}
func SeparateGarbageChunks(visibles *IntervalList[*VisibleInterval], chunks []*filer_pb.FileChunk) (compacted []*filer_pb.FileChunk, garbage []*filer_pb.FileChunk) {
fileIds := make(map[string]bool)
for _, interval := range visibles {
for x := visibles.Front(); x != nil; x = x.Next {
interval := x.Value
fileIds[interval.fileId] = true
}
for _, chunk := range chunks {
@ -77,7 +83,18 @@ func CompactFileChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks
garbage = append(garbage, chunk)
}
}
return compacted, garbage
}
func FindGarbageChunks(visibles *IntervalList[*VisibleInterval], start int64, stop int64) (garbageFileIds map[string]struct{}) {
garbageFileIds = make(map[string]struct{})
for x := visibles.Front(); x != nil; x = x.Next {
interval := x.Value
offset := interval.start - interval.offsetInChunk
if start <= offset && offset+int64(interval.chunkSize) <= stop {
garbageFileIds[interval.fileId] = struct{}{}
}
}
return
}
@ -132,19 +149,38 @@ func DoMinusChunksBySourceFileId(as, bs []*filer_pb.FileChunk) (delta []*filer_p
type ChunkView struct {
FileId string
Offset int64
Size uint64
LogicOffset int64 // actual offset in the file, for the data specified via [offset, offset+size) in current chunk
OffsetInChunk int64 // offset within the chunk
ViewSize uint64
ViewOffset int64 // actual offset in the file, for the data specified via [offset, offset+size) in current chunk
ChunkSize uint64
CipherKey []byte
IsGzipped bool
ModifiedTsNs int64
}
func (cv *ChunkView) SetStartStop(start, stop int64) {
cv.OffsetInChunk += start - cv.ViewOffset
cv.ViewOffset = start
cv.ViewSize = uint64(stop - start)
}
func (cv *ChunkView) Clone() IntervalValue {
return &ChunkView{
FileId: cv.FileId,
OffsetInChunk: cv.OffsetInChunk,
ViewSize: cv.ViewSize,
ViewOffset: cv.ViewOffset,
ChunkSize: cv.ChunkSize,
CipherKey: cv.CipherKey,
IsGzipped: cv.IsGzipped,
ModifiedTsNs: cv.ModifiedTsNs,
}
}
func (cv *ChunkView) IsFullChunk() bool {
return cv.Size == cv.ChunkSize
return cv.ViewSize == cv.ChunkSize
}
func ViewFromChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, offset int64, size int64) (views []*ChunkView) {
func ViewFromChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, offset int64, size int64) (chunkViews *IntervalList[*ChunkView]) {
visibles, _ := NonOverlappingVisibleIntervals(lookupFileIdFn, chunks, offset, offset+size)
@ -152,7 +188,7 @@ func ViewFromChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*
}
func ViewFromVisibleIntervals(visibles []VisibleInterval, offset int64, size int64) (views []*ChunkView) {
func ViewFromVisibleIntervals(visibles *IntervalList[*VisibleInterval], offset int64, size int64) (chunkViews *IntervalList[*ChunkView]) {
stop := offset + size
if size == math.MaxInt64 {
@ -162,138 +198,82 @@ func ViewFromVisibleIntervals(visibles []VisibleInterval, offset int64, size int
stop = math.MaxInt64
}
for _, chunk := range visibles {
chunkViews = NewIntervalList[*ChunkView]()
for x := visibles.Front(); x != nil; x = x.Next {
chunk := x.Value
chunkStart, chunkStop := max(offset, chunk.start), min(stop, chunk.stop)
if chunkStart < chunkStop {
views = append(views, &ChunkView{
chunkView := &ChunkView{
FileId: chunk.fileId, FileId: chunk.fileId,
Offset: chunkStart - chunk.start + chunk.chunkOffset,
Size: uint64(chunkStop - chunkStart),
LogicOffset: chunkStart,
OffsetInChunk: chunkStart - chunk.start + chunk.offsetInChunk,
ViewSize: uint64(chunkStop - chunkStart),
ViewOffset: chunkStart,
ChunkSize: chunk.chunkSize,
CipherKey: chunk.cipherKey,
IsGzipped: chunk.isGzipped,
ModifiedTsNs: chunk.modifiedTsNs,
}
chunkViews.AppendInterval(&Interval[*ChunkView]{
StartOffset: chunkStart,
StopOffset: chunkStop,
TsNs: chunk.modifiedTsNs,
Value: chunkView,
Prev: nil,
Next: nil,
})
}
}
return views
return chunkViews
}
func logPrintf(name string, visibles []VisibleInterval) {
func MergeIntoVisibles(visibles *IntervalList[*VisibleInterval], start int64, stop int64, chunk *filer_pb.FileChunk) {
/*
glog.V(0).Infof("%s len %d", name, len(visibles))
for _, v := range visibles {
glog.V(0).Infof("%s: [%d,%d) %s %d", name, v.start, v.stop, v.fileId, v.chunkOffset)
newV := &VisibleInterval{
start: start,
stop: stop,
fileId: chunk.GetFileIdString(),
modifiedTsNs: chunk.ModifiedTsNs,
offsetInChunk: start - chunk.Offset, // the starting position in the chunk
chunkSize: chunk.Size, // size of the chunk
cipherKey: chunk.CipherKey,
isGzipped: chunk.IsCompressed,
}
*/
}
func MergeIntoVisibles(visibles []VisibleInterval, chunk *filer_pb.FileChunk) (newVisibles []VisibleInterval) {
newV := newVisibleInterval(chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.ModifiedTsNs, 0, chunk.Size, chunk.CipherKey, chunk.IsCompressed)
visibles.InsertInterval(start, stop, chunk.ModifiedTsNs, newV)
}
length := len(visibles)
if length == 0 {
return append(visibles, newV)
}
last := visibles[length-1]
if last.stop <= chunk.Offset {
return append(visibles, newV)
}
func MergeIntoChunkViews(chunkViews *IntervalList[*ChunkView], start int64, stop int64, chunk *filer_pb.FileChunk) {
logPrintf(" before", visibles)
// glog.V(0).Infof("newVisibles %d adding chunk [%d,%d) %s size:%d", len(newVisibles), chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.Size)
chunkStop := chunk.Offset + int64(chunk.Size)
for _, v := range visibles {
if v.start < chunk.Offset && chunk.Offset < v.stop {
t := newVisibleInterval(v.start, chunk.Offset, v.fileId, v.modifiedTsNs, v.chunkOffset, v.chunkSize, v.cipherKey, v.isGzipped)
newVisibles = append(newVisibles, t)
// glog.V(0).Infof("visible %d [%d,%d) =1> [%d,%d)", i, v.start, v.stop, t.start, t.stop)
chunkView := &ChunkView{
FileId: chunk.GetFileIdString(),
OffsetInChunk: start - chunk.Offset,
ViewSize: uint64(stop - start),
ViewOffset: start,
ChunkSize: chunk.Size,
CipherKey: chunk.CipherKey,
IsGzipped: chunk.IsCompressed,
ModifiedTsNs: chunk.ModifiedTsNs,
} }
if v.start < chunkStop && chunkStop < v.stop {
t := newVisibleInterval(chunkStop, v.stop, v.fileId, v.modifiedTsNs, v.chunkOffset+(chunkStop-v.start), v.chunkSize, v.cipherKey, v.isGzipped)
newVisibles = append(newVisibles, t)
// glog.V(0).Infof("visible %d [%d,%d) =2> [%d,%d)", i, v.start, v.stop, t.start, t.stop)
}
if chunkStop <= v.start || v.stop <= chunk.Offset {
newVisibles = append(newVisibles, v)
// glog.V(0).Infof("visible %d [%d,%d) =3> [%d,%d)", i, v.start, v.stop, v.start, v.stop)
}
}
newVisibles = append(newVisibles, newV)
logPrintf(" append", newVisibles)
for i := len(newVisibles) - 1; i >= 0; i-- {
if i > 0 && newV.start < newVisibles[i-1].start {
newVisibles[i] = newVisibles[i-1]
} else {
newVisibles[i] = newV
break
}
}
logPrintf(" sorted", newVisibles)
return newVisibles
chunkViews.InsertInterval(start, stop, chunk.ModifiedTsNs, chunkView)
} }
// NonOverlappingVisibleIntervals translates the file chunk into VisibleInterval in memory // NonOverlappingVisibleIntervals translates the file chunk into VisibleInterval in memory
// If the file chunk content is a chunk manifest // If the file chunk content is a chunk manifest
func NonOverlappingVisibleIntervals(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, startOffset int64, stopOffset int64) (visibles []VisibleInterval, err error) {
func NonOverlappingVisibleIntervals(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, startOffset int64, stopOffset int64) (visibles *IntervalList[*VisibleInterval], err error) {
chunks, _, err = ResolveChunkManifest(lookupFileIdFn, chunks, startOffset, stopOffset) chunks, _, err = ResolveChunkManifest(lookupFileIdFn, chunks, startOffset, stopOffset)
if err != nil { if err != nil {
return return
} }
visibles2 := readResolvedChunks(chunks)
visibles2 := readResolvedChunks(chunks, 0, math.MaxInt64)
if true {
return visibles2, err return visibles2, err
}
slices.SortFunc(chunks, func(a, b *filer_pb.FileChunk) bool {
if a.ModifiedTsNs == b.ModifiedTsNs {
filer_pb.EnsureFid(a)
filer_pb.EnsureFid(b)
if a.Fid == nil || b.Fid == nil {
return true
}
return a.Fid.FileKey < b.Fid.FileKey
}
return a.ModifiedTsNs < b.ModifiedTsNs
})
for _, chunk := range chunks {
// glog.V(0).Infof("merge [%d,%d)", chunk.Offset, chunk.Offset+int64(chunk.Size))
visibles = MergeIntoVisibles(visibles, chunk)
logPrintf("add", visibles)
}
if len(visibles) != len(visibles2) {
fmt.Printf("different visibles size %d : %d\n", len(visibles), len(visibles2))
} else {
for i := 0; i < len(visibles); i++ {
checkDifference(visibles[i], visibles2[i])
}
}
return
}
func checkDifference(x, y VisibleInterval) {
if x.start != y.start ||
x.stop != y.stop ||
x.fileId != y.fileId ||
x.modifiedTsNs != y.modifiedTsNs {
fmt.Printf("different visible %+v : %+v\n", x, y)
}
} }
// find non-overlapping visible intervals // find non-overlapping visible intervals
@ -304,22 +284,26 @@ type VisibleInterval struct {
stop int64 stop int64
modifiedTsNs int64 modifiedTsNs int64
fileId string fileId string
chunkOffset int64
offsetInChunk int64
chunkSize uint64 chunkSize uint64
cipherKey []byte cipherKey []byte
isGzipped bool isGzipped bool
} }
func newVisibleInterval(start, stop int64, fileId string, modifiedTime int64, chunkOffset int64, chunkSize uint64, cipherKey []byte, isGzipped bool) VisibleInterval {
return VisibleInterval{
start: start,
stop: stop,
fileId: fileId,
modifiedTsNs: modifiedTime,
chunkOffset: chunkOffset, // the starting position in the chunk
chunkSize: chunkSize,
cipherKey: cipherKey,
isGzipped: isGzipped,
func (v *VisibleInterval) SetStartStop(start, stop int64) {
v.offsetInChunk += start - v.start
v.start, v.stop = start, stop
}
func (v *VisibleInterval) Clone() IntervalValue {
return &VisibleInterval{
start: v.start,
stop: v.stop,
modifiedTsNs: v.modifiedTsNs,
fileId: v.fileId,
offsetInChunk: v.offsetInChunk,
chunkSize: v.chunkSize,
cipherKey: v.cipherKey,
isGzipped: v.isGzipped,
} }
} }
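Note: SetStartStop and Clone are what let the generic IntervalList trim and split VisibleInterval values; SetStartStop shifts offsetInChunk by the amount the start moves. A small worked sketch (values made up for illustration):
// Sketch: how SetStartStop trims a visible interval when a newer write overlaps its left edge.
func exampleTrim() {
	v := &VisibleInterval{start: 100, stop: 200, fileId: "3,abc", offsetInChunk: 0}
	v.SetStartStop(150, 200) // keep only [150,200)
	// v.offsetInChunk is now 50: the first 50 bytes of the chunk are no longer visible here.
	_ = v
}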

90
weed/filer/filechunks_read.go

@ -1,14 +1,22 @@
package filer package filer
import ( import (
"container/list"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"golang.org/x/exp/slices" "golang.org/x/exp/slices"
) )
func readResolvedChunks(chunks []*filer_pb.FileChunk) (visibles []VisibleInterval) {
func readResolvedChunks(chunks []*filer_pb.FileChunk, startOffset int64, stopOffset int64) (visibles *IntervalList[*VisibleInterval]) {
var points []*Point var points []*Point
for _, chunk := range chunks { for _, chunk := range chunks {
if chunk.IsChunkManifest {
println("This should not happen! A manifest chunk found:", chunk.GetFileIdString())
}
start, stop := max(chunk.Offset, startOffset), min(chunk.Offset+int64(chunk.Size), stopOffset)
if start >= stop {
continue
}
points = append(points, &Point{ points = append(points, &Point{
x: chunk.Offset, x: chunk.Offset,
ts: chunk.ModifiedTsNs, ts: chunk.ModifiedTsNs,
@ -33,40 +41,45 @@ func readResolvedChunks(chunks []*filer_pb.FileChunk) (visibles []VisibleInterva
}) })
var prevX int64 var prevX int64
var queue []*Point
queue := list.New() // points with higher ts are at the tail
visibles = NewIntervalList[*VisibleInterval]()
var prevPoint *Point
for _, point := range points { for _, point := range points {
if queue.Len() > 0 {
prevPoint = queue.Back().Value.(*Point)
} else {
prevPoint = nil
}
if point.isStart { if point.isStart {
if len(queue) > 0 {
lastIndex := len(queue) - 1
lastPoint := queue[lastIndex]
if point.x != prevX && lastPoint.ts < point.ts {
visibles = addToVisibles(visibles, prevX, lastPoint, point)
if prevPoint != nil {
if point.x != prevX && prevPoint.ts < point.ts {
addToVisibles(visibles, prevX, prevPoint, point)
prevX = point.x prevX = point.x
} }
} }
// insert into queue // insert into queue
for i := len(queue); i >= 0; i-- {
if i == 0 || queue[i-1].ts <= point.ts {
if i == len(queue) {
if prevPoint == nil || prevPoint.ts < point.ts {
queue.PushBack(point)
prevX = point.x prevX = point.x
}
queue = addToQueue(queue, i, point)
} else {
for e := queue.Front(); e != nil; e = e.Next() {
if e.Value.(*Point).ts > point.ts {
queue.InsertBefore(point, e)
break break
} }
} }
}
} else { } else {
lastIndex := len(queue) - 1
index := lastIndex
var startPoint *Point
for ; index >= 0; index-- {
startPoint = queue[index]
if startPoint.ts == point.ts {
queue = removeFromQueue(queue, index)
isLast := true
for e := queue.Back(); e != nil; e = e.Prev() {
if e.Value.(*Point).ts == point.ts {
queue.Remove(e)
break break
} }
isLast = false
} }
if index == lastIndex && startPoint != nil {
visibles = addToVisibles(visibles, prevX, startPoint, point)
if isLast && prevPoint != nil {
addToVisibles(visibles, prevX, prevPoint, point)
prevX = point.x prevX = point.x
} }
} }
@ -75,37 +88,30 @@ func readResolvedChunks(chunks []*filer_pb.FileChunk) (visibles []VisibleInterva
return return
} }
func removeFromQueue(queue []*Point, index int) []*Point {
for i := index; i < len(queue)-1; i++ {
queue[i] = queue[i+1]
}
queue = queue[:len(queue)-1]
return queue
}
func addToQueue(queue []*Point, index int, point *Point) []*Point {
queue = append(queue, point)
for i := len(queue) - 1; i > index; i-- {
queue[i], queue[i-1] = queue[i-1], queue[i]
}
return queue
}
func addToVisibles(visibles []VisibleInterval, prevX int64, startPoint *Point, point *Point) []VisibleInterval {
func addToVisibles(visibles *IntervalList[*VisibleInterval], prevX int64, startPoint *Point, point *Point) {
if prevX < point.x { if prevX < point.x {
chunk := startPoint.chunk chunk := startPoint.chunk
visibles = append(visibles, VisibleInterval{
visible := &VisibleInterval{
start: prevX, start: prevX,
stop: point.x, stop: point.x,
fileId: chunk.GetFileIdString(), fileId: chunk.GetFileIdString(),
modifiedTsNs: chunk.ModifiedTsNs, modifiedTsNs: chunk.ModifiedTsNs,
chunkOffset: prevX - chunk.Offset,
offsetInChunk: prevX - chunk.Offset,
chunkSize: chunk.Size, chunkSize: chunk.Size,
cipherKey: chunk.CipherKey, cipherKey: chunk.CipherKey,
isGzipped: chunk.IsCompressed, isGzipped: chunk.IsCompressed,
})
} }
return visibles
appendVisibleInterfal(visibles, visible)
}
}
func appendVisibleInterfal(visibles *IntervalList[*VisibleInterval], visible *VisibleInterval) {
visibles.AppendInterval(&Interval[*VisibleInterval]{
StartOffset: visible.start,
StopOffset: visible.stop,
TsNs: visible.modifiedTsNs,
Value: visible,
})
} }
type Point struct { type Point struct {
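Note: readResolvedChunks is a sweep over chunk start/stop points, keeping a queue ordered by ModifiedTsNs so the newest write wins wherever chunks overlap. A hand-worked sketch using the same chunks as TestReadResolvedChunks2 below:
// Sketch: "c" covers [200,250) at ts 3; "e" covers [200,225) at ts 5, so "e" wins where they overlap.
func exampleResolve() {
	chunks := []*filer_pb.FileChunk{
		{FileId: "c", Offset: 200, Size: 50, ModifiedTsNs: 3},
		{FileId: "e", Offset: 200, Size: 25, ModifiedTsNs: 5},
	}
	visibles := readResolvedChunks(chunks, 0, math.MaxInt64)
	for x := visibles.Front(); x != nil; x = x.Next {
		fmt.Printf("[%d,%d) %s %d\n", x.Value.start, x.Value.stop, x.Value.fileId, x.Value.modifiedTsNs)
	}
	// Prints: [200,225) e 5  then  [225,250) c 3
}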

86
weed/filer/filechunks_read_test.go

@ -3,6 +3,7 @@ package filer
import ( import (
"fmt" "fmt"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"math"
"math/rand" "math/rand"
"testing" "testing"
) )
@ -42,9 +43,38 @@ func TestReadResolvedChunks(t *testing.T) {
}, },
} }
visibles := readResolvedChunks(chunks)
visibles := readResolvedChunks(chunks, 0, math.MaxInt64)
for _, visible := range visibles {
fmt.Printf("resolved to %d visible intervales\n", visibles.Len())
for x := visibles.Front(); x != nil; x = x.Next {
visible := x.Value
fmt.Printf("[%d,%d) %s %d\n", visible.start, visible.stop, visible.fileId, visible.modifiedTsNs)
}
}
func TestReadResolvedChunks2(t *testing.T) {
chunks := []*filer_pb.FileChunk{
{
FileId: "c",
Offset: 200,
Size: 50,
ModifiedTsNs: 3,
},
{
FileId: "e",
Offset: 200,
Size: 25,
ModifiedTsNs: 5,
},
}
visibles := readResolvedChunks(chunks, 0, math.MaxInt64)
fmt.Printf("resolved to %d visible intervales\n", visibles.Len())
for x := visibles.Front(); x != nil; x = x.Next {
visible := x.Value
fmt.Printf("[%d,%d) %s %d\n", visible.start, visible.stop, visible.fileId, visible.modifiedTsNs) fmt.Printf("[%d,%d) %s %d\n", visible.start, visible.stop, visible.fileId, visible.modifiedTsNs)
} }
@ -72,9 +102,10 @@ func TestRandomizedReadResolvedChunks(t *testing.T) {
chunks = append(chunks, randomWrite(array, start, size, ts)) chunks = append(chunks, randomWrite(array, start, size, ts))
} }
visibles := readResolvedChunks(chunks)
visibles := readResolvedChunks(chunks, 0, math.MaxInt64)
for _, visible := range visibles {
for x := visibles.Front(); x != nil; x = x.Next {
visible := x.Value
for i := visible.start; i < visible.stop; i++ { for i := visible.start; i < visible.stop; i++ {
if array[i] != visible.modifiedTsNs { if array[i] != visible.modifiedTsNs {
t.Errorf("position %d expected ts %d actual ts %d", i, array[i], visible.modifiedTsNs) t.Errorf("position %d expected ts %d actual ts %d", i, array[i], visible.modifiedTsNs)
@ -112,9 +143,9 @@ func TestSequentialReadResolvedChunks(t *testing.T) {
}) })
} }
visibles := readResolvedChunks(chunks)
visibles := readResolvedChunks(chunks, 0, math.MaxInt64)
fmt.Printf("visibles %d", len(visibles))
fmt.Printf("visibles %d", visibles.Len())
} }
@ -201,9 +232,48 @@ func TestActualReadResolvedChunks(t *testing.T) {
}, },
} }
visibles := readResolvedChunks(chunks)
visibles := readResolvedChunks(chunks, 0, math.MaxInt64)
for x := visibles.Front(); x != nil; x = x.Next {
visible := x.Value
fmt.Printf("[%d,%d) %s %d\n", visible.start, visible.stop, visible.fileId, visible.modifiedTsNs)
}
}
func TestActualReadResolvedChunks2(t *testing.T) {
chunks := []*filer_pb.FileChunk{
{
FileId: "1,e7b96fef48",
Offset: 0,
Size: 184320,
ModifiedTsNs: 1,
},
{
FileId: "2,22562640b9",
Offset: 184320,
Size: 4096,
ModifiedTsNs: 2,
},
{
FileId: "2,33562640b9",
Offset: 184320,
Size: 4096,
ModifiedTsNs: 4,
},
{
FileId: "4,df033e0fe4",
Offset: 188416,
Size: 2097152,
ModifiedTsNs: 3,
},
}
visibles := readResolvedChunks(chunks, 0, math.MaxInt64)
for _, visible := range visibles {
for x := visibles.Front(); x != nil; x = x.Next {
visible := x.Value
fmt.Printf("[%d,%d) %s %d\n", visible.start, visible.stop, visible.fileId, visible.modifiedTsNs) fmt.Printf("[%d,%d) %s %d\n", visible.start, visible.stop, visible.fileId, visible.modifiedTsNs)
} }
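Note: for the four chunks in TestActualReadResolvedChunks2 above, the sweep should resolve to three intervals; the ts-4 rewrite of [184320,188416) hides the ts-2 chunk, and the large ts-3 chunk starts only after it. Expected result, derived by hand from the offsets above:
// Sketch: hand-resolved visible intervals for the chunks in the test above.
var expectedActual2 = []*VisibleInterval{
	{start: 0, stop: 184320, fileId: "1,e7b96fef48", modifiedTsNs: 1},
	{start: 184320, stop: 188416, fileId: "2,33562640b9", modifiedTsNs: 4},
	{start: 188416, stop: 2285568, fileId: "4,df033e0fe4", modifiedTsNs: 3},
}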

174
weed/filer/filechunks_test.go

@ -92,7 +92,8 @@ func TestRandomFileChunksCompact(t *testing.T) {
visibles, _ := NonOverlappingVisibleIntervals(nil, chunks, 0, math.MaxInt64) visibles, _ := NonOverlappingVisibleIntervals(nil, chunks, 0, math.MaxInt64)
for _, v := range visibles {
for visible := visibles.Front(); visible != nil; visible = visible.Next {
v := visible.Value
for x := v.start; x < v.stop; x++ { for x := v.start; x < v.stop; x++ {
assert.Equal(t, strconv.Itoa(int(data[x])), v.fileId) assert.Equal(t, strconv.Itoa(int(data[x])), v.fileId)
} }
@ -137,7 +138,7 @@ func TestIntervalMerging(t *testing.T) {
}, },
Expected: []*VisibleInterval{ Expected: []*VisibleInterval{
{start: 0, stop: 70, fileId: "b"}, {start: 0, stop: 70, fileId: "b"},
{start: 70, stop: 100, fileId: "a", chunkOffset: 70},
{start: 70, stop: 100, fileId: "a", offsetInChunk: 70},
}, },
}, },
// case 3: updates overwrite full chunks // case 3: updates overwrite full chunks
@ -174,15 +175,15 @@ func TestIntervalMerging(t *testing.T) {
}, },
Expected: []*VisibleInterval{ Expected: []*VisibleInterval{
{start: 0, stop: 200, fileId: "d"}, {start: 0, stop: 200, fileId: "d"},
{start: 200, stop: 220, fileId: "c", chunkOffset: 130},
{start: 200, stop: 220, fileId: "c", offsetInChunk: 130},
}, },
}, },
// case 6: same updates // case 6: same updates
{ {
Chunks: []*filer_pb.FileChunk{ Chunks: []*filer_pb.FileChunk{
{Offset: 0, Size: 100, FileId: "abc", Fid: &filer_pb.FileId{FileKey: 1}, ModifiedTsNs: 123}, {Offset: 0, Size: 100, FileId: "abc", Fid: &filer_pb.FileId{FileKey: 1}, ModifiedTsNs: 123},
{Offset: 0, Size: 100, FileId: "axf", Fid: &filer_pb.FileId{FileKey: 2}, ModifiedTsNs: 123},
{Offset: 0, Size: 100, FileId: "xyz", Fid: &filer_pb.FileId{FileKey: 3}, ModifiedTsNs: 123},
{Offset: 0, Size: 100, FileId: "axf", Fid: &filer_pb.FileId{FileKey: 2}, ModifiedTsNs: 124},
{Offset: 0, Size: 100, FileId: "xyz", Fid: &filer_pb.FileId{FileKey: 3}, ModifiedTsNs: 125},
}, },
Expected: []*VisibleInterval{ Expected: []*VisibleInterval{
{start: 0, stop: 100, fileId: "xyz"}, {start: 0, stop: 100, fileId: "xyz"},
@ -228,11 +229,17 @@ func TestIntervalMerging(t *testing.T) {
for i, testcase := range testcases { for i, testcase := range testcases {
log.Printf("++++++++++ merged test case %d ++++++++++++++++++++", i) log.Printf("++++++++++ merged test case %d ++++++++++++++++++++", i)
intervals, _ := NonOverlappingVisibleIntervals(nil, testcase.Chunks, 0, math.MaxInt64) intervals, _ := NonOverlappingVisibleIntervals(nil, testcase.Chunks, 0, math.MaxInt64)
for x, interval := range intervals {
log.Printf("test case %d, interval %d, start=%d, stop=%d, fileId=%s",
i, x, interval.start, interval.stop, interval.fileId)
}
for x, interval := range intervals {
x := -1
for visible := intervals.Front(); visible != nil; visible = visible.Next {
x++
interval := visible.Value
log.Printf("test case %d, interval start=%d, stop=%d, fileId=%s",
i, interval.start, interval.stop, interval.fileId)
}
x = -1
for visible := intervals.Front(); visible != nil; visible = visible.Next {
x++
interval := visible.Value
if interval.start != testcase.Expected[x].start { if interval.start != testcase.Expected[x].start {
t.Fatalf("failed on test case %d, interval %d, start %d, expect %d", t.Fatalf("failed on test case %d, interval %d, start %d, expect %d",
i, x, interval.start, testcase.Expected[x].start) i, x, interval.start, testcase.Expected[x].start)
@ -245,13 +252,13 @@ func TestIntervalMerging(t *testing.T) {
t.Fatalf("failed on test case %d, interval %d, chunkId %s, expect %s", t.Fatalf("failed on test case %d, interval %d, chunkId %s, expect %s",
i, x, interval.fileId, testcase.Expected[x].fileId) i, x, interval.fileId, testcase.Expected[x].fileId)
} }
if interval.chunkOffset != testcase.Expected[x].chunkOffset {
t.Fatalf("failed on test case %d, interval %d, chunkOffset %d, expect %d",
i, x, interval.chunkOffset, testcase.Expected[x].chunkOffset)
if interval.offsetInChunk != testcase.Expected[x].offsetInChunk {
t.Fatalf("failed on test case %d, interval %d, offsetInChunk %d, expect %d",
i, x, interval.offsetInChunk, testcase.Expected[x].offsetInChunk)
} }
} }
if len(intervals) != len(testcase.Expected) {
t.Fatalf("failed to compact test case %d, len %d expected %d", i, len(intervals), len(testcase.Expected))
if intervals.Len() != len(testcase.Expected) {
t.Fatalf("failed to compact test case %d, len %d expected %d", i, intervals.Len(), len(testcase.Expected))
} }
} }
@ -276,9 +283,9 @@ func TestChunksReading(t *testing.T) {
Offset: 0, Offset: 0,
Size: 250, Size: 250,
Expected: []*ChunkView{ Expected: []*ChunkView{
{Offset: 0, Size: 100, FileId: "abc", LogicOffset: 0},
{Offset: 0, Size: 100, FileId: "asdf", LogicOffset: 100},
{Offset: 0, Size: 50, FileId: "fsad", LogicOffset: 200},
{OffsetInChunk: 0, ViewSize: 100, FileId: "abc", ViewOffset: 0},
{OffsetInChunk: 0, ViewSize: 100, FileId: "asdf", ViewOffset: 100},
{OffsetInChunk: 0, ViewSize: 50, FileId: "fsad", ViewOffset: 200},
}, },
}, },
// case 1: updates overwrite full chunks // case 1: updates overwrite full chunks
@ -290,7 +297,7 @@ func TestChunksReading(t *testing.T) {
Offset: 50, Offset: 50,
Size: 100, Size: 100,
Expected: []*ChunkView{ Expected: []*ChunkView{
{Offset: 50, Size: 100, FileId: "asdf", LogicOffset: 50},
{OffsetInChunk: 50, ViewSize: 100, FileId: "asdf", ViewOffset: 50},
}, },
}, },
// case 2: updates overwrite part of previous chunks // case 2: updates overwrite part of previous chunks
@ -302,8 +309,8 @@ func TestChunksReading(t *testing.T) {
Offset: 30, Offset: 30,
Size: 40, Size: 40,
Expected: []*ChunkView{ Expected: []*ChunkView{
{Offset: 20, Size: 30, FileId: "b", LogicOffset: 30},
{Offset: 57, Size: 10, FileId: "a", LogicOffset: 60},
{OffsetInChunk: 20, ViewSize: 30, FileId: "b", ViewOffset: 30},
{OffsetInChunk: 57, ViewSize: 10, FileId: "a", ViewOffset: 60},
}, },
}, },
// case 3: updates overwrite full chunks // case 3: updates overwrite full chunks
@ -316,8 +323,8 @@ func TestChunksReading(t *testing.T) {
Offset: 0, Offset: 0,
Size: 200, Size: 200,
Expected: []*ChunkView{ Expected: []*ChunkView{
{Offset: 0, Size: 50, FileId: "asdf", LogicOffset: 0},
{Offset: 0, Size: 150, FileId: "xxxx", LogicOffset: 50},
{OffsetInChunk: 0, ViewSize: 50, FileId: "asdf", ViewOffset: 0},
{OffsetInChunk: 0, ViewSize: 150, FileId: "xxxx", ViewOffset: 50},
}, },
}, },
// case 4: updates far away from prev chunks // case 4: updates far away from prev chunks
@ -330,8 +337,8 @@ func TestChunksReading(t *testing.T) {
Offset: 0, Offset: 0,
Size: 400, Size: 400,
Expected: []*ChunkView{ Expected: []*ChunkView{
{Offset: 0, Size: 200, FileId: "asdf", LogicOffset: 0},
{Offset: 0, Size: 150, FileId: "xxxx", LogicOffset: 250},
{OffsetInChunk: 0, ViewSize: 200, FileId: "asdf", ViewOffset: 0},
{OffsetInChunk: 0, ViewSize: 150, FileId: "xxxx", ViewOffset: 250},
}, },
}, },
// case 5: updates overwrite full chunks // case 5: updates overwrite full chunks
@ -345,21 +352,21 @@ func TestChunksReading(t *testing.T) {
Offset: 0, Offset: 0,
Size: 220, Size: 220,
Expected: []*ChunkView{ Expected: []*ChunkView{
{Offset: 0, Size: 200, FileId: "c", LogicOffset: 0},
{Offset: 130, Size: 20, FileId: "b", LogicOffset: 200},
{OffsetInChunk: 0, ViewSize: 200, FileId: "c", ViewOffset: 0},
{OffsetInChunk: 130, ViewSize: 20, FileId: "b", ViewOffset: 200},
}, },
}, },
// case 6: same updates // case 6: same updates
{ {
Chunks: []*filer_pb.FileChunk{ Chunks: []*filer_pb.FileChunk{
{Offset: 0, Size: 100, FileId: "abc", Fid: &filer_pb.FileId{FileKey: 1}, ModifiedTsNs: 123}, {Offset: 0, Size: 100, FileId: "abc", Fid: &filer_pb.FileId{FileKey: 1}, ModifiedTsNs: 123},
{Offset: 0, Size: 100, FileId: "def", Fid: &filer_pb.FileId{FileKey: 2}, ModifiedTsNs: 123},
{Offset: 0, Size: 100, FileId: "xyz", Fid: &filer_pb.FileId{FileKey: 3}, ModifiedTsNs: 123},
{Offset: 0, Size: 100, FileId: "def", Fid: &filer_pb.FileId{FileKey: 2}, ModifiedTsNs: 124},
{Offset: 0, Size: 100, FileId: "xyz", Fid: &filer_pb.FileId{FileKey: 3}, ModifiedTsNs: 125},
}, },
Offset: 0, Offset: 0,
Size: 100, Size: 100,
Expected: []*ChunkView{ Expected: []*ChunkView{
{Offset: 0, Size: 100, FileId: "xyz", LogicOffset: 0},
{OffsetInChunk: 0, ViewSize: 100, FileId: "xyz", ViewOffset: 0},
}, },
}, },
// case 7: edge cases // case 7: edge cases
@ -372,8 +379,8 @@ func TestChunksReading(t *testing.T) {
Offset: 0, Offset: 0,
Size: 200, Size: 200,
Expected: []*ChunkView{ Expected: []*ChunkView{
{Offset: 0, Size: 100, FileId: "abc", LogicOffset: 0},
{Offset: 0, Size: 100, FileId: "asdf", LogicOffset: 100},
{OffsetInChunk: 0, ViewSize: 100, FileId: "abc", ViewOffset: 0},
{OffsetInChunk: 0, ViewSize: 100, FileId: "asdf", ViewOffset: 100},
}, },
}, },
// case 8: edge cases // case 8: edge cases
@ -386,9 +393,9 @@ func TestChunksReading(t *testing.T) {
Offset: 0, Offset: 0,
Size: 300, Size: 300,
Expected: []*ChunkView{ Expected: []*ChunkView{
{Offset: 0, Size: 90, FileId: "abc", LogicOffset: 0},
{Offset: 0, Size: 100, FileId: "asdf", LogicOffset: 90},
{Offset: 0, Size: 110, FileId: "fsad", LogicOffset: 190},
{OffsetInChunk: 0, ViewSize: 90, FileId: "abc", ViewOffset: 0},
{OffsetInChunk: 0, ViewSize: 100, FileId: "asdf", ViewOffset: 90},
{OffsetInChunk: 0, ViewSize: 110, FileId: "fsad", ViewOffset: 190},
}, },
}, },
// case 9: edge cases // case 9: edge cases
@ -404,12 +411,12 @@ func TestChunksReading(t *testing.T) {
Offset: 0, Offset: 0,
Size: 153578836, Size: 153578836,
Expected: []*ChunkView{ Expected: []*ChunkView{
{Offset: 0, Size: 43175936, FileId: "2,111fc2cbfac1", LogicOffset: 0},
{Offset: 0, Size: 52981760 - 43175936, FileId: "2,112a36ea7f85", LogicOffset: 43175936},
{Offset: 0, Size: 72564736 - 52981760, FileId: "4,112d5f31c5e7", LogicOffset: 52981760},
{Offset: 0, Size: 133255168 - 72564736, FileId: "1,113245f0cdb6", LogicOffset: 72564736},
{Offset: 0, Size: 137269248 - 133255168, FileId: "3,1141a70733b5", LogicOffset: 133255168},
{Offset: 0, Size: 153578836 - 137269248, FileId: "1,114201d5bbdb", LogicOffset: 137269248},
{OffsetInChunk: 0, ViewSize: 43175936, FileId: "2,111fc2cbfac1", ViewOffset: 0},
{OffsetInChunk: 0, ViewSize: 52981760 - 43175936, FileId: "2,112a36ea7f85", ViewOffset: 43175936},
{OffsetInChunk: 0, ViewSize: 72564736 - 52981760, FileId: "4,112d5f31c5e7", ViewOffset: 52981760},
{OffsetInChunk: 0, ViewSize: 133255168 - 72564736, FileId: "1,113245f0cdb6", ViewOffset: 72564736},
{OffsetInChunk: 0, ViewSize: 137269248 - 133255168, FileId: "3,1141a70733b5", ViewOffset: 133255168},
{OffsetInChunk: 0, ViewSize: 153578836 - 137269248, FileId: "1,114201d5bbdb", ViewOffset: 137269248},
}, },
}, },
} }
@ -420,28 +427,31 @@ func TestChunksReading(t *testing.T) {
} }
log.Printf("++++++++++ read test case %d ++++++++++++++++++++", i) log.Printf("++++++++++ read test case %d ++++++++++++++++++++", i)
chunks := ViewFromChunks(nil, testcase.Chunks, testcase.Offset, testcase.Size) chunks := ViewFromChunks(nil, testcase.Chunks, testcase.Offset, testcase.Size)
for x, chunk := range chunks {
x := -1
for c := chunks.Front(); c != nil; c = c.Next {
x++
chunk := c.Value
log.Printf("read case %d, chunk %d, offset=%d, size=%d, fileId=%s", log.Printf("read case %d, chunk %d, offset=%d, size=%d, fileId=%s",
i, x, chunk.Offset, chunk.Size, chunk.FileId)
if chunk.Offset != testcase.Expected[x].Offset {
i, x, chunk.OffsetInChunk, chunk.ViewSize, chunk.FileId)
if chunk.OffsetInChunk != testcase.Expected[x].OffsetInChunk {
t.Fatalf("failed on read case %d, chunk %s, Offset %d, expect %d", t.Fatalf("failed on read case %d, chunk %s, Offset %d, expect %d",
i, chunk.FileId, chunk.Offset, testcase.Expected[x].Offset)
i, chunk.FileId, chunk.OffsetInChunk, testcase.Expected[x].OffsetInChunk)
} }
if chunk.Size != testcase.Expected[x].Size {
t.Fatalf("failed on read case %d, chunk %s, Size %d, expect %d",
i, chunk.FileId, chunk.Size, testcase.Expected[x].Size)
if chunk.ViewSize != testcase.Expected[x].ViewSize {
t.Fatalf("failed on read case %d, chunk %s, ViewSize %d, expect %d",
i, chunk.FileId, chunk.ViewSize, testcase.Expected[x].ViewSize)
} }
if chunk.FileId != testcase.Expected[x].FileId { if chunk.FileId != testcase.Expected[x].FileId {
t.Fatalf("failed on read case %d, chunk %d, FileId %s, expect %s", t.Fatalf("failed on read case %d, chunk %d, FileId %s, expect %s",
i, x, chunk.FileId, testcase.Expected[x].FileId) i, x, chunk.FileId, testcase.Expected[x].FileId)
} }
if chunk.LogicOffset != testcase.Expected[x].LogicOffset {
t.Fatalf("failed on read case %d, chunk %d, LogicOffset %d, expect %d",
i, x, chunk.LogicOffset, testcase.Expected[x].LogicOffset)
if chunk.ViewOffset != testcase.Expected[x].ViewOffset {
t.Fatalf("failed on read case %d, chunk %d, ViewOffset %d, expect %d",
i, x, chunk.ViewOffset, testcase.Expected[x].ViewOffset)
} }
} }
if len(chunks) != len(testcase.Expected) {
t.Fatalf("failed to read test case %d, len %d expected %d", i, len(chunks), len(testcase.Expected))
if chunks.Len() != len(testcase.Expected) {
t.Fatalf("failed to read test case %d, len %d expected %d", i, chunks.Len(), len(testcase.Expected))
} }
} }
@ -467,73 +477,79 @@ func BenchmarkCompactFileChunks(b *testing.B) {
} }
} }
func addVisibleInterval(visibles *IntervalList[*VisibleInterval], x *VisibleInterval) {
visibles.AppendInterval(&Interval[*VisibleInterval]{
StartOffset: x.start,
StopOffset: x.stop,
TsNs: x.modifiedTsNs,
Value: x,
})
}
func TestViewFromVisibleIntervals(t *testing.T) { func TestViewFromVisibleIntervals(t *testing.T) {
visibles := []VisibleInterval{
{
visibles := NewIntervalList[*VisibleInterval]()
addVisibleInterval(visibles, &VisibleInterval{
start: 0, start: 0,
stop: 25, stop: 25,
fileId: "fid1", fileId: "fid1",
},
{
})
addVisibleInterval(visibles, &VisibleInterval{
start: 4096, start: 4096,
stop: 8192, stop: 8192,
fileId: "fid2", fileId: "fid2",
},
{
})
addVisibleInterval(visibles, &VisibleInterval{
start: 16384, start: 16384,
stop: 18551, stop: 18551,
fileId: "fid3", fileId: "fid3",
},
}
})
views := ViewFromVisibleIntervals(visibles, 0, math.MaxInt32) views := ViewFromVisibleIntervals(visibles, 0, math.MaxInt32)
if len(views) != len(visibles) {
assert.Equal(t, len(visibles), len(views), "ViewFromVisibleIntervals error")
if views.Len() != visibles.Len() {
assert.Equal(t, visibles.Len(), views.Len(), "ViewFromVisibleIntervals error")
} }
} }
func TestViewFromVisibleIntervals2(t *testing.T) { func TestViewFromVisibleIntervals2(t *testing.T) {
visibles := []VisibleInterval{
{
visibles := NewIntervalList[*VisibleInterval]()
addVisibleInterval(visibles, &VisibleInterval{
start: 344064, start: 344064,
stop: 348160, stop: 348160,
fileId: "fid1", fileId: "fid1",
},
{
})
addVisibleInterval(visibles, &VisibleInterval{
start: 348160, start: 348160,
stop: 356352, stop: 356352,
fileId: "fid2", fileId: "fid2",
},
}
})
views := ViewFromVisibleIntervals(visibles, 0, math.MaxInt32) views := ViewFromVisibleIntervals(visibles, 0, math.MaxInt32)
if len(views) != len(visibles) {
assert.Equal(t, len(visibles), len(views), "ViewFromVisibleIntervals error")
if views.Len() != visibles.Len() {
assert.Equal(t, visibles.Len(), views.Len(), "ViewFromVisibleIntervals error")
} }
} }
func TestViewFromVisibleIntervals3(t *testing.T) { func TestViewFromVisibleIntervals3(t *testing.T) {
visibles := []VisibleInterval{
{
visibles := NewIntervalList[*VisibleInterval]()
addVisibleInterval(visibles, &VisibleInterval{
start: 1000, start: 1000,
stop: 2000, stop: 2000,
fileId: "fid1", fileId: "fid1",
},
{
})
addVisibleInterval(visibles, &VisibleInterval{
start: 3000, start: 3000,
stop: 4000, stop: 4000,
fileId: "fid2", fileId: "fid2",
},
}
})
views := ViewFromVisibleIntervals(visibles, 1700, 1500) views := ViewFromVisibleIntervals(visibles, 1700, 1500)
if len(views) != len(visibles) {
assert.Equal(t, len(visibles), len(views), "ViewFromVisibleIntervals error")
if views.Len() != visibles.Len() {
assert.Equal(t, visibles.Len(), views.Len(), "ViewFromVisibleIntervals error")
} }
} }

9
weed/filer/filer.go

@ -3,14 +3,15 @@ package filer
import ( import (
"context" "context"
"fmt" "fmt"
"github.com/seaweedfs/seaweedfs/weed/cluster"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
"os" "os"
"sort" "sort"
"strings" "strings"
"time" "time"
"github.com/seaweedfs/seaweedfs/weed/cluster"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
"google.golang.org/grpc" "google.golang.org/grpc"
"github.com/seaweedfs/seaweedfs/weed/glog" "github.com/seaweedfs/seaweedfs/weed/glog"
@ -254,8 +255,10 @@ func (f *Filer) ensureParentDirectoryEntry(ctx context.Context, entry *Entry, di
return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr) return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr)
} }
} else { } else {
if !strings.HasPrefix("/"+util.Join(dirParts[:]...), SystemLogDir) {
f.NotifyUpdateEvent(ctx, nil, dirEntry, false, isFromOtherCluster, nil) f.NotifyUpdateEvent(ctx, nil, dirEntry, false, isFromOtherCluster, nil)
} }
}
} else if !dirEntry.IsDirectory() { } else if !dirEntry.IsDirectory() {
glog.Errorf("CreateEntry %s: %s should be a directory", entry.FullPath, dirPath) glog.Errorf("CreateEntry %s: %s should be a directory", entry.FullPath, dirPath)

2
weed/filer/filer_conf.go

@ -32,7 +32,7 @@ type FilerConf struct {
func ReadFilerConf(filerGrpcAddress pb.ServerAddress, grpcDialOption grpc.DialOption, masterClient *wdclient.MasterClient) (*FilerConf, error) { func ReadFilerConf(filerGrpcAddress pb.ServerAddress, grpcDialOption grpc.DialOption, masterClient *wdclient.MasterClient) (*FilerConf, error) {
var buf bytes.Buffer var buf bytes.Buffer
if err := pb.WithGrpcFilerClient(false, filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
if err := pb.WithGrpcFilerClient(false, 0, filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
if masterClient != nil { if masterClient != nil {
return ReadEntry(masterClient, client, DirectoryEtcSeaweedFS, FilerConfName, &buf) return ReadEntry(masterClient, client, DirectoryEtcSeaweedFS, FilerConfName, &buf)
} else { } else {

2
weed/filer/filer_notify_append.go

@ -40,7 +40,7 @@ func (f *Filer) appendToFile(targetFile string, data []byte) error {
} }
// append to existing chunks // append to existing chunks
entry.Chunks = append(entry.GetChunks(), uploadResult.ToPbFileChunk(assignResult.Fid, offset))
entry.Chunks = append(entry.GetChunks(), uploadResult.ToPbFileChunk(assignResult.Fid, offset, time.Now().UnixNano()))
// update the entry // update the entry
err = f.CreateEntry(context.Background(), entry, false, false, nil, false) err = f.CreateEntry(context.Background(), entry, false, false, nil, false)

259
weed/filer/interval_list.go

@ -0,0 +1,259 @@
package filer
import (
"math"
"sync"
)
type IntervalValue interface {
SetStartStop(start, stop int64)
Clone() IntervalValue
}
type Interval[T IntervalValue] struct {
StartOffset int64
StopOffset int64
TsNs int64
Value T
Prev *Interval[T]
Next *Interval[T]
}
func (interval *Interval[T]) Size() int64 {
return interval.StopOffset - interval.StartOffset
}
// IntervalList marks written intervals within one page chunk
type IntervalList[T IntervalValue] struct {
head *Interval[T]
tail *Interval[T]
Lock sync.Mutex
}
func NewIntervalList[T IntervalValue]() *IntervalList[T] {
list := &IntervalList[T]{
head: &Interval[T]{
StartOffset: -1,
StopOffset: -1,
},
tail: &Interval[T]{
StartOffset: math.MaxInt64,
StopOffset: math.MaxInt64,
},
}
return list
}
func (list *IntervalList[T]) Front() (interval *Interval[T]) {
return list.head.Next
}
func (list *IntervalList[T]) AppendInterval(interval *Interval[T]) {
list.Lock.Lock()
defer list.Lock.Unlock()
if list.head.Next == nil {
list.head.Next = interval
}
interval.Prev = list.tail.Prev
if list.tail.Prev != nil {
list.tail.Prev.Next = interval
}
list.tail.Prev = interval
}
func (list *IntervalList[T]) Overlay(startOffset, stopOffset, tsNs int64, value T) {
if startOffset >= stopOffset {
return
}
interval := &Interval[T]{
StartOffset: startOffset,
StopOffset: stopOffset,
TsNs: tsNs,
Value: value,
}
list.Lock.Lock()
defer list.Lock.Unlock()
list.overlayInterval(interval)
}
func (list *IntervalList[T]) InsertInterval(startOffset, stopOffset, tsNs int64, value T) {
interval := &Interval[T]{
StartOffset: startOffset,
StopOffset: stopOffset,
TsNs: tsNs,
Value: value,
}
list.Lock.Lock()
defer list.Lock.Unlock()
value.SetStartStop(startOffset, stopOffset)
list.insertInterval(interval)
}
func (list *IntervalList[T]) insertInterval(interval *Interval[T]) {
prev := list.head
next := prev.Next
for interval.StartOffset < interval.StopOffset {
if next == nil {
// add to the end
list.insertBetween(prev, interval, list.tail)
break
}
// interval is ahead of the next
if interval.StopOffset <= next.StartOffset {
list.insertBetween(prev, interval, next)
break
}
// interval is after the next
if next.StopOffset <= interval.StartOffset {
prev = next
next = next.Next
continue
}
// intersecting next and interval
if interval.TsNs >= next.TsNs {
// interval is newer
if next.StartOffset < interval.StartOffset {
// left side of next is ahead of interval
t := &Interval[T]{
StartOffset: next.StartOffset,
StopOffset: interval.StartOffset,
TsNs: next.TsNs,
Value: next.Value.Clone().(T),
}
t.Value.SetStartStop(t.StartOffset, t.StopOffset)
list.insertBetween(prev, t, interval)
next.StartOffset = interval.StartOffset
next.Value.SetStartStop(next.StartOffset, next.StopOffset)
prev = t
}
if interval.StopOffset < next.StopOffset {
// right side of next is after interval
next.StartOffset = interval.StopOffset
next.Value.SetStartStop(next.StartOffset, next.StopOffset)
list.insertBetween(prev, interval, next)
break
} else {
// next is covered
prev.Next = interval
next = next.Next
}
} else {
// next is newer
if interval.StartOffset < next.StartOffset {
// left side of interval is ahead of next
t := &Interval[T]{
StartOffset: interval.StartOffset,
StopOffset: next.StartOffset,
TsNs: interval.TsNs,
Value: interval.Value.Clone().(T),
}
t.Value.SetStartStop(t.StartOffset, t.StopOffset)
list.insertBetween(prev, t, next)
interval.StartOffset = next.StartOffset
interval.Value.SetStartStop(interval.StartOffset, interval.StopOffset)
}
if next.StopOffset < interval.StopOffset {
// right side of interval is after next
interval.StartOffset = next.StopOffset
interval.Value.SetStartStop(interval.StartOffset, interval.StopOffset)
} else {
// interval is covered
break
}
}
}
}
func (list *IntervalList[T]) insertBetween(a, interval, b *Interval[T]) {
a.Next = interval
b.Prev = interval
if a != list.head {
interval.Prev = a
}
if b != list.tail {
interval.Next = b
}
}
func (list *IntervalList[T]) overlayInterval(interval *Interval[T]) {
//t := list.head
//for ; t.Next != nil; t = t.Next {
// if t.TsNs > interval.TsNs {
// println("writes is out of order", t.TsNs-interval.TsNs, "ns")
// }
//}
p := list.head
for ; p.Next != nil && p.Next.StopOffset <= interval.StartOffset; p = p.Next {
}
q := list.tail
for ; q.Prev != nil && q.Prev.StartOffset >= interval.StopOffset; q = q.Prev {
}
// left side
// interval after p.Next start
if p.Next != nil && p.Next.StartOffset < interval.StartOffset {
t := &Interval[T]{
StartOffset: p.Next.StartOffset,
StopOffset: interval.StartOffset,
TsNs: p.Next.TsNs,
Value: p.Next.Value,
}
p.Next = t
if p != list.head {
t.Prev = p
}
t.Next = interval
interval.Prev = t
} else {
p.Next = interval
if p != list.head {
interval.Prev = p
}
}
// right side
// interval ends before p.Prev
if q.Prev != nil && interval.StopOffset < q.Prev.StopOffset {
t := &Interval[T]{
StartOffset: interval.StopOffset,
StopOffset: q.Prev.StopOffset,
TsNs: q.Prev.TsNs,
Value: q.Prev.Value,
}
q.Prev = t
if q != list.tail {
t.Next = q
}
interval.Next = t
t.Prev = interval
} else {
q.Prev = interval
if q != list.tail {
interval.Next = q
}
}
}
func (list *IntervalList[T]) Len() int {
list.Lock.Lock()
defer list.Lock.Unlock()
var count int
for t := list.head; t != nil; t = t.Next {
count++
}
return count - 1
}
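Note: InsertInterval splits overlapping intervals by timestamp, so a late-arriving older write only fills the ranges not already covered by newer data. A minimal usage sketch, reusing the IntervalInt helper defined in the tests below:
// Sketch: an older write ([75,275) at ts 1) arriving after a newer one ([50,150) at ts 2)
// keeps only the part the newer write does not cover.
func exampleInsert() {
	list := NewIntervalList[IntervalInt]()
	list.InsertInterval(50, 150, 2, 2)
	list.InsertInterval(75, 275, 1, 1)
	for p := list.Front(); p != nil; p = p.Next {
		fmt.Printf("[%d,%d) ts=%d\n", p.StartOffset, p.StopOffset, p.TsNs)
	}
	// Prints: [50,150) ts=2  then  [150,275) ts=1
}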

327
weed/filer/interval_list_test.go

@ -0,0 +1,327 @@
package filer
import (
"fmt"
"github.com/stretchr/testify/assert"
"testing"
)
type IntervalInt int
func (i IntervalInt) SetStartStop(start, stop int64) {
}
func (i IntervalInt) Clone() IntervalValue {
return i
}
func TestIntervalList_Overlay(t *testing.T) {
list := NewIntervalList[IntervalInt]()
list.Overlay(0, 100, 1, 1)
list.Overlay(50, 150, 2, 2)
list.Overlay(200, 250, 3, 3)
list.Overlay(225, 250, 4, 4)
list.Overlay(175, 210, 5, 5)
list.Overlay(0, 25, 6, 6)
for p := list.Front(); p != nil; p = p.Next {
fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
}
assert.Equal(t, 6, list.Len())
println()
list.Overlay(50, 150, 7, 7)
for p := list.Front(); p != nil; p = p.Next {
fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
}
assert.Equal(t, 6, list.Len())
}
func TestIntervalList_Overlay2(t *testing.T) {
list := NewIntervalList[IntervalInt]()
list.Overlay(50, 100, 1, 1)
list.Overlay(0, 50, 2, 2)
for p := list.Front(); p != nil; p = p.Next {
fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
}
}
func TestIntervalList_Overlay3(t *testing.T) {
list := NewIntervalList[IntervalInt]()
list.Overlay(50, 100, 1, 1)
assert.Equal(t, 1, list.Len())
list.Overlay(0, 60, 2, 2)
for p := list.Front(); p != nil; p = p.Next {
fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
}
assert.Equal(t, 2, list.Len())
}
func TestIntervalList_Overlay4(t *testing.T) {
list := NewIntervalList[IntervalInt]()
list.Overlay(50, 100, 1, 1)
list.Overlay(0, 100, 2, 2)
for p := list.Front(); p != nil; p = p.Next {
fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
}
assert.Equal(t, 1, list.Len())
}
func TestIntervalList_Overlay5(t *testing.T) {
list := NewIntervalList[IntervalInt]()
list.Overlay(50, 100, 1, 1)
list.Overlay(0, 110, 2, 2)
for p := list.Front(); p != nil; p = p.Next {
fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
}
assert.Equal(t, 1, list.Len())
}
func TestIntervalList_Overlay6(t *testing.T) {
list := NewIntervalList[IntervalInt]()
list.Overlay(50, 100, 1, 1)
list.Overlay(50, 110, 2, 2)
for p := list.Front(); p != nil; p = p.Next {
fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
}
assert.Equal(t, 1, list.Len())
}
func TestIntervalList_Overlay7(t *testing.T) {
list := NewIntervalList[IntervalInt]()
list.Overlay(50, 100, 1, 1)
list.Overlay(50, 90, 2, 2)
for p := list.Front(); p != nil; p = p.Next {
fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
}
assert.Equal(t, 2, list.Len())
}
func TestIntervalList_Overlay8(t *testing.T) {
list := NewIntervalList[IntervalInt]()
list.Overlay(50, 100, 1, 1)
list.Overlay(60, 90, 2, 2)
for p := list.Front(); p != nil; p = p.Next {
fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
}
assert.Equal(t, 3, list.Len())
}
func TestIntervalList_Overlay9(t *testing.T) {
list := NewIntervalList[IntervalInt]()
list.Overlay(50, 100, 1, 1)
list.Overlay(60, 100, 2, 2)
for p := list.Front(); p != nil; p = p.Next {
fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
}
assert.Equal(t, 2, list.Len())
}
func TestIntervalList_Overlay10(t *testing.T) {
list := NewIntervalList[IntervalInt]()
list.Overlay(50, 100, 1, 1)
list.Overlay(60, 110, 2, 2)
for p := list.Front(); p != nil; p = p.Next {
fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
}
assert.Equal(t, 2, list.Len())
}
func TestIntervalList_Overlay11(t *testing.T) {
list := NewIntervalList[IntervalInt]()
list.Overlay(0, 100, 1, 1)
list.Overlay(100, 110, 2, 2)
list.Overlay(0, 90, 3, 3)
list.Overlay(0, 80, 4, 4)
list.Overlay(0, 90, 5, 5)
list.Overlay(90, 90, 6, 6)
for p := list.Front(); p != nil; p = p.Next {
fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
}
assert.Equal(t, 3, list.Len())
}
func TestIntervalList_insertInterval1(t *testing.T) {
list := NewIntervalList[IntervalInt]()
list.InsertInterval(50, 150, 2, 2)
list.InsertInterval(200, 250, 3, 3)
for p := list.Front(); p != nil; p = p.Next {
fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
}
assert.Equal(t, 2, list.Len())
}
func TestIntervalList_insertInterval2(t *testing.T) {
list := NewIntervalList[IntervalInt]()
list.InsertInterval(50, 150, 2, 2)
list.InsertInterval(0, 25, 3, 3)
for p := list.Front(); p != nil; p = p.Next {
fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
}
assert.Equal(t, 2, list.Len())
}
func TestIntervalList_insertInterval3(t *testing.T) {
list := NewIntervalList[IntervalInt]()
list.InsertInterval(50, 150, 2, 2)
list.InsertInterval(200, 250, 4, 4)
list.InsertInterval(0, 75, 3, 3)
for p := list.Front(); p != nil; p = p.Next {
fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
}
assert.Equal(t, 3, list.Len())
}
func TestIntervalList_insertInterval4(t *testing.T) {
list := NewIntervalList[IntervalInt]()
list.InsertInterval(200, 250, 4, 4)
list.InsertInterval(0, 225, 3, 3)
for p := list.Front(); p != nil; p = p.Next {
fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
}
assert.Equal(t, 2, list.Len())
}
func TestIntervalList_insertInterval5(t *testing.T) {
list := NewIntervalList[IntervalInt]()
list.InsertInterval(200, 250, 4, 4)
list.InsertInterval(0, 225, 5, 5)
for p := list.Front(); p != nil; p = p.Next {
fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
}
assert.Equal(t, 2, list.Len())
}
func TestIntervalList_insertInterval6(t *testing.T) {
list := NewIntervalList[IntervalInt]()
list.InsertInterval(50, 150, 2, 2)
list.InsertInterval(200, 250, 4, 4)
list.InsertInterval(0, 275, 1, 1)
for p := list.Front(); p != nil; p = p.Next {
fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
}
assert.Equal(t, 5, list.Len())
}
func TestIntervalList_insertInterval7(t *testing.T) {
list := NewIntervalList[IntervalInt]()
list.InsertInterval(50, 150, 2, 2)
list.InsertInterval(200, 250, 4, 4)
list.InsertInterval(75, 275, 1, 1)
for p := list.Front(); p != nil; p = p.Next {
fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
}
assert.Equal(t, 4, list.Len())
}
func TestIntervalList_insertInterval8(t *testing.T) {
list := NewIntervalList[IntervalInt]()
list.InsertInterval(50, 150, 2, 2)
list.InsertInterval(200, 250, 4, 4)
list.InsertInterval(75, 275, 3, 3)
for p := list.Front(); p != nil; p = p.Next {
fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
}
assert.Equal(t, 4, list.Len())
}
func TestIntervalList_insertInterval9(t *testing.T) {
list := NewIntervalList[IntervalInt]()
list.InsertInterval(50, 150, 2, 2)
list.InsertInterval(200, 250, 4, 4)
list.InsertInterval(50, 150, 3, 3)
for p := list.Front(); p != nil; p = p.Next {
fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
}
assert.Equal(t, 2, list.Len())
}
func TestIntervalList_insertInterval10(t *testing.T) {
list := NewIntervalList[IntervalInt]()
list.InsertInterval(50, 100, 2, 2)
list.InsertInterval(200, 300, 4, 4)
list.InsertInterval(100, 200, 5, 5)
for p := list.Front(); p != nil; p = p.Next {
fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
}
assert.Equal(t, 3, list.Len())
}
func TestIntervalList_insertInterval11(t *testing.T) {
list := NewIntervalList[IntervalInt]()
list.InsertInterval(0, 64, 1, 1)
list.InsertInterval(72, 136, 3, 3)
list.InsertInterval(64, 128, 2, 2)
list.InsertInterval(68, 72, 4, 4)
for p := list.Front(); p != nil; p = p.Next {
fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
}
assert.Equal(t, 4, list.Len())
}
type IntervalStruct struct {
x int
start int64
stop int64
}
func newIntervalStruct(i int) IntervalStruct {
return IntervalStruct{
x: i,
}
}
func (i IntervalStruct) SetStartStop(start, stop int64) {
i.start, i.stop = start, stop
}
func (i IntervalStruct) Clone() IntervalValue {
return &IntervalStruct{
x: i.x,
start: i.start,
stop: i.stop,
}
}
func TestIntervalList_insertIntervalStruct(t *testing.T) {
list := NewIntervalList[IntervalStruct]()
list.InsertInterval(0, 64, 1, newIntervalStruct(1))
list.InsertInterval(64, 72, 2, newIntervalStruct(2))
list.InsertInterval(72, 136, 3, newIntervalStruct(3))
list.InsertInterval(64, 68, 4, newIntervalStruct(4))
for p := list.Front(); p != nil; p = p.Next {
fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
}
assert.Equal(t, 4, list.Len())
}

16
weed/filer/meta_aggregator.go

@ -192,7 +192,7 @@ func (ma *MetaAggregator) doSubscribeToOneFiler(f *Filer, self pb.ServerAddress,
} }
glog.V(0).Infof("subscribing remote %s meta change: %v, clientId:%d", peer, time.Unix(0, lastTsNs), ma.filer.UniqueFilerId) glog.V(0).Infof("subscribing remote %s meta change: %v, clientId:%d", peer, time.Unix(0, lastTsNs), ma.filer.UniqueFilerId)
err = pb.WithFilerClient(true, peer, ma.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
err = pb.WithFilerClient(true, 0, peer, ma.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() defer cancel()
atomic.AddInt32(&ma.filer.UniqueFilerEpoch, 1) atomic.AddInt32(&ma.filer.UniqueFilerEpoch, 1)
@ -228,7 +228,7 @@ func (ma *MetaAggregator) doSubscribeToOneFiler(f *Filer, self pb.ServerAddress,
} }
func (ma *MetaAggregator) readFilerStoreSignature(peer pb.ServerAddress) (sig int32, err error) { func (ma *MetaAggregator) readFilerStoreSignature(peer pb.ServerAddress) (sig int32, err error) {
err = pb.WithFilerClient(false, peer, ma.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
err = pb.WithFilerClient(false, 0, peer, ma.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
if err != nil { if err != nil {
return err return err
@ -243,10 +243,15 @@ const (
MetaOffsetPrefix = "Meta" MetaOffsetPrefix = "Meta"
) )
func (ma *MetaAggregator) readOffset(f *Filer, peer pb.ServerAddress, peerSignature int32) (lastTsNs int64, err error) {
func GetPeerMetaOffsetKey(peerSignature int32) []byte {
key := []byte(MetaOffsetPrefix + "xxxx") key := []byte(MetaOffsetPrefix + "xxxx")
util.Uint32toBytes(key[len(MetaOffsetPrefix):], uint32(peerSignature)) util.Uint32toBytes(key[len(MetaOffsetPrefix):], uint32(peerSignature))
return key
}
func (ma *MetaAggregator) readOffset(f *Filer, peer pb.ServerAddress, peerSignature int32) (lastTsNs int64, err error) {
key := GetPeerMetaOffsetKey(peerSignature)
value, err := f.Store.KvGet(context.Background(), key) value, err := f.Store.KvGet(context.Background(), key)
@ -263,8 +268,7 @@ func (ma *MetaAggregator) readOffset(f *Filer, peer pb.ServerAddress, peerSignat
func (ma *MetaAggregator) updateOffset(f *Filer, peer pb.ServerAddress, peerSignature int32, lastTsNs int64) (err error) { func (ma *MetaAggregator) updateOffset(f *Filer, peer pb.ServerAddress, peerSignature int32, lastTsNs int64) (err error) {
key := []byte(MetaOffsetPrefix + "xxxx")
util.Uint32toBytes(key[len(MetaOffsetPrefix):], uint32(peerSignature))
key := GetPeerMetaOffsetKey(peerSignature)
value := make([]byte, 8) value := make([]byte, 8)
util.Uint64toBytes(value, uint64(lastTsNs)) util.Uint64toBytes(value, uint64(lastTsNs))
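Note: GetPeerMetaOffsetKey centralizes the key layout shared by readOffset and updateOffset: the "Meta" prefix followed by the peer's 4-byte store signature. A minimal sketch of reading the stored timestamp back (assuming util.BytesToUint64 is the inverse of the util.Uint64toBytes call shown above):
// Sketch: reading a peer's last-seen timestamp with the shared key helper.
func examplePeerOffset(f *Filer, peerSignature int32) (int64, error) {
	key := GetPeerMetaOffsetKey(peerSignature) // "Meta" + 4-byte signature
	value, err := f.Store.KvGet(context.Background(), key)
	if err != nil {
		return 0, err
	}
	return int64(util.BytesToUint64(value)), nil
}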

14
weed/filer/mysql/mysql_sql_gen.go

@ -21,32 +21,32 @@ func (gen *SqlGenMysql) GetSqlInsert(tableName string) string {
if gen.UpsertQueryTemplate != "" { if gen.UpsertQueryTemplate != "" {
return fmt.Sprintf(gen.UpsertQueryTemplate, tableName) return fmt.Sprintf(gen.UpsertQueryTemplate, tableName)
} else { } else {
return fmt.Sprintf("INSERT INTO `%s` (dirhash,name,directory,meta) VALUES(?,?,?,?)", tableName)
return fmt.Sprintf("INSERT INTO `%s` (`dirhash`,`name`,`directory`,`meta`) VALUES(?,?,?,?)", tableName)
} }
} }
func (gen *SqlGenMysql) GetSqlUpdate(tableName string) string { func (gen *SqlGenMysql) GetSqlUpdate(tableName string) string {
return fmt.Sprintf("UPDATE `%s` SET meta=? WHERE dirhash=? AND name=? AND directory=?", tableName)
return fmt.Sprintf("UPDATE `%s` SET `meta` = ? WHERE `dirhash` = ? AND `name` = ? AND `directory` = ?", tableName)
} }
func (gen *SqlGenMysql) GetSqlFind(tableName string) string { func (gen *SqlGenMysql) GetSqlFind(tableName string) string {
return fmt.Sprintf("SELECT meta FROM `%s` WHERE dirhash=? AND name=? AND directory=?", tableName)
return fmt.Sprintf("SELECT `meta` FROM `%s` WHERE `dirhash` = ? AND `name` = ? AND `directory` = ?", tableName)
} }
func (gen *SqlGenMysql) GetSqlDelete(tableName string) string { func (gen *SqlGenMysql) GetSqlDelete(tableName string) string {
return fmt.Sprintf("DELETE FROM `%s` WHERE dirhash=? AND name=? AND directory=?", tableName)
return fmt.Sprintf("DELETE FROM `%s` WHERE `dirhash` = ? AND `name` = ? AND `directory` = ?", tableName)
} }
func (gen *SqlGenMysql) GetSqlDeleteFolderChildren(tableName string) string { func (gen *SqlGenMysql) GetSqlDeleteFolderChildren(tableName string) string {
return fmt.Sprintf("DELETE FROM `%s` WHERE dirhash=? AND directory=?", tableName)
return fmt.Sprintf("DELETE FROM `%s` WHERE `dirhash` = ? AND `directory` = ?", tableName)
} }
func (gen *SqlGenMysql) GetSqlListExclusive(tableName string) string { func (gen *SqlGenMysql) GetSqlListExclusive(tableName string) string {
return fmt.Sprintf("SELECT NAME, meta FROM `%s` WHERE dirhash=? AND name>? AND directory=? AND name like ? ORDER BY NAME ASC LIMIT ?", tableName)
return fmt.Sprintf("SELECT `name`, `meta` FROM `%s` WHERE `dirhash` = ? AND `name` > ? AND `directory` = ? AND `name` LIKE ? ORDER BY `name` ASC LIMIT ?", tableName)
} }
func (gen *SqlGenMysql) GetSqlListInclusive(tableName string) string { func (gen *SqlGenMysql) GetSqlListInclusive(tableName string) string {
return fmt.Sprintf("SELECT NAME, meta FROM `%s` WHERE dirhash=? AND name>=? AND directory=? AND name like ? ORDER BY NAME ASC LIMIT ?", tableName)
return fmt.Sprintf("SELECT `name`, `meta` FROM `%s` WHERE `dirhash` = ? AND `name` >= ? AND `directory` = ? AND `name` LIKE ? ORDER BY `name` ASC LIMIT ?", tableName)
} }
func (gen *SqlGenMysql) GetSqlCreateTable(tableName string) string { func (gen *SqlGenMysql) GetSqlCreateTable(tableName string) string {

4
weed/filer/mysql/mysql_store.go

@ -13,7 +13,7 @@ import (
) )
const ( const (
CONNECTION_URL_PATTERN = "%s:%s@tcp(%s:%d)/%s?charset=utf8"
CONNECTION_URL_PATTERN = "%s:%s@tcp(%s:%d)/%s?collation=utf8mb4_bin"
) )
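Note: switching the DSN from charset=utf8 to collation=utf8mb4_bin makes name comparisons byte-exact and full-UTF-8 safe. A sketch of what the pattern expands to (credentials and host are placeholders):
// Sketch: example expansion of CONNECTION_URL_PATTERN.
func exampleDSN() string {
	// Placeholders only; collation=utf8mb4_bin replaces the old charset=utf8 query parameter.
	return fmt.Sprintf(CONNECTION_URL_PATTERN, "seaweed", "secret", "localhost", 3306, "seaweedfs")
	// => seaweed:secret@tcp(localhost:3306)/seaweedfs?collation=utf8mb4_bin
}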
func init() { func init() {
@ -53,7 +53,7 @@ func (store *MysqlStore) initialize(upsertQuery string, enableUpsert bool, user,
} }
store.SqlGenerator = &SqlGenMysql{ store.SqlGenerator = &SqlGenMysql{
CreateTableSqlTemplate: "", CreateTableSqlTemplate: "",
DropTableSqlTemplate: "drop table `%s`",
DropTableSqlTemplate: "DROP TABLE `%s`",
UpsertQueryTemplate: upsertQuery, UpsertQueryTemplate: upsertQuery,
} }

4
weed/filer/mysql2/mysql2_store.go

@ -15,7 +15,7 @@ import (
) )
const ( const (
CONNECTION_URL_PATTERN = "%s:%s@tcp(%s:%d)/%s?charset=utf8"
CONNECTION_URL_PATTERN = "%s:%s@tcp(%s:%d)/%s?collation=utf8mb4_bin"
) )
var _ filer.BucketAware = (*MysqlStore2)(nil) var _ filer.BucketAware = (*MysqlStore2)(nil)
@ -58,7 +58,7 @@ func (store *MysqlStore2) initialize(createTable, upsertQuery string, enableUpse
} }
store.SqlGenerator = &mysql.SqlGenMysql{ store.SqlGenerator = &mysql.SqlGenMysql{
CreateTableSqlTemplate: createTable, CreateTableSqlTemplate: createTable,
DropTableSqlTemplate: "drop table `%s`",
DropTableSqlTemplate: "DROP TABLE `%s`",
UpsertQueryTemplate: upsertQuery, UpsertQueryTemplate: upsertQuery,
} }

66
weed/filer/reader_at.go

@ -10,14 +10,12 @@ import (
"github.com/seaweedfs/seaweedfs/weed/glog" "github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util" "github.com/seaweedfs/seaweedfs/weed/util"
"github.com/seaweedfs/seaweedfs/weed/util/chunk_cache"
"github.com/seaweedfs/seaweedfs/weed/wdclient" "github.com/seaweedfs/seaweedfs/weed/wdclient"
) )
type ChunkReadAt struct { type ChunkReadAt struct {
masterClient *wdclient.MasterClient masterClient *wdclient.MasterClient
chunkViews []*ChunkView
readerLock sync.Mutex
chunkViews *IntervalList[*ChunkView]
fileSize int64 fileSize int64
readerCache *ReaderCache readerCache *ReaderCache
readerPattern *ReaderPattern readerPattern *ReaderPattern
@ -89,12 +87,12 @@ func LookupFn(filerClient filer_pb.FilerClient) wdclient.LookupFileIdFunctionTyp
} }
} }
func NewChunkReaderAtFromClient(lookupFn wdclient.LookupFileIdFunctionType, chunkViews []*ChunkView, chunkCache chunk_cache.ChunkCache, fileSize int64) *ChunkReadAt {
func NewChunkReaderAtFromClient(readerCache *ReaderCache, chunkViews *IntervalList[*ChunkView], fileSize int64) *ChunkReadAt {
return &ChunkReadAt{ return &ChunkReadAt{
chunkViews: chunkViews, chunkViews: chunkViews,
fileSize: fileSize, fileSize: fileSize,
readerCache: newReaderCache(32, chunkCache, lookupFn),
readerCache: readerCache,
readerPattern: NewReaderPattern(), readerPattern: NewReaderPattern(),
} }
} }
@ -108,44 +106,58 @@ func (c *ChunkReadAt) ReadAt(p []byte, offset int64) (n int, err error) {
c.readerPattern.MonitorReadAt(offset, len(p)) c.readerPattern.MonitorReadAt(offset, len(p))
c.readerLock.Lock()
defer c.readerLock.Unlock()
c.chunkViews.Lock.Lock()
defer c.chunkViews.Lock.Unlock()
// glog.V(4).Infof("ReadAt [%d,%d) of total file size %d bytes %d chunk views", offset, offset+int64(len(p)), c.fileSize, len(c.chunkViews))
n, _, err = c.doReadAt(p, offset)
return
}
func (c *ChunkReadAt) ReadAtWithTime(p []byte, offset int64) (n int, ts int64, err error) {
c.readerPattern.MonitorReadAt(offset, len(p))
c.chunkViews.Lock.Lock()
defer c.chunkViews.Lock.Unlock()
// glog.V(4).Infof("ReadAt [%d,%d) of total file size %d bytes %d chunk views", offset, offset+int64(len(p)), c.fileSize, len(c.chunkViews)) // glog.V(4).Infof("ReadAt [%d,%d) of total file size %d bytes %d chunk views", offset, offset+int64(len(p)), c.fileSize, len(c.chunkViews))
return c.doReadAt(p, offset) return c.doReadAt(p, offset)
} }
-func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
+func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, ts int64, err error) {
 
 	startOffset, remaining := offset, int64(len(p))
-	var nextChunks []*ChunkView
-	for i, chunk := range c.chunkViews {
+	var nextChunks *Interval[*ChunkView]
+	for x := c.chunkViews.Front(); x != nil; x = x.Next {
+		chunk := x.Value
 		if remaining <= 0 {
 			break
 		}
-		if i+1 < len(c.chunkViews) {
-			nextChunks = c.chunkViews[i+1:]
+		if x.Next != nil {
+			nextChunks = x.Next
 		}
-		if startOffset < chunk.LogicOffset {
-			gap := chunk.LogicOffset - startOffset
-			glog.V(4).Infof("zero [%d,%d)", startOffset, chunk.LogicOffset)
+		if startOffset < chunk.ViewOffset {
+			gap := chunk.ViewOffset - startOffset
+			glog.V(4).Infof("zero [%d,%d)", startOffset, chunk.ViewOffset)
 			n += zero(p, startOffset-offset, gap)
-			startOffset, remaining = chunk.LogicOffset, remaining-gap
+			startOffset, remaining = chunk.ViewOffset, remaining-gap
 			if remaining <= 0 {
 				break
 			}
 		}
-		// fmt.Printf(">>> doReadAt [%d,%d), chunk[%d,%d)\n", offset, offset+int64(len(p)), chunk.LogicOffset, chunk.LogicOffset+int64(chunk.Size))
-		chunkStart, chunkStop := max(chunk.LogicOffset, startOffset), min(chunk.LogicOffset+int64(chunk.Size), startOffset+remaining)
+		// fmt.Printf(">>> doReadAt [%d,%d), chunk[%d,%d)\n", offset, offset+int64(len(p)), chunk.ViewOffset, chunk.ViewOffset+int64(chunk.ViewSize))
+		chunkStart, chunkStop := max(chunk.ViewOffset, startOffset), min(chunk.ViewOffset+int64(chunk.ViewSize), startOffset+remaining)
 		if chunkStart >= chunkStop {
 			continue
 		}
-		// glog.V(4).Infof("read [%d,%d), %d/%d chunk %s [%d,%d)", chunkStart, chunkStop, i, len(c.chunkViews), chunk.FileId, chunk.LogicOffset-chunk.Offset, chunk.LogicOffset-chunk.Offset+int64(chunk.Size))
-		bufferOffset := chunkStart - chunk.LogicOffset + chunk.Offset
+		// glog.V(4).Infof("read [%d,%d), %d/%d chunk %s [%d,%d)", chunkStart, chunkStop, i, len(c.chunkViews), chunk.FileId, chunk.ViewOffset-chunk.Offset, chunk.ViewOffset-chunk.Offset+int64(chunk.ViewSize))
+		bufferOffset := chunkStart - chunk.ViewOffset + chunk.OffsetInChunk
+		ts = chunk.ModifiedTsNs
 		copied, err := c.readChunkSliceAt(p[startOffset-offset:chunkStop-chunkStart+startOffset-offset], chunk, nextChunks, uint64(bufferOffset))
 		if err != nil {
 			glog.Errorf("fetching chunk %+v: %v\n", chunk, err)
-			return copied, err
+			return copied, ts, err
 		}
 
 		n += copied
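
Note: the rewritten doReadAt walks the chunk views as a linked list of Interval[*ChunkView] nodes and zero-fills any hole before each view. A minimal, self-contained sketch of that traversal pattern follows; the view type, the readAt helper, and the assumption that the caller passes a zeroed buffer are illustrative only, not the SeaweedFS types.

// sketch: read across a linked list of views, zero-filling gaps between them
package main

import "fmt"

type view struct {
    viewOffset int64 // where this view starts inside the logical file
    data       []byte
    next       *view
}

func readAt(head *view, p []byte, offset int64) (n int) {
    start, remaining := offset, int64(len(p))
    for v := head; v != nil && remaining > 0; v = v.next {
        if start < v.viewOffset { // hole before this view: leave zeros in p
            gap := v.viewOffset - start
            if gap > remaining {
                gap = remaining
            }
            n += int(gap)
            start, remaining = start+gap, remaining-gap
        }
        viewEnd := v.viewOffset + int64(len(v.data))
        if start >= viewEnd || remaining <= 0 {
            continue
        }
        copied := copy(p[start-offset:], v.data[start-v.viewOffset:])
        n += copied
        start, remaining = start+int64(copied), remaining-int64(copied)
    }
    return n
}

func main() {
    // two views at [2,4) and [6,8); the holes at [0,2) and [4,6) read as zeros
    b := &view{viewOffset: 6, data: []byte("cd")}
    a := &view{viewOffset: 2, data: []byte("ab"), next: b}
    p := make([]byte, 8)
    fmt.Println(readAt(a, p, 0), p) // 8 [0 0 97 98 0 0 99 100]
}
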
@@ -177,7 +189,7 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
 	}
 
-func (c *ChunkReadAt) readChunkSliceAt(buffer []byte, chunkView *ChunkView, nextChunkViews []*ChunkView, offset uint64) (n int, err error) {
+func (c *ChunkReadAt) readChunkSliceAt(buffer []byte, chunkView *ChunkView, nextChunkViews *Interval[*ChunkView], offset uint64) (n int, err error) {
 	if c.readerPattern.IsRandomMode() {
 		n, err := c.readerCache.chunkCache.ReadChunkAt(buffer, chunkView.FileId, offset)

@@ -187,16 +199,14 @@ func (c *ChunkReadAt) readChunkSliceAt(buffer []byte, chunkView *ChunkView, next
 		return fetchChunkRange(buffer, c.readerCache.lookupFileIdFn, chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped, int64(offset))
 	}
 
-	n, err = c.readerCache.ReadChunkAt(buffer, chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped, int64(offset), int(chunkView.ChunkSize), chunkView.LogicOffset == 0)
+	n, err = c.readerCache.ReadChunkAt(buffer, chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped, int64(offset), int(chunkView.ChunkSize), chunkView.ViewOffset == 0)
 	if c.lastChunkFid != chunkView.FileId {
-		if chunkView.Offset == 0 { // start of a new chunk
+		if chunkView.OffsetInChunk == 0 { // start of a new chunk
 			if c.lastChunkFid != "" {
 				c.readerCache.UnCache(c.lastChunkFid)
-				c.readerCache.MaybeCache(nextChunkViews)
-			} else {
-				if len(nextChunkViews) >= 1 {
-					c.readerCache.MaybeCache(nextChunkViews[:1]) // just read the next chunk if at the very beginning
-				}
+			}
+			if nextChunkViews != nil {
+				c.readerCache.MaybeCache(nextChunkViews) // just read the next chunk if at the very beginning
 			}
 		}
 	}
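
Note: the caching branch above is also flattened by the linked list: when a read enters a new chunk, the previous chunk is evicted and, if a next list node exists, it is handed to MaybeCache. A hedged sketch of that decision, with a plain map standing in for ReaderCache (node, tinyCache, and onNewChunk are invented names, not part of this change):

// sketch: evict the previous chunk, then prefetch the next node if present
type node struct {
    fileId string
    next   *node
}

type tinyCache map[string][]byte

func (c tinyCache) unCache(fileId string) { delete(c, fileId) }

func (c tinyCache) maybeCache(n *node) {
    if n == nil {
        return
    }
    if _, ok := c[n.fileId]; !ok {
        c[n.fileId] = nil // the real ReaderCache starts an async download here
    }
}

func onNewChunk(c tinyCache, lastFid string, next *node) {
    if lastFid != "" {
        c.unCache(lastFid)
    }
    c.maybeCache(next)
}
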

72  weed/filer/reader_at_test.go

@@ -5,7 +5,6 @@ import (
 	"io"
 	"math"
 	"strconv"
-	"sync"
 	"testing"
 )
@@ -34,44 +33,42 @@ func (m *mockChunkCache) SetChunk(fileId string, data []byte) {
 func TestReaderAt(t *testing.T) {
 
-	visibles := []VisibleInterval{
-		{
+	visibles := NewIntervalList[*VisibleInterval]()
+	addVisibleInterval(visibles, &VisibleInterval{
 		start:     1,
 		stop:      2,
 		fileId:    "1",
 		chunkSize: 9,
-	},
-	{
+	})
+	addVisibleInterval(visibles, &VisibleInterval{
 		start:     3,
 		stop:      4,
 		fileId:    "3",
 		chunkSize: 1,
-	},
-	{
+	})
+	addVisibleInterval(visibles, &VisibleInterval{
 		start:     5,
 		stop:      6,
 		fileId:    "5",
 		chunkSize: 2,
-	},
-	{
+	})
+	addVisibleInterval(visibles, &VisibleInterval{
 		start:     7,
 		stop:      9,
 		fileId:    "7",
 		chunkSize: 2,
-	},
-	{
+	})
+	addVisibleInterval(visibles, &VisibleInterval{
 		start:     9,
 		stop:      10,
 		fileId:    "9",
 		chunkSize: 2,
-	},
-	}
+	})
 
 	readerAt := &ChunkReadAt{
 		chunkViews:    ViewFromVisibleIntervals(visibles, 0, math.MaxInt64),
-		readerLock:    sync.Mutex{},
 		fileSize:      10,
-		readerCache:   newReaderCache(3, &mockChunkCache{}, nil),
+		readerCache:   NewReaderCache(3, &mockChunkCache{}, nil),
 		readerPattern: NewReaderPattern(),
 	}
@@ -86,7 +83,7 @@ func testReadAt(t *testing.T, readerAt *ChunkReadAt, offset int64, size int, exp
 	if data == nil {
 		data = make([]byte, size)
 	}
-	n, err := readerAt.doReadAt(data, offset)
+	n, _, err := readerAt.doReadAt(data, offset)
 	if expectedN != n {
 		t.Errorf("unexpected read size: %d, expect: %d", n, expectedN)
@@ -101,26 +98,24 @@ func testReadAt(t *testing.T, readerAt *ChunkReadAt, offset int64, size int, exp
 func TestReaderAt0(t *testing.T) {
 
-	visibles := []VisibleInterval{
-		{
+	visibles := NewIntervalList[*VisibleInterval]()
+	addVisibleInterval(visibles, &VisibleInterval{
 		start:     2,
 		stop:      5,
 		fileId:    "1",
 		chunkSize: 9,
-	},
-	{
+	})
+	addVisibleInterval(visibles, &VisibleInterval{
 		start:     7,
 		stop:      9,
 		fileId:    "2",
 		chunkSize: 9,
-	},
-	}
+	})
 
 	readerAt := &ChunkReadAt{
 		chunkViews:    ViewFromVisibleIntervals(visibles, 0, math.MaxInt64),
-		readerLock:    sync.Mutex{},
 		fileSize:      10,
-		readerCache:   newReaderCache(3, &mockChunkCache{}, nil),
+		readerCache:   NewReaderCache(3, &mockChunkCache{}, nil),
 		readerPattern: NewReaderPattern(),
 	}
@@ -135,20 +130,18 @@ func TestReaderAt0(t *testing.T) {
 func TestReaderAt1(t *testing.T) {
 
-	visibles := []VisibleInterval{
-		{
+	visibles := NewIntervalList[*VisibleInterval]()
+	addVisibleInterval(visibles, &VisibleInterval{
 		start:     2,
 		stop:      5,
 		fileId:    "1",
 		chunkSize: 9,
-	},
-	}
+	})
 
 	readerAt := &ChunkReadAt{
 		chunkViews:    ViewFromVisibleIntervals(visibles, 0, math.MaxInt64),
-		readerLock:    sync.Mutex{},
 		fileSize:      20,
-		readerCache:   newReaderCache(3, &mockChunkCache{}, nil),
+		readerCache:   NewReaderCache(3, &mockChunkCache{}, nil),
 		readerPattern: NewReaderPattern(),
 	}
@@ -164,26 +157,24 @@ func TestReaderAt1(t *testing.T) {
 }
 
 func TestReaderAtGappedChunksDoNotLeak(t *testing.T) {
-	visibles := []VisibleInterval{
-		{
+	visibles := NewIntervalList[*VisibleInterval]()
+	addVisibleInterval(visibles, &VisibleInterval{
 		start:     2,
 		stop:      3,
 		fileId:    "1",
 		chunkSize: 5,
-	},
-	{
+	})
+	addVisibleInterval(visibles, &VisibleInterval{
 		start:     7,
 		stop:      9,
 		fileId:    "1",
 		chunkSize: 4,
-	},
-	}
+	})
 
 	readerAt := &ChunkReadAt{
 		chunkViews:    ViewFromVisibleIntervals(visibles, 0, math.MaxInt64),
-		readerLock:    sync.Mutex{},
 		fileSize:      9,
-		readerCache:   newReaderCache(3, &mockChunkCache{}, nil),
+		readerCache:   NewReaderCache(3, &mockChunkCache{}, nil),
 		readerPattern: NewReaderPattern(),
 	}
@@ -193,10 +184,9 @@ func TestReaderAtGappedChunksDoNotLeak(t *testing.T) {
 func TestReaderAtSparseFileDoesNotLeak(t *testing.T) {
 	readerAt := &ChunkReadAt{
-		chunkViews:    ViewFromVisibleIntervals([]VisibleInterval{}, 0, math.MaxInt64),
-		readerLock:    sync.Mutex{},
+		chunkViews:    ViewFromVisibleIntervals(NewIntervalList[*VisibleInterval](), 0, math.MaxInt64),
 		fileSize:      3,
-		readerCache:   newReaderCache(3, &mockChunkCache{}, nil),
+		readerCache:   NewReaderCache(3, &mockChunkCache{}, nil),
 		readerPattern: NewReaderPattern(),
 	}
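
Note: the tests now assemble the visible intervals through NewIntervalList and an addVisibleInterval helper instead of a slice literal. The helper's body is not part of this diff; assuming it simply appends to the end of a generic singly linked list, a minimal sketch could look like the following (listNode, list, newList, append, and front are hypothetical stand-ins, not the real IntervalList API):

// sketch: a generic append-only linked list, roughly the shape the tests rely on
type listNode[T any] struct {
    Value T
    Next  *listNode[T]
}

type list[T any] struct {
    head, tail *listNode[T]
}

func newList[T any]() *list[T] { return &list[T]{} }

func (l *list[T]) append(v T) {
    n := &listNode[T]{Value: v}
    if l.tail == nil {
        l.head, l.tail = n, n
        return
    }
    l.tail.Next = n
    l.tail = n
}

func (l *list[T]) front() *listNode[T] { return l.head }
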

11  weed/filer/reader_cache.go

@@ -34,7 +34,7 @@ type SingleChunkCacher struct {
 	completedTimeNew int64
 }
 
-func newReaderCache(limit int, chunkCache chunk_cache.ChunkCache, lookupFileIdFn wdclient.LookupFileIdFunctionType) *ReaderCache {
+func NewReaderCache(limit int, chunkCache chunk_cache.ChunkCache, lookupFileIdFn wdclient.LookupFileIdFunctionType) *ReaderCache {
 	return &ReaderCache{
 		limit:      limit,
 		chunkCache: chunkCache,
@@ -43,7 +43,7 @@ func newReaderCache(limit int, chunkCache chunk_cache.ChunkCache, lookupFileIdFn
 	}
 }
 
-func (rc *ReaderCache) MaybeCache(chunkViews []*ChunkView) {
+func (rc *ReaderCache) MaybeCache(chunkViews *Interval[*ChunkView]) {
 	if rc.lookupFileIdFn == nil {
 		return
 	}
@@ -55,7 +55,8 @@ func (rc *ReaderCache) MaybeCache(chunkViews []*ChunkView) {
 		return
 	}
 
-	for _, chunkView := range chunkViews {
+	for x := chunkViews; x != nil; x = x.Next {
+		chunkView := x.Value
 		if _, found := rc.downloaders[chunkView.FileId]; found {
 			continue
 		}
@@ -65,7 +66,7 @@ func (rc *ReaderCache) MaybeCache(chunkViews []*ChunkView) {
 			return
 		}
 
-		// glog.V(4).Infof("prefetch %s offset %d", chunkView.FileId, chunkView.LogicOffset)
+		// glog.V(4).Infof("prefetch %s offset %d", chunkView.FileId, chunkView.ViewOffset)
 		// cache this chunk if not yet
 		cacher := newSingleChunkCacher(rc, chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped, int(chunkView.ChunkSize), false)
 		go cacher.startCaching()
@@ -207,7 +208,7 @@ func (s *SingleChunkCacher) readChunkAt(buf []byte, offset int64) (int, error) {
 		return 0, s.err
 	}
 
-	if len(s.data) == 0 {
+	if len(s.data) <= int(offset) {
 		return 0, nil
 	}
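
Note: the last hunk tightens SingleChunkCacher.readChunkAt. The old guard only caught an empty buffer; the new one also returns 0 bytes when the requested offset is at or beyond the end of the cached data, so the subsequent copy never slices past the buffer. A small sketch of the corrected guard (readCachedAt is an illustrative helper, not the real method):

// sketch: never slice past the cached data when serving a ranged read
func readCachedAt(data, buf []byte, offset int64) int {
    if len(data) <= int(offset) { // also covers the len(data) == 0 case
        return 0
    }
    return copy(buf, data[offset:])
}
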

Some files were not shown because too many files changed in this diff
