Browse Source

Merge branch '10_acl_merged' into _product

pull/5936/head
changlin.shi 2 years ago
parent
commit
a23c0da4c2
  1. 12
      .github/workflows/binaries_dev.yml
  2. 6
      .github/workflows/binaries_release0.yml
  3. 6
      .github/workflows/binaries_release1.yml
  4. 6
      .github/workflows/binaries_release2.yml
  5. 6
      .github/workflows/binaries_release3.yml
  6. 6
      .github/workflows/binaries_release4.yml
  7. 2
      .github/workflows/codeql.yml
  8. 2
      .github/workflows/container_dev.yml
  9. 2
      .github/workflows/container_latest.yml
  10. 2
      .github/workflows/container_release1.yml
  11. 2
      .github/workflows/container_release2.yml
  12. 2
      .github/workflows/container_release3.yml
  13. 2
      .github/workflows/container_release4.yml
  14. 2
      .github/workflows/container_release5.yml
  15. 4
      .github/workflows/depsreview.yml
  16. 4
      .github/workflows/e2e.yml
  17. 4
      .github/workflows/go.yml
  18. 60
      go.mod
  19. 129
      go.sum
  20. 4
      k8s/helm_charts2/Chart.yaml
  21. 11
      k8s/helm_charts2/templates/filer-statefulset.yaml
  22. 11
      k8s/helm_charts2/templates/master-statefulset.yaml
  23. 10
      k8s/helm_charts2/templates/s3-deployment.yaml
  24. 13
      k8s/helm_charts2/templates/volume-statefulset.yaml
  25. 11
      k8s/helm_charts2/values.yaml
  26. 22
      weed/Makefile
  27. 2
      weed/command/filer.go
  28. 22
      weed/command/filer_sync.go
  29. 2
      weed/command/master.go
  30. 2
      weed/command/s3.go
  31. 2
      weed/command/server.go
  32. 2
      weed/command/volume.go
  33. 5
      weed/command/webdav.go
  34. 3
      weed/filer/filechunks.go
  35. 9
      weed/filer/filer_notify.go
  36. 23
      weed/filer/filerstore_wrapper.go
  37. 35
      weed/glog/glog_file.go
  38. 1
      weed/mount/dirty_pages_chunked.go
  39. 30
      weed/mount/filehandle.go
  40. 2
      weed/mount/filehandle_map.go
  41. 2
      weed/mount/filehandle_read.go
  42. 42
      weed/mount/locked_entry.go
  43. 4
      weed/mount/meta_cache/meta_cache.go
  44. 68
      weed/mount/page_writer/upload_pipeline.go
  45. 10
      weed/mount/page_writer/upload_pipeline_lock.go
  46. 2
      weed/mount/weedfs_attr.go
  47. 6
      weed/mount/weedfs_dir_lookup.go
  48. 2
      weed/mount/weedfs_dir_read.go
  49. 2
      weed/mount/weedfs_file_lseek.go
  50. 12
      weed/mount/weedfs_file_sync.go
  51. 2
      weed/mount/weedfs_file_write.go
  52. 7
      weed/mount/weedfs_rename.go
  53. 3
      weed/pb/volume_server.proto
  54. 1407
      weed/pb/volume_server_pb/volume_server.pb.go
  55. 39
      weed/replication/sink/filersink/fetch_write.go
  56. 8
      weed/replication/sink/filersink/filer_sink.go
  57. 11
      weed/s3api/auth_credentials.go
  58. 4
      weed/s3api/auth_credentials_subscribe.go
  59. 70
      weed/s3api/bucket_metadata.go
  60. 50
      weed/s3api/bucket_metadata_test.go
  61. 24
      weed/s3api/filer_multipart.go
  62. 8
      weed/s3api/filer_multipart_test.go
  63. 4
      weed/s3api/s3_constants/acp_ownership.go
  64. 7
      weed/s3api/s3_constants/extend_key.go
  65. 5
      weed/s3api/s3_constants/header.go
  66. 381
      weed/s3api/s3acl/acl_helper.go
  67. 1391
      weed/s3api/s3acl/acl_helper_test.go
  68. 333
      weed/s3api/s3api_acp.go
  69. 57
      weed/s3api/s3api_bucket_handlers.go
  70. 14
      weed/s3api/s3api_object_handlers.go
  71. 65
      weed/s3api/s3api_object_multipart_handlers.go
  72. 9
      weed/s3api/s3api_objects_list_handlers.go
  73. 10
      weed/s3api/s3api_server.go
  74. 14
      weed/s3api/s3err/s3api_errors.go
  75. 6
      weed/s3api/tags.go
  76. 2
      weed/server/common.go
  77. 6
      weed/server/filer_grpc_server_remote.go
  78. 1
      weed/server/filer_grpc_server_rename.go
  79. 17
      weed/server/filer_server_handlers_read.go
  80. 4
      weed/server/filer_server_handlers_write_autochunk.go
  81. 2
      weed/server/filer_ui/filer.html
  82. 2
      weed/server/volume_grpc_remote.go
  83. 12
      weed/server/webdav_server.go
  84. 177
      weed/shell/command_fs_verify.go
  85. 15
      weed/shell/command_volume_list.go
  86. 4
      weed/stats/metrics.go
  87. 2
      weed/storage/volume_read_all.go
  88. 2
      weed/util/constants.go
  89. 3
      weed/weed.go

12
.github/workflows/binaries_dev.yml

@ -38,13 +38,13 @@ jobs:
steps: steps:
- name: Check out code into the Go module directory - name: Check out code into the Go module directory
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # v2
uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v2
- name: Set BUILD_TIME env - name: Set BUILD_TIME env
run: echo BUILD_TIME=$(date -u +%Y%m%d-%H%M) >> ${GITHUB_ENV} run: echo BUILD_TIME=$(date -u +%Y%m%d-%H%M) >> ${GITHUB_ENV}
- name: Go Release Binaries Large Disk - name: Go Release Binaries Large Disk
uses: wangyoucao577/go-release-action@156f172aa3d27f3879a47cad8810b366c299e9e9 # v1.22
uses: wangyoucao577/go-release-action@90da8ebfdc010a0e7d378419a76fd90230a05228 # v1.22
with: with:
github_token: ${{ secrets.GITHUB_TOKEN }} github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }} goos: ${{ matrix.goos }}
@ -60,7 +60,7 @@ jobs:
asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}" asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"
- name: Go Release Binaries Normal Volume Size - name: Go Release Binaries Normal Volume Size
uses: wangyoucao577/go-release-action@156f172aa3d27f3879a47cad8810b366c299e9e9 # v1.22
uses: wangyoucao577/go-release-action@90da8ebfdc010a0e7d378419a76fd90230a05228 # v1.22
with: with:
github_token: ${{ secrets.GITHUB_TOKEN }} github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }} goos: ${{ matrix.goos }}
@ -87,13 +87,13 @@ jobs:
steps: steps:
- name: Check out code into the Go module directory - name: Check out code into the Go module directory
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # v2
uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v2
- name: Set BUILD_TIME env - name: Set BUILD_TIME env
run: echo BUILD_TIME=$(date -u +%Y%m%d-%H%M) >> ${GITHUB_ENV} run: echo BUILD_TIME=$(date -u +%Y%m%d-%H%M) >> ${GITHUB_ENV}
- name: Go Release Binaries Large Disk - name: Go Release Binaries Large Disk
uses: wangyoucao577/go-release-action@156f172aa3d27f3879a47cad8810b366c299e9e9 # v1.22
uses: wangyoucao577/go-release-action@90da8ebfdc010a0e7d378419a76fd90230a05228 # v1.22
with: with:
github_token: ${{ secrets.GITHUB_TOKEN }} github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }} goos: ${{ matrix.goos }}
@ -109,7 +109,7 @@ jobs:
asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}" asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"
- name: Go Release Binaries Normal Volume Size - name: Go Release Binaries Normal Volume Size
uses: wangyoucao577/go-release-action@156f172aa3d27f3879a47cad8810b366c299e9e9 # v1.22
uses: wangyoucao577/go-release-action@90da8ebfdc010a0e7d378419a76fd90230a05228 # v1.22
with: with:
github_token: ${{ secrets.GITHUB_TOKEN }} github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }} goos: ${{ matrix.goos }}

6
.github/workflows/binaries_release0.yml

@ -28,9 +28,9 @@ jobs:
# Steps represent a sequence of tasks that will be executed as part of the job # Steps represent a sequence of tasks that will be executed as part of the job
steps: steps:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # v2
- uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v2
- name: Go Release Binaries Normal Volume Size - name: Go Release Binaries Normal Volume Size
uses: wangyoucao577/go-release-action@156f172aa3d27f3879a47cad8810b366c299e9e9 # v1.22
uses: wangyoucao577/go-release-action@90da8ebfdc010a0e7d378419a76fd90230a05228 # v1.22
with: with:
github_token: ${{ secrets.GITHUB_TOKEN }} github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }} goos: ${{ matrix.goos }}
@ -44,7 +44,7 @@ jobs:
binary_name: weed binary_name: weed
asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}" asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
- name: Go Release Large Disk Binaries - name: Go Release Large Disk Binaries
uses: wangyoucao577/go-release-action@156f172aa3d27f3879a47cad8810b366c299e9e9 # v1.22
uses: wangyoucao577/go-release-action@90da8ebfdc010a0e7d378419a76fd90230a05228 # v1.22
with: with:
github_token: ${{ secrets.GITHUB_TOKEN }} github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }} goos: ${{ matrix.goos }}

6
.github/workflows/binaries_release1.yml

@ -28,9 +28,9 @@ jobs:
# Steps represent a sequence of tasks that will be executed as part of the job # Steps represent a sequence of tasks that will be executed as part of the job
steps: steps:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # v2
- uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v2
- name: Go Release Binaries Normal Volume Size - name: Go Release Binaries Normal Volume Size
uses: wangyoucao577/go-release-action@156f172aa3d27f3879a47cad8810b366c299e9e9 # v1.22
uses: wangyoucao577/go-release-action@90da8ebfdc010a0e7d378419a76fd90230a05228 # v1.22
with: with:
github_token: ${{ secrets.GITHUB_TOKEN }} github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }} goos: ${{ matrix.goos }}
@ -44,7 +44,7 @@ jobs:
binary_name: weed binary_name: weed
asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}" asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
- name: Go Release Large Disk Binaries - name: Go Release Large Disk Binaries
uses: wangyoucao577/go-release-action@156f172aa3d27f3879a47cad8810b366c299e9e9 # v1.22
uses: wangyoucao577/go-release-action@90da8ebfdc010a0e7d378419a76fd90230a05228 # v1.22
with: with:
github_token: ${{ secrets.GITHUB_TOKEN }} github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }} goos: ${{ matrix.goos }}

6
.github/workflows/binaries_release2.yml

@ -28,9 +28,9 @@ jobs:
# Steps represent a sequence of tasks that will be executed as part of the job # Steps represent a sequence of tasks that will be executed as part of the job
steps: steps:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # v2
- uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v2
- name: Go Release Binaries Normal Volume Size - name: Go Release Binaries Normal Volume Size
uses: wangyoucao577/go-release-action@156f172aa3d27f3879a47cad8810b366c299e9e9 # v1.22
uses: wangyoucao577/go-release-action@90da8ebfdc010a0e7d378419a76fd90230a05228 # v1.22
with: with:
github_token: ${{ secrets.GITHUB_TOKEN }} github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }} goos: ${{ matrix.goos }}
@ -44,7 +44,7 @@ jobs:
binary_name: weed binary_name: weed
asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}" asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
- name: Go Release Large Disk Binaries - name: Go Release Large Disk Binaries
uses: wangyoucao577/go-release-action@156f172aa3d27f3879a47cad8810b366c299e9e9 # v1.22
uses: wangyoucao577/go-release-action@90da8ebfdc010a0e7d378419a76fd90230a05228 # v1.22
with: with:
github_token: ${{ secrets.GITHUB_TOKEN }} github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }} goos: ${{ matrix.goos }}

6
.github/workflows/binaries_release3.yml

@ -28,9 +28,9 @@ jobs:
# Steps represent a sequence of tasks that will be executed as part of the job # Steps represent a sequence of tasks that will be executed as part of the job
steps: steps:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # v2
- uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v2
- name: Go Release Binaries Normal Volume Size - name: Go Release Binaries Normal Volume Size
uses: wangyoucao577/go-release-action@156f172aa3d27f3879a47cad8810b366c299e9e9 # v1.22
uses: wangyoucao577/go-release-action@90da8ebfdc010a0e7d378419a76fd90230a05228 # v1.22
with: with:
github_token: ${{ secrets.GITHUB_TOKEN }} github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }} goos: ${{ matrix.goos }}
@ -44,7 +44,7 @@ jobs:
binary_name: weed binary_name: weed
asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}" asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
- name: Go Release Large Disk Binaries - name: Go Release Large Disk Binaries
uses: wangyoucao577/go-release-action@156f172aa3d27f3879a47cad8810b366c299e9e9 # v1.22
uses: wangyoucao577/go-release-action@90da8ebfdc010a0e7d378419a76fd90230a05228 # v1.22
with: with:
github_token: ${{ secrets.GITHUB_TOKEN }} github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }} goos: ${{ matrix.goos }}

6
.github/workflows/binaries_release4.yml

@ -28,9 +28,9 @@ jobs:
# Steps represent a sequence of tasks that will be executed as part of the job # Steps represent a sequence of tasks that will be executed as part of the job
steps: steps:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # v2
- uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v2
- name: Go Release Binaries Normal Volume Size - name: Go Release Binaries Normal Volume Size
uses: wangyoucao577/go-release-action@156f172aa3d27f3879a47cad8810b366c299e9e9 # v1.22
uses: wangyoucao577/go-release-action@90da8ebfdc010a0e7d378419a76fd90230a05228 # v1.22
with: with:
github_token: ${{ secrets.GITHUB_TOKEN }} github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }} goos: ${{ matrix.goos }}
@ -45,7 +45,7 @@ jobs:
binary_name: weed binary_name: weed
asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}_full" asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}_full"
- name: Go Release Large Disk Binaries - name: Go Release Large Disk Binaries
uses: wangyoucao577/go-release-action@156f172aa3d27f3879a47cad8810b366c299e9e9 # v1.22
uses: wangyoucao577/go-release-action@90da8ebfdc010a0e7d378419a76fd90230a05228 # v1.22
with: with:
github_token: ${{ secrets.GITHUB_TOKEN }} github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }} goos: ${{ matrix.goos }}

2
.github/workflows/codeql.yml

@ -18,7 +18,7 @@ jobs:
steps: steps:
- name: Checkout repository - name: Checkout repository
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8
uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b
# Initializes the CodeQL tools for scanning. # Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL - name: Initialize CodeQL

2
.github/workflows/container_dev.yml

@ -16,7 +16,7 @@ jobs:
steps: steps:
- -
name: Checkout name: Checkout
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # v2
uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v2
- -
name: Docker meta name: Docker meta
id: docker_meta id: docker_meta

2
.github/workflows/container_latest.yml

@ -17,7 +17,7 @@ jobs:
steps: steps:
- -
name: Checkout name: Checkout
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # v2
uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v2
- -
name: Docker meta name: Docker meta
id: docker_meta id: docker_meta

2
.github/workflows/container_release1.yml

@ -16,7 +16,7 @@ jobs:
steps: steps:
- -
name: Checkout name: Checkout
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # v2
uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v2
- -
name: Docker meta name: Docker meta
id: docker_meta id: docker_meta

2
.github/workflows/container_release2.yml

@ -17,7 +17,7 @@ jobs:
steps: steps:
- -
name: Checkout name: Checkout
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # v2
uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v2
- -
name: Docker meta name: Docker meta
id: docker_meta id: docker_meta

2
.github/workflows/container_release3.yml

@ -17,7 +17,7 @@ jobs:
steps: steps:
- -
name: Checkout name: Checkout
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # v2
uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v2
- -
name: Docker meta name: Docker meta
id: docker_meta id: docker_meta

2
.github/workflows/container_release4.yml

@ -16,7 +16,7 @@ jobs:
steps: steps:
- -
name: Checkout name: Checkout
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # v2
uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v2
- -
name: Docker meta name: Docker meta
id: docker_meta id: docker_meta

2
.github/workflows/container_release5.yml

@ -16,7 +16,7 @@ jobs:
steps: steps:
- -
name: Checkout name: Checkout
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # v2
uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v2
- -
name: Docker meta name: Docker meta
id: docker_meta id: docker_meta

4
.github/workflows/depsreview.yml

@ -9,6 +9,6 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: 'Checkout Repository' - name: 'Checkout Repository'
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8
uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b
- name: 'Dependency Review' - name: 'Dependency Review'
uses: actions/dependency-review-action@11310527b429536e263dc6cc47873e608189ba21
uses: actions/dependency-review-action@0ff3da6f81b812d4ec3cf37a04e2308c7a723730

4
.github/workflows/e2e.yml

@ -24,13 +24,13 @@ jobs:
timeout-minutes: 15 timeout-minutes: 15
steps: steps:
- name: Set up Go 1.x - name: Set up Go 1.x
uses: actions/setup-go@c4a742cab115ed795e34d4513e2cf7d472deb55f # v2
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v2
with: with:
go-version: ^1.13 go-version: ^1.13
id: go id: go
- name: Check out code into the Go module directory - name: Check out code into the Go module directory
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # v2
uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v2
- name: Install dependencies - name: Install dependencies
run: | run: |

4
.github/workflows/go.yml

@ -21,13 +21,13 @@ jobs:
steps: steps:
- name: Set up Go 1.x - name: Set up Go 1.x
uses: actions/setup-go@c4a742cab115ed795e34d4513e2cf7d472deb55f # v2
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v2
with: with:
go-version: ^1.13 go-version: ^1.13
id: go id: go
- name: Check out code into the Go module directory - name: Check out code into the Go module directory
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # v2
uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v2
- name: Get dependencies - name: Get dependencies
run: | run: |

60
go.mod

@ -4,12 +4,12 @@ go 1.19
require ( require (
cloud.google.com/go v0.105.0 // indirect cloud.google.com/go v0.105.0 // indirect
cloud.google.com/go/pubsub v1.26.0
cloud.google.com/go/storage v1.28.0
cloud.google.com/go/pubsub v1.28.0
cloud.google.com/go/storage v1.28.1
github.com/Azure/azure-pipeline-go v0.2.3 github.com/Azure/azure-pipeline-go v0.2.3
github.com/Azure/azure-storage-blob-go v0.15.0 github.com/Azure/azure-storage-blob-go v0.15.0
github.com/Shopify/sarama v1.37.2 github.com/Shopify/sarama v1.37.2
github.com/aws/aws-sdk-go v1.44.136
github.com/aws/aws-sdk-go v1.44.167
github.com/beorn7/perks v1.0.1 // indirect github.com/beorn7/perks v1.0.1 // indirect
github.com/bwmarrin/snowflake v0.3.0 github.com/bwmarrin/snowflake v0.3.0
github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect
@ -31,8 +31,8 @@ require (
github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/go-errors/errors v1.1.1 // indirect github.com/go-errors/errors v1.1.1 // indirect
github.com/go-redis/redis/v8 v8.11.5 github.com/go-redis/redis/v8 v8.11.5
github.com/go-redsync/redsync/v4 v4.6.0
github.com/go-sql-driver/mysql v1.6.0
github.com/go-redsync/redsync/v4 v4.7.1
github.com/go-sql-driver/mysql v1.7.0
github.com/go-zookeeper/zk v1.0.2 // indirect github.com/go-zookeeper/zk v1.0.2 // indirect
github.com/gocql/gocql v0.0.0-20210707082121-9a3953d1826d github.com/gocql/gocql v0.0.0-20210707082121-9a3953d1826d
github.com/golang-jwt/jwt v3.2.2+incompatible github.com/golang-jwt/jwt v3.2.2+incompatible
@ -58,7 +58,7 @@ require (
github.com/karlseguin/ccache/v2 v2.0.8 github.com/karlseguin/ccache/v2 v2.0.8
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/klauspost/compress v1.15.11 // indirect github.com/klauspost/compress v1.15.11 // indirect
github.com/klauspost/reedsolomon v1.11.1
github.com/klauspost/reedsolomon v1.11.3
github.com/kurin/blazer v0.5.3 github.com/kurin/blazer v0.5.3
github.com/lib/pq v1.10.7 github.com/lib/pq v1.10.7
github.com/linxGnu/grocksdb v1.7.10 github.com/linxGnu/grocksdb v1.7.10
@ -80,7 +80,7 @@ require (
github.com/prometheus/client_golang v1.14.0 github.com/prometheus/client_golang v1.14.0
github.com/prometheus/client_model v0.3.0 // indirect github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.37.0 // indirect github.com/prometheus/common v0.37.0 // indirect
github.com/prometheus/procfs v0.8.0
github.com/prometheus/procfs v0.9.0
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
github.com/seaweedfs/goexif v2.0.0+incompatible github.com/seaweedfs/goexif v2.0.0+incompatible
@ -93,7 +93,7 @@ require (
github.com/stretchr/testify v1.8.1 github.com/stretchr/testify v1.8.1
github.com/stvp/tempredis v0.0.0-20181119212430-b82af8480203 github.com/stvp/tempredis v0.0.0-20181119212430-b82af8480203
github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965 github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965
github.com/tidwall/gjson v1.14.3
github.com/tidwall/gjson v1.14.4
github.com/tidwall/match v1.1.1 github.com/tidwall/match v1.1.1
github.com/tidwall/pretty v1.2.0 // indirect github.com/tidwall/pretty v1.2.0 // indirect
github.com/tsuna/gohbase v0.0.0-20201125011725-348991136365 github.com/tsuna/gohbase v0.0.0-20201125011725-348991136365
@ -107,7 +107,7 @@ require (
github.com/xdg-go/stringprep v1.0.3 // indirect github.com/xdg-go/stringprep v1.0.3 // indirect
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect
go.etcd.io/etcd/client/v3 v3.5.6 go.etcd.io/etcd/client/v3 v3.5.6
go.mongodb.org/mongo-driver v1.11.0
go.mongodb.org/mongo-driver v1.11.1
go.opencensus.io v0.24.0 // indirect go.opencensus.io v0.24.0 // indirect
gocloud.dev v0.27.0 gocloud.dev v0.27.0
gocloud.dev/pubsub/natspubsub v0.27.0 gocloud.dev/pubsub/natspubsub v0.27.0
@ -115,51 +115,52 @@ require (
golang.org/x/crypto v0.1.0 // indirect golang.org/x/crypto v0.1.0 // indirect
golang.org/x/exp v0.0.0-20220414153411-bcd21879b8fd golang.org/x/exp v0.0.0-20220414153411-bcd21879b8fd
golang.org/x/image v0.0.0-20200119044424-58c23975cae1 golang.org/x/image v0.0.0-20200119044424-58c23975cae1
golang.org/x/net v0.2.0
golang.org/x/net v0.4.0
golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 // indirect golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 // indirect
golang.org/x/sys v0.2.0
golang.org/x/text v0.4.0 // indirect
golang.org/x/tools v0.3.0
golang.org/x/sys v0.3.0
golang.org/x/text v0.5.0 // indirect
golang.org/x/tools v0.4.0
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
google.golang.org/api v0.103.0
google.golang.org/api v0.105.0
google.golang.org/appengine v1.6.7 // indirect google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c // indirect
google.golang.org/genproto v0.0.0-20221206210731-b1a01be3a5f6 // indirect
google.golang.org/grpc v1.51.0 google.golang.org/grpc v1.51.0
google.golang.org/protobuf v1.28.1 google.golang.org/protobuf v1.28.1
gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect
modernc.org/b v1.0.0 // indirect modernc.org/b v1.0.0 // indirect
modernc.org/cc/v3 v3.40.0 // indirect modernc.org/cc/v3 v3.40.0 // indirect
modernc.org/ccgo/v3 v3.16.13 // indirect modernc.org/ccgo/v3 v3.16.13 // indirect
modernc.org/libc v1.21.4 // indirect
modernc.org/libc v1.21.5 // indirect
modernc.org/mathutil v1.5.0 // indirect modernc.org/mathutil v1.5.0 // indirect
modernc.org/memory v1.4.0 // indirect modernc.org/memory v1.4.0 // indirect
modernc.org/opt v0.1.3 // indirect modernc.org/opt v0.1.3 // indirect
modernc.org/sqlite v1.19.4
modernc.org/sqlite v1.20.0
modernc.org/strutil v1.1.3 modernc.org/strutil v1.1.3
modernc.org/token v1.0.1 // indirect modernc.org/token v1.0.1 // indirect
) )
require ( require (
github.com/Jille/raft-grpc-transport v1.3.0 github.com/Jille/raft-grpc-transport v1.3.0
github.com/arangodb/go-driver v1.4.0
github.com/arangodb/go-driver v1.4.1
github.com/armon/go-metrics v0.4.1 github.com/armon/go-metrics v0.4.1
github.com/fluent/fluent-logger-golang v1.9.0 github.com/fluent/fluent-logger-golang v1.9.0
github.com/google/flatbuffers v22.10.26+incompatible
github.com/google/flatbuffers v22.11.23+incompatible
github.com/hanwen/go-fuse/v2 v2.1.1-0.20220627082937-d01fda7edf17 github.com/hanwen/go-fuse/v2 v2.1.1-0.20220627082937-d01fda7edf17
github.com/hashicorp/raft v1.3.11 github.com/hashicorp/raft v1.3.11
github.com/hashicorp/raft-boltdb/v2 v2.2.2 github.com/hashicorp/raft-boltdb/v2 v2.2.2
github.com/rabbitmq/amqp091-go v1.5.0 github.com/rabbitmq/amqp091-go v1.5.0
github.com/tikv/client-go/v2 v2.0.2
github.com/schollz/progressbar/v3 v3.12.2
github.com/tikv/client-go/v2 v2.0.3
github.com/ydb-platform/ydb-go-sdk-auth-environ v0.1.2 github.com/ydb-platform/ydb-go-sdk-auth-environ v0.1.2
github.com/ydb-platform/ydb-go-sdk/v3 v3.40.0
github.com/ydb-platform/ydb-go-sdk/v3 v3.41.0
golang.org/x/sync v0.1.0 golang.org/x/sync v0.1.0
google.golang.org/grpc/security/advancedtls v0.0.0-20220622233350-5cdb09fa29c1 google.golang.org/grpc/security/advancedtls v0.0.0-20220622233350-5cdb09fa29c1
) )
require ( require (
cloud.google.com/go/compute v1.12.1 // indirect
cloud.google.com/go/compute/metadata v0.2.1 // indirect
cloud.google.com/go/iam v0.6.0 // indirect
cloud.google.com/go/compute v1.13.0 // indirect
cloud.google.com/go/compute/metadata v0.2.2 // indirect
cloud.google.com/go/iam v0.8.0 // indirect
github.com/arangodb/go-velocypack v0.0.0-20200318135517-5af53c29c67e // indirect github.com/arangodb/go-velocypack v0.0.0-20200318135517-5af53c29c67e // indirect
github.com/aws/aws-sdk-go-v2 v1.16.8 // indirect github.com/aws/aws-sdk-go-v2 v1.16.8 // indirect
github.com/aws/aws-sdk-go-v2/config v1.15.15 // indirect github.com/aws/aws-sdk-go-v2/config v1.15.15 // indirect
@ -196,8 +197,9 @@ require (
github.com/josharian/intern v1.0.0 // indirect github.com/josharian/intern v1.0.0 // indirect
github.com/klauspost/cpuid/v2 v2.1.1 // indirect github.com/klauspost/cpuid/v2 v2.1.1 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-runewidth v0.0.7 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/mattn/go-sqlite3 v2.0.1+incompatible // indirect github.com/mattn/go-sqlite3 v2.0.1+incompatible // indirect
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
github.com/montanaflynn/stats v0.6.6 // indirect github.com/montanaflynn/stats v0.6.6 // indirect
github.com/nats-io/nats.go v1.16.0 // indirect github.com/nats-io/nats.go v1.16.0 // indirect
github.com/nats-io/nkeys v0.3.0 // indirect github.com/nats-io/nkeys v0.3.0 // indirect
@ -208,8 +210,9 @@ require (
github.com/pierrec/lz4/v4 v4.1.17 // indirect github.com/pierrec/lz4/v4 v4.1.17 // indirect
github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c // indirect github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c // indirect
github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00 // indirect github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00 // indirect
github.com/pingcap/kvproto v0.0.0-20221026112947-f8d61344b172 // indirect
github.com/pingcap/kvproto v0.0.0-20221129023506-621ec37aac7a // indirect
github.com/pingcap/log v1.1.1-0.20221015072633-39906604fb81 // indirect github.com/pingcap/log v1.1.1-0.20221015072633-39906604fb81 // indirect
github.com/rivo/uniseg v0.4.3 // indirect
github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd // indirect github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd // indirect
github.com/spf13/pflag v1.0.5 // indirect github.com/spf13/pflag v1.0.5 // indirect
github.com/stathat/consistent v1.0.0 // indirect github.com/stathat/consistent v1.0.0 // indirect
@ -218,16 +221,17 @@ require (
github.com/tinylib/msgp v1.1.6 // indirect github.com/tinylib/msgp v1.1.6 // indirect
github.com/twmb/murmur3 v1.1.3 // indirect github.com/twmb/murmur3 v1.1.3 // indirect
github.com/yandex-cloud/go-genproto v0.0.0-20211115083454-9ca41db5ed9e // indirect github.com/yandex-cloud/go-genproto v0.0.0-20211115083454-9ca41db5ed9e // indirect
github.com/ydb-platform/ydb-go-genproto v0.0.0-20220922065549-66df47a830ba // indirect
github.com/ydb-platform/ydb-go-genproto v0.0.0-20221215182650-986f9d10542f // indirect
github.com/ydb-platform/ydb-go-yc v0.8.3 // indirect github.com/ydb-platform/ydb-go-yc v0.8.3 // indirect
github.com/ydb-platform/ydb-go-yc-metadata v0.5.2 // indirect github.com/ydb-platform/ydb-go-yc-metadata v0.5.2 // indirect
go.etcd.io/bbolt v1.3.6 // indirect go.etcd.io/bbolt v1.3.6 // indirect
go.etcd.io/etcd/api/v3 v3.5.6 // indirect go.etcd.io/etcd/api/v3 v3.5.6 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.6 // indirect go.etcd.io/etcd/client/pkg/v3 v3.5.6 // indirect
go.uber.org/atomic v1.9.0 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/multierr v1.8.0 // indirect go.uber.org/multierr v1.8.0 // indirect
go.uber.org/zap v1.22.0 // indirect go.uber.org/zap v1.22.0 // indirect
golang.org/x/mod v0.7.0 // indirect golang.org/x/mod v0.7.0 // indirect
golang.org/x/term v0.3.0 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect

129
go.sum

@ -51,21 +51,21 @@ cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6m
cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s=
cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU=
cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U=
cloud.google.com/go/compute v1.12.1 h1:gKVJMEyqV5c/UnpzjjQbo3Rjvvqpr9B1DFSbJC4OXr0=
cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU=
cloud.google.com/go/compute/metadata v0.2.1 h1:efOwf5ymceDhK6PKMnnrTHP4pppY5L22mle96M1yP48=
cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM=
cloud.google.com/go/compute v1.13.0 h1:AYrLkB8NPdDRslNp4Jxmzrhdr03fUAIDbiGFjLWowoU=
cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE=
cloud.google.com/go/compute/metadata v0.2.2 h1:aWKAjYaBaOSrpKl57+jnS/3fJRQnxL7TvR/u1VVbt6k=
cloud.google.com/go/compute/metadata v0.2.2/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY= cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY=
cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c=
cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY=
cloud.google.com/go/iam v0.6.0 h1:nsqQC88kT5Iwlm4MeNGTpfMWddp6NB/UOLFTH6m1QfQ=
cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc=
cloud.google.com/go/iam v0.8.0 h1:E2osAkZzxI/+8pZcxVLcDtAQx/u+hZXVryUaYQ5O0Kk=
cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE=
cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA=
cloud.google.com/go/kms v1.5.0 h1:uc58n3b/n/F2yDMJzHMbXORkJSh3fzO4/+jju6eR7Zg=
cloud.google.com/go/longrunning v0.1.1 h1:y50CXG4j0+qvEukslYFBCrzaXX0qpFbBzc3PchSu/LE=
cloud.google.com/go/kms v1.6.0 h1:OWRZzrPmOZUzurjI2FBGtgY2mB1WaJkqhw6oIwSj0Yg=
cloud.google.com/go/longrunning v0.3.0 h1:NjljC+FYPV3uh5/OwWT6pVU+doBqMg2x/rZlE+CamDs=
cloud.google.com/go/monitoring v1.1.0/go.mod h1:L81pzz7HKn14QCMaCs6NTQkdBnE87TElyanS95vIcl4= cloud.google.com/go/monitoring v1.1.0/go.mod h1:L81pzz7HKn14QCMaCs6NTQkdBnE87TElyanS95vIcl4=
cloud.google.com/go/monitoring v1.5.0/go.mod h1:/o9y8NYX5j91JjD/JvGLYbi86kL11OjyJXq2XziLJu4= cloud.google.com/go/monitoring v1.5.0/go.mod h1:/o9y8NYX5j91JjD/JvGLYbi86kL11OjyJXq2XziLJu4=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
@ -73,8 +73,8 @@ cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
cloud.google.com/go/pubsub v1.24.0/go.mod h1:rWv09Te1SsRpRGPiWOMDKraMQTJyJps4MkUCoMGUgqw= cloud.google.com/go/pubsub v1.24.0/go.mod h1:rWv09Te1SsRpRGPiWOMDKraMQTJyJps4MkUCoMGUgqw=
cloud.google.com/go/pubsub v1.26.0 h1:Y/HcMxVXgkUV2pYeLMUkclMg0ue6U0jVyI5xEARQ4zA=
cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI=
cloud.google.com/go/pubsub v1.28.0 h1:XzabfdPx/+eNrsVVGLFgeUnQQKPGkMb8klRCeYK52is=
cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8=
cloud.google.com/go/secretmanager v1.5.0/go.mod h1:5C9kM+RwSpkURNovKySkNvGQLUaOgyoR5W0RUx2SyHQ= cloud.google.com/go/secretmanager v1.5.0/go.mod h1:5C9kM+RwSpkURNovKySkNvGQLUaOgyoR5W0RUx2SyHQ=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
@ -85,8 +85,8 @@ cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3f
cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc=
cloud.google.com/go/storage v1.24.0/go.mod h1:3xrJEFMXBsQLgxwThyjuD3aYlroL0TMRec1ypGUQ0KE= cloud.google.com/go/storage v1.24.0/go.mod h1:3xrJEFMXBsQLgxwThyjuD3aYlroL0TMRec1ypGUQ0KE=
cloud.google.com/go/storage v1.28.0 h1:DLrIZ6xkeZX6K70fU/boWx5INJumt6f+nwwWSHXzzGY=
cloud.google.com/go/storage v1.28.0/go.mod h1:qlgZML35PXA3zoEnIkiPLY4/TOkUleufRlu6qmcf7sI=
cloud.google.com/go/storage v1.28.1 h1:F5QDG5ChchaAVQhINh24U99OWHURqrW8OmQcGKXcbgI=
cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y=
cloud.google.com/go/trace v1.0.0/go.mod h1:4iErSByzxkyHWzzlAj63/Gmjz0NH1ASqhJguHpGcr6A= cloud.google.com/go/trace v1.0.0/go.mod h1:4iErSByzxkyHWzzlAj63/Gmjz0NH1ASqhJguHpGcr6A=
cloud.google.com/go/trace v1.2.0/go.mod h1:Wc8y/uYyOhPy12KEnXG9XGrvfMz5F5SrYecQlbW1rwM= cloud.google.com/go/trace v1.2.0/go.mod h1:Wc8y/uYyOhPy12KEnXG9XGrvfMz5F5SrYecQlbW1rwM=
code.cloudfoundry.org/clock v0.0.0-20180518195852-02e53af36e6c/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= code.cloudfoundry.org/clock v0.0.0-20180518195852-02e53af36e6c/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8=
@ -211,8 +211,8 @@ github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQY
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/arangodb/go-driver v1.4.0 h1:uNCbVYkr5ZP3hIVUP6wqjOVyhMYOL9NDmR762tIeYP0=
github.com/arangodb/go-driver v1.4.0/go.mod h1:5GAx3XvK72DJPhJgyjZOtYAGc4SpY7rZDb3LyhCvLcQ=
github.com/arangodb/go-driver v1.4.1 h1:Jg0N7XKxiKwjswmAcMCnefWmt81KJEqybqRAGJDRWlo=
github.com/arangodb/go-driver v1.4.1/go.mod h1:UTtaxTUMmyPWzKc2dsWWOZzZ3yM6aHWxn/eubGa3YmQ=
github.com/arangodb/go-velocypack v0.0.0-20200318135517-5af53c29c67e h1:Xg+hGrY2LcQBbxd0ZFdbGSyRKTYMZCfBbw/pMJFOk1g= github.com/arangodb/go-velocypack v0.0.0-20200318135517-5af53c29c67e h1:Xg+hGrY2LcQBbxd0ZFdbGSyRKTYMZCfBbw/pMJFOk1g=
github.com/arangodb/go-velocypack v0.0.0-20200318135517-5af53c29c67e/go.mod h1:mq7Shfa/CaixoDxiyAAc5jZ6CVBAyPaNQCGS7mkj4Ho= github.com/arangodb/go-velocypack v0.0.0-20200318135517-5af53c29c67e/go.mod h1:mq7Shfa/CaixoDxiyAAc5jZ6CVBAyPaNQCGS7mkj4Ho=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
@ -239,8 +239,8 @@ github.com/aws/aws-sdk-go v1.43.11/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4
github.com/aws/aws-sdk-go v1.43.31/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/aws/aws-sdk-go v1.43.31/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/aws/aws-sdk-go v1.44.45/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/aws/aws-sdk-go v1.44.45/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/aws/aws-sdk-go v1.44.68/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/aws/aws-sdk-go v1.44.68/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/aws/aws-sdk-go v1.44.136 h1:J1KJJssa8pjU8jETYUxwRS37KTcxjACfKd9GK8t+5ZU=
github.com/aws/aws-sdk-go v1.44.136/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-sdk-go v1.44.167 h1:kQmBhGdZkQLU7AiHShSkBJ15zr8agy0QeaxXduvyp2E=
github.com/aws/aws-sdk-go v1.44.167/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/aws/aws-sdk-go-v2 v1.16.8 h1:gOe9UPR98XSf7oEJCcojYg+N2/jCRm4DdeIsP85pIyQ= github.com/aws/aws-sdk-go-v2 v1.16.8 h1:gOe9UPR98XSf7oEJCcojYg+N2/jCRm4DdeIsP85pIyQ=
github.com/aws/aws-sdk-go-v2 v1.16.8/go.mod h1:6CpKuLXg2w7If3ABZCl/qZ6rEgwtjZTn4eAf4RcEyuw= github.com/aws/aws-sdk-go-v2 v1.16.8/go.mod h1:6CpKuLXg2w7If3ABZCl/qZ6rEgwtjZTn4eAf4RcEyuw=
@ -453,7 +453,6 @@ github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-iptables v0.4.3/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q=
@ -490,7 +489,6 @@ github.com/d4l3k/messagediff v1.2.1/go.mod h1:Oozbb1TVXFac9FtSIxHBMnBCq2qeH/2KkE
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dchest/uniuri v0.0.0-20160212164326-8902c56451e9/go.mod h1:GgB8SF9nRG+GqaDtLcwJZsQFhcogVCJ79j4EdT0c2V4=
github.com/denisenkom/go-mssqldb v0.12.2/go.mod h1:lnIw1mZukFRZDJYQ0Pb833QS2IaC3l5HkEfra2LJ+sk= github.com/denisenkom/go-mssqldb v0.12.2/go.mod h1:lnIw1mZukFRZDJYQ0Pb833QS2IaC3l5HkEfra2LJ+sk=
github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
@ -678,12 +676,13 @@ github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC
github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
github.com/go-redis/redis/v9 v9.0.0-beta.2 h1:ZSr84TsnQyKMAg8gnV+oawuQezeJR11/09THcWCQzr4= github.com/go-redis/redis/v9 v9.0.0-beta.2 h1:ZSr84TsnQyKMAg8gnV+oawuQezeJR11/09THcWCQzr4=
github.com/go-redis/redis/v9 v9.0.0-beta.2/go.mod h1:Bldcd/M/bm9HbnNPi/LUtYBSD8ttcZYBMupwMXhdU0o= github.com/go-redis/redis/v9 v9.0.0-beta.2/go.mod h1:Bldcd/M/bm9HbnNPi/LUtYBSD8ttcZYBMupwMXhdU0o=
github.com/go-redsync/redsync/v4 v4.6.0 h1:CXpvsHB3XzktCleBu2Vo9Df0/qInrTG3jgzhvLzyk+U=
github.com/go-redsync/redsync/v4 v4.6.0/go.mod h1:IxV3sygNwjOERTXrj3XvNMSb1tgNgic8GvM8alwnWcM=
github.com/go-redsync/redsync/v4 v4.7.1 h1:j5rmHCdN5qCEWp5oA2XEbGwtD4LZblqkhbcjCUsfNhs=
github.com/go-redsync/redsync/v4 v4.7.1/go.mod h1:IxV3sygNwjOERTXrj3XvNMSb1tgNgic8GvM8alwnWcM=
github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48/go.mod h1:dZGr0i9PLlaaTD4H/hoZIDjQ+r6xq8mgbRzHZf7f2J8= github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48/go.mod h1:dZGr0i9PLlaaTD4H/hoZIDjQ+r6xq8mgbRzHZf7f2J8=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
@ -797,8 +796,8 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
github.com/google/flatbuffers v22.10.26+incompatible h1:z1QiaMyPu1x3Z6xf2u1dsLj1ZxicdGSeaLpCuIsQNZM=
github.com/google/flatbuffers v22.10.26+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/flatbuffers v22.11.23+incompatible h1:334TygA7iuxt0hoamawsM36xoui01YiouEZnr0qeFMI=
github.com/google/flatbuffers v22.11.23+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@ -854,6 +853,7 @@ github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20220318212150-b2ab0324ddda/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= github.com/google/pprof v0.0.0-20220318212150-b2ab0324ddda/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
github.com/google/pprof v0.0.0-20220608213341-c488b8fa1db3/go.mod h1:gSuNB+gJaOiQKLEZ+q+PK9Mq3SOzhRcw2GsGS/FhYDk= github.com/google/pprof v0.0.0-20220608213341-c488b8fa1db3/go.mod h1:gSuNB+gJaOiQKLEZ+q+PK9Mq3SOzhRcw2GsGS/FhYDk=
github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 h1:Xim43kblpZXfIBQsbuBVKCudVG457BR2GZFIz3uw3hQ=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@ -1090,6 +1090,7 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/k0kubun/go-ansi v0.0.0-20180517002512-3bf9e2903213/go.mod h1:vNUNkEQ1e29fT/6vq2aBdFsgNPmy8qMdSay1npru+Sw=
github.com/karlseguin/ccache/v2 v2.0.8 h1:lT38cE//uyf6KcFok0rlgXtGFBWxkI6h/qg4tbFyDnA= github.com/karlseguin/ccache/v2 v2.0.8 h1:lT38cE//uyf6KcFok0rlgXtGFBWxkI6h/qg4tbFyDnA=
github.com/karlseguin/ccache/v2 v2.0.8/go.mod h1:2BDThcfQMf/c0jnZowt16eW405XIqZPavt+HoYEtcxQ= github.com/karlseguin/ccache/v2 v2.0.8/go.mod h1:2BDThcfQMf/c0jnZowt16eW405XIqZPavt+HoYEtcxQ=
github.com/karlseguin/expect v1.0.2-0.20190806010014-778a5f0c6003 h1:vJ0Snvo+SLMY72r5J4sEfkuE7AFbixEP2qRbEcum/wA= github.com/karlseguin/expect v1.0.2-0.20190806010014-778a5f0c6003 h1:vJ0Snvo+SLMY72r5J4sEfkuE7AFbixEP2qRbEcum/wA=
@ -1112,8 +1113,8 @@ github.com/klauspost/compress v1.15.11 h1:Lcadnb3RKGin4FYM/orgq0qde+nc15E5Cbqg4B
github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
github.com/klauspost/cpuid/v2 v2.1.1 h1:t0wUqjowdm8ezddV5k0tLWVklVuvLJpoHeb4WBdydm0= github.com/klauspost/cpuid/v2 v2.1.1 h1:t0wUqjowdm8ezddV5k0tLWVklVuvLJpoHeb4WBdydm0=
github.com/klauspost/cpuid/v2 v2.1.1/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= github.com/klauspost/cpuid/v2 v2.1.1/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
github.com/klauspost/reedsolomon v1.11.1 h1:0gCWQXOB8pVe1Y5SGozDA5t2qoVxX3prsV+qHgI/Fik=
github.com/klauspost/reedsolomon v1.11.1/go.mod h1:FXLZzlJIdfqEnQLdUKWNRuMZg747hZ4oYp2Ml60Lb/k=
github.com/klauspost/reedsolomon v1.11.3 h1:rX9UNNvDhJ0Bq45y6uBy/eYehcjyz5faokTuZmu1Q9U=
github.com/klauspost/reedsolomon v1.11.3/go.mod h1:FXLZzlJIdfqEnQLdUKWNRuMZg747hZ4oYp2Ml60Lb/k=
github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@ -1192,8 +1193,8 @@ github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peK
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54=
github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
@ -1216,6 +1217,8 @@ github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLT
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ=
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
@ -1390,8 +1393,9 @@ github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c/go.mod h1:X2r9ue
github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00 h1:C3N3itkduZXDZFh4N3vQ5HEtld3S+Y+StULhWVvumU0= github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00 h1:C3N3itkduZXDZFh4N3vQ5HEtld3S+Y+StULhWVvumU0=
github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00/go.mod h1:4qGtCB0QK0wBzKtFEGDhxXnSnbQApw1gc9siScUl8ew= github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00/go.mod h1:4qGtCB0QK0wBzKtFEGDhxXnSnbQApw1gc9siScUl8ew=
github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 h1:surzm05a8C9dN8dIUmo4Be2+pMRb6f55i+UIYrluu2E= github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 h1:surzm05a8C9dN8dIUmo4Be2+pMRb6f55i+UIYrluu2E=
github.com/pingcap/kvproto v0.0.0-20221026112947-f8d61344b172 h1:FYgKV9znRQmzVrrJDZ0gUfMIvKLAMU1tu1UKJib8bEQ=
github.com/pingcap/kvproto v0.0.0-20221026112947-f8d61344b172/go.mod h1:OYtxs0786qojVTmkVeufx93xe+jUgm56GUYRIKnmaGI= github.com/pingcap/kvproto v0.0.0-20221026112947-f8d61344b172/go.mod h1:OYtxs0786qojVTmkVeufx93xe+jUgm56GUYRIKnmaGI=
github.com/pingcap/kvproto v0.0.0-20221129023506-621ec37aac7a h1:LzIZsQpXQlj8yF7+yvyOg680OaPq7bmPuDuszgXfHsw=
github.com/pingcap/kvproto v0.0.0-20221129023506-621ec37aac7a/go.mod h1:OYtxs0786qojVTmkVeufx93xe+jUgm56GUYRIKnmaGI=
github.com/pingcap/log v1.1.1-0.20221015072633-39906604fb81 h1:URLoJ61DmmY++Sa/yyPEQHG2s/ZBeV1FbIswHEMrdoY= github.com/pingcap/log v1.1.1-0.20221015072633-39906604fb81 h1:URLoJ61DmmY++Sa/yyPEQHG2s/ZBeV1FbIswHEMrdoY=
github.com/pingcap/log v1.1.1-0.20221015072633-39906604fb81/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4= github.com/pingcap/log v1.1.1-0.20221015072633-39906604fb81/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4=
github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA=
@ -1474,8 +1478,8 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
github.com/prometheus/prometheus v0.35.0/go.mod h1:7HaLx5kEPKJ0GDgbODG0fZgXbQ8K/XjZNJXQmbmgQlY= github.com/prometheus/prometheus v0.35.0/go.mod h1:7HaLx5kEPKJ0GDgbODG0fZgXbQ8K/XjZNJXQmbmgQlY=
github.com/prometheus/prometheus v0.37.0/go.mod h1:egARUgz+K93zwqsVIAneFlLZefyGOON44WyAp4Xqbbk= github.com/prometheus/prometheus v0.37.0/go.mod h1:egARUgz+K93zwqsVIAneFlLZefyGOON44WyAp4Xqbbk=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
@ -1488,6 +1492,9 @@ github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5X
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.3 h1:utMvzDsuh3suAEnhH0RdHmoPbU648o6CvXxTx4SBMOw=
github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
@ -1499,7 +1506,6 @@ github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
github.com/rs/zerolog v1.19.0/go.mod h1:IzD0RJ65iWH0w97OQQebJEvTZYvsCUm9WVLWBQrJRjo=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd h1:CmH9+J6ZSsIjUK3dcGsnCnO41eRBOnY12zwkn5qVwgc= github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd h1:CmH9+J6ZSsIjUK3dcGsnCnO41eRBOnY12zwkn5qVwgc=
github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk= github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk=
@ -1510,6 +1516,8 @@ github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1/go.mod h1:Z0q5wiB
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.9/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.9/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
github.com/schollz/progressbar/v3 v3.12.2 h1:yLqqqpQNMxGxHY8uEshRihaHWwa0rf0yb7/Zrpgq2C0=
github.com/schollz/progressbar/v3 v3.12.2/go.mod h1:HFJYIYQQJX32UJdyoigUl19xoV6aMwZt6iX/C30RWfg=
github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw=
github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
@ -1610,15 +1618,15 @@ github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965 h1:1oFLiOyVl+W7
github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA= github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA=
github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
github.com/tedsuo/ifrit v0.0.0-20180802180643-bea94bb476cc/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= github.com/tedsuo/ifrit v0.0.0-20180802180643-bea94bb476cc/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0=
github.com/tidwall/gjson v1.14.3 h1:9jvXn7olKEHU1S9vwoMGliaT8jq1vJ7IH/n9zD9Dnlw=
github.com/tidwall/gjson v1.14.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/gjson v1.14.4 h1:uo0p8EbA09J7RQaflQ1aBRffTR7xedD2bcIVSYxLnkM=
github.com/tidwall/gjson v1.14.4/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
github.com/tikv/client-go/v2 v2.0.2 h1:2kBozATh8SEnZONEM0Eeib+5wZ1J8bfjj3wTypSbvtU=
github.com/tikv/client-go/v2 v2.0.2/go.mod h1:X9s4ct/MLk1sFqe5mU79KClKegLFDTa/FCx3hzexGtk=
github.com/tikv/client-go/v2 v2.0.3 h1:/glZOHs/K2pkCioDVae+aThUHFYRYQkEgY4NUTgfh+s=
github.com/tikv/client-go/v2 v2.0.3/go.mod h1:MDT4J9LzgS7Bj1DnEq6Gk/puy6mp8TgUC92zGEVVLLg=
github.com/tikv/pd/client v0.0.0-20221031025758-80f0d8ca4d07 h1:ckPpxKcl75mO2N6a4cJXiZH43hvcHPpqc9dh1TmH1nc= github.com/tikv/pd/client v0.0.0-20221031025758-80f0d8ca4d07 h1:ckPpxKcl75mO2N6a4cJXiZH43hvcHPpqc9dh1TmH1nc=
github.com/tikv/pd/client v0.0.0-20221031025758-80f0d8ca4d07/go.mod h1:CipBxPfxPUME+BImx9MUYXCnAVLS3VJUr3mnSJwh40A= github.com/tikv/pd/client v0.0.0-20221031025758-80f0d8ca4d07/go.mod h1:CipBxPfxPUME+BImx9MUYXCnAVLS3VJUr3mnSJwh40A=
github.com/tinylib/msgp v1.1.6 h1:i+SbKraHhnrf9M5MYmvQhFnbLhAXSDWF8WWsuyRdocw= github.com/tinylib/msgp v1.1.6 h1:i+SbKraHhnrf9M5MYmvQhFnbLhAXSDWF8WWsuyRdocw=
@ -1679,13 +1687,13 @@ github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:
github.com/yandex-cloud/go-genproto v0.0.0-20211115083454-9ca41db5ed9e h1:9LPdmD1vqadsDQUva6t2O9MbnyvoOgo8nFNPaOIH5U8= github.com/yandex-cloud/go-genproto v0.0.0-20211115083454-9ca41db5ed9e h1:9LPdmD1vqadsDQUva6t2O9MbnyvoOgo8nFNPaOIH5U8=
github.com/yandex-cloud/go-genproto v0.0.0-20211115083454-9ca41db5ed9e/go.mod h1:HEUYX/p8966tMUHHT+TsS0hF/Ca/NYwqprC5WXSDMfE= github.com/yandex-cloud/go-genproto v0.0.0-20211115083454-9ca41db5ed9e/go.mod h1:HEUYX/p8966tMUHHT+TsS0hF/Ca/NYwqprC5WXSDMfE=
github.com/ydb-platform/ydb-go-genproto v0.0.0-20220203104745-929cf9c248bc/go.mod h1:cc138nptTn9eKptCQl/grxP6pBKpo/bnXDiOxuVZtps= github.com/ydb-platform/ydb-go-genproto v0.0.0-20220203104745-929cf9c248bc/go.mod h1:cc138nptTn9eKptCQl/grxP6pBKpo/bnXDiOxuVZtps=
github.com/ydb-platform/ydb-go-genproto v0.0.0-20220922065549-66df47a830ba h1:htUAISxEY5MfCLmubsf2EMgN+H62cQTNcTseK5F4cJ0=
github.com/ydb-platform/ydb-go-genproto v0.0.0-20220922065549-66df47a830ba/go.mod h1:Er+FePu1dNUieD+XTMDduGpQuCPssK5Q4BjF+IIXJ3I=
github.com/ydb-platform/ydb-go-genproto v0.0.0-20221215182650-986f9d10542f h1:BBczNIM1MJHT7XkIUA8pThXWxJvxoBjcWvne3xwe2RI=
github.com/ydb-platform/ydb-go-genproto v0.0.0-20221215182650-986f9d10542f/go.mod h1:Er+FePu1dNUieD+XTMDduGpQuCPssK5Q4BjF+IIXJ3I=
github.com/ydb-platform/ydb-go-sdk-auth-environ v0.1.2 h1:EYSI1kulnHb0H0zt3yOw4cRj4ABMSMGwNe43D+fX7e4= github.com/ydb-platform/ydb-go-sdk-auth-environ v0.1.2 h1:EYSI1kulnHb0H0zt3yOw4cRj4ABMSMGwNe43D+fX7e4=
github.com/ydb-platform/ydb-go-sdk-auth-environ v0.1.2/go.mod h1:Xfjce+VMU9yJVr1lj60yK2fFPWjB4jr/4cp3K7cjzi4= github.com/ydb-platform/ydb-go-sdk-auth-environ v0.1.2/go.mod h1:Xfjce+VMU9yJVr1lj60yK2fFPWjB4jr/4cp3K7cjzi4=
github.com/ydb-platform/ydb-go-sdk/v3 v3.25.3/go.mod h1:PFizF/vJsdAgEwjK3DVSBD52kdmRkWfSIS2q2pA+e88= github.com/ydb-platform/ydb-go-sdk/v3 v3.25.3/go.mod h1:PFizF/vJsdAgEwjK3DVSBD52kdmRkWfSIS2q2pA+e88=
github.com/ydb-platform/ydb-go-sdk/v3 v3.40.0 h1:2ZpiP9RuGUi1qGNlleWTXS9ZeT/6WLjmrPQMNWJkQvw=
github.com/ydb-platform/ydb-go-sdk/v3 v3.40.0/go.mod h1:hJqWSE2NZ2o2c9geHtRJee+xwiHgEfQX9koBZPLTfHY=
github.com/ydb-platform/ydb-go-sdk/v3 v3.41.0 h1:2Kfj7I9EtQ8/o6gLB0mbShrZ/tC3/GFyjVE9z5CCLA0=
github.com/ydb-platform/ydb-go-sdk/v3 v3.41.0/go.mod h1:4bSfSb2PyBtmbFYsaVg96YxK1NjYBA3opn2o6IYvgZ8=
github.com/ydb-platform/ydb-go-yc v0.8.3 h1:92UUUMsfvtMl6mho8eQ9lbkiPrF3a9CT+RrVRAKNRwo= github.com/ydb-platform/ydb-go-yc v0.8.3 h1:92UUUMsfvtMl6mho8eQ9lbkiPrF3a9CT+RrVRAKNRwo=
github.com/ydb-platform/ydb-go-yc v0.8.3/go.mod h1:zUolAFGzJ5XG8uwiseTLr9Lapm7L7hdVdZgLSuv9FXE= github.com/ydb-platform/ydb-go-yc v0.8.3/go.mod h1:zUolAFGzJ5XG8uwiseTLr9Lapm7L7hdVdZgLSuv9FXE=
github.com/ydb-platform/ydb-go-yc-metadata v0.5.2 h1:nMtixUijP0Z7iHJNT9fOL+dbmEzZxqU6Xk87ll7hqXg= github.com/ydb-platform/ydb-go-yc-metadata v0.5.2 h1:nMtixUijP0Z7iHJNT9fOL+dbmEzZxqU6Xk87ll7hqXg=
@ -1726,8 +1734,8 @@ go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVd
go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg=
go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng=
go.mongodb.org/mongo-driver v1.8.3/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= go.mongodb.org/mongo-driver v1.8.3/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY=
go.mongodb.org/mongo-driver v1.11.0 h1:FZKhBSTydeuffHj9CBjXlR8vQLee1cQyTWYPA6/tqiE=
go.mongodb.org/mongo-driver v1.11.0/go.mod h1:s7p5vEtfbeR1gYi6pnj3c3/urpbLv2T5Sfd6Rp2HBB8=
go.mongodb.org/mongo-driver v1.11.1 h1:QP0znIRTuL0jf1oBQoAoM0C6ZJfBK4kx0Uumtv1A7w8=
go.mongodb.org/mongo-driver v1.11.1/go.mod h1:s7p5vEtfbeR1gYi6pnj3c3/urpbLv2T5Sfd6Rp2HBB8=
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
go.opencensus.io v0.15.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0= go.opencensus.io v0.15.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0=
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
@ -1790,8 +1798,9 @@ go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/automaxprocs v1.5.1/go.mod h1:BF4eumQw0P9GtnuxxovUd06vwm1o18oMzFtK66vU6XU= go.uber.org/automaxprocs v1.5.1/go.mod h1:BF4eumQw0P9GtnuxxovUd06vwm1o18oMzFtK66vU6XU=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
@ -1989,8 +1998,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug
golang.org/x/net v0.0.0-20220725212005-46097bf591d3/go.mod h1:AaygXjzTFtRAg2ttMY5RMuhpJ3cNnI0XpyFJD1iQRSM= golang.org/x/net v0.0.0-20220725212005-46097bf591d3/go.mod h1:AaygXjzTFtRAg2ttMY5RMuhpJ3cNnI0XpyFJD1iQRSM=
golang.org/x/net v0.0.0-20220802222814-0bcc04d9c69b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.0.0-20220802222814-0bcc04d9c69b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU=
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU=
golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -2187,14 +2196,16 @@ golang.org/x/sys v0.0.0-20220731174439-a90be440212d/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI=
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -2204,8 +2215,9 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM=
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@ -2219,7 +2231,7 @@ golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxb
golang.org/x/time v0.0.0-20220224211638-0e9765cccd65/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220224211638-0e9765cccd65/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20220609170525-579cf78fd858/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220609170525-579cf78fd858/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af h1:Yx9k8YCG3dvF87UAn2tu2HQLf2dt/eR1bXxpLMWeH+Y=
golang.org/x/time v0.1.0 h1:xYY+Bajn2a7VBmTM5GikTmnK8ZuX8YgnQCqZpbBNtmA=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@ -2249,7 +2261,6 @@ golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgw
golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190828213141-aed303cbaa74/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@ -2308,8 +2319,8 @@ golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.3.0 h1:SrNbZl6ECOS1qFzgTdQfWXZM9XBkiA6tkFrH9YSTPHM=
golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k=
golang.org/x/tools v0.4.0 h1:7mTAgkunk3fr4GAloyyCasadO6h9zSsQZbwvcaIciV4=
golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -2369,8 +2380,8 @@ google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6F
google.golang.org/api v0.86.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= google.golang.org/api v0.86.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
google.golang.org/api v0.91.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= google.golang.org/api v0.91.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
google.golang.org/api v0.103.0 h1:9yuVqlu2JCvcLg9p8S3fcFLZij8EPSyvODIY1rkMizQ=
google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0=
google.golang.org/api v0.105.0 h1:t6P9Jj+6XTn4U9I2wycQai6Q/Kz7iOT+QzjJ3G2V4x8=
google.golang.org/api v0.105.0/go.mod h1:qh7eD5FJks5+BcE+cjBIm6Gz8vioK7EHvnlniqXBnqI=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@ -2483,8 +2494,8 @@ google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljW
google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
google.golang.org/genproto v0.0.0-20220802133213-ce4fa296bf78/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= google.golang.org/genproto v0.0.0-20220802133213-ce4fa296bf78/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc=
google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c h1:QgY/XxIAIeccR+Ca/rDdKubLIU9rcJ3xfy1DC/Wd2Oo=
google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo=
google.golang.org/genproto v0.0.0-20221206210731-b1a01be3a5f6 h1:AGXp12e/9rItf6/4QymU7WsAUwCf+ICW75cuR91nJIc=
google.golang.org/genproto v0.0.0-20221206210731-b1a01be3a5f6/go.mod h1:1dOng4TWOomJrDGhpXjfCD35wQC6jnC7HpRmOFRqEV0=
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
@ -2677,8 +2688,8 @@ modernc.org/ccgo/v3 v3.16.13 h1:Mkgdzl46i5F/CNR/Kj80Ri59hC8TKAhZrYSaqvkwzUw=
modernc.org/ccgo/v3 v3.16.13/go.mod h1:2Quk+5YgpImhPjv2Qsob1DnZ/4som1lJTodubIcoUkY= modernc.org/ccgo/v3 v3.16.13/go.mod h1:2Quk+5YgpImhPjv2Qsob1DnZ/4som1lJTodubIcoUkY=
modernc.org/ccorpus v1.11.6 h1:J16RXiiqiCgua6+ZvQot4yUuUy8zxgqbqEEUuGPlISk= modernc.org/ccorpus v1.11.6 h1:J16RXiiqiCgua6+ZvQot4yUuUy8zxgqbqEEUuGPlISk=
modernc.org/httpfs v1.0.6 h1:AAgIpFZRXuYnkjftxTAZwMIiwEqAfk8aVB2/oA6nAeM= modernc.org/httpfs v1.0.6 h1:AAgIpFZRXuYnkjftxTAZwMIiwEqAfk8aVB2/oA6nAeM=
modernc.org/libc v1.21.4 h1:CzTlumWeIbPV5/HVIMzYHNPCRP8uiU/CWiN2gtd/Qu8=
modernc.org/libc v1.21.4/go.mod h1:przBsL5RDOZajTVslkugzLBj1evTue36jEomFQOoYuI=
modernc.org/libc v1.21.5 h1:xBkU9fnHV+hvZuPSRszN0AXDG4M7nwPLwTWwkYcvLCI=
modernc.org/libc v1.21.5/go.mod h1:przBsL5RDOZajTVslkugzLBj1evTue36jEomFQOoYuI=
modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
modernc.org/mathutil v1.5.0 h1:rV0Ko/6SfM+8G+yKiyI830l3Wuz1zRutdslNoQ0kfiQ= modernc.org/mathutil v1.5.0 h1:rV0Ko/6SfM+8G+yKiyI830l3Wuz1zRutdslNoQ0kfiQ=
modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
@ -2686,8 +2697,8 @@ modernc.org/memory v1.4.0 h1:crykUfNSnMAXaOJnnxcSzbUGMqkLWjklJKkBK2nwZwk=
modernc.org/memory v1.4.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= modernc.org/memory v1.4.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU=
modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4= modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
modernc.org/sqlite v1.19.4 h1:nlPIDqumn6/mSvs7T5C8MNYEuN73sISzPdKtMdURpUI=
modernc.org/sqlite v1.19.4/go.mod h1:x/yZNb3h5+I3zGQSlwIv4REL5eJhiRkUH5MReogAeIc=
modernc.org/sqlite v1.20.0 h1:80zmD3BGkm8BZ5fUi/4lwJQHiO3GXgIUvZRXpoIfROY=
modernc.org/sqlite v1.20.0/go.mod h1:EsYz8rfOvLCiYTy5ZFsOYzoCcRMu98YYkwAcCw5YIYw=
modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
modernc.org/strutil v1.1.3 h1:fNMm+oJklMGYfU9Ylcywl0CO5O6nTfaowNsh2wpPjzY= modernc.org/strutil v1.1.3 h1:fNMm+oJklMGYfU9Ylcywl0CO5O6nTfaowNsh2wpPjzY=
modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw=

4
k8s/helm_charts2/Chart.yaml

@ -1,5 +1,5 @@
apiVersion: v1 apiVersion: v1
description: SeaweedFS description: SeaweedFS
name: seaweedfs name: seaweedfs
appVersion: "3.35"
version: "3.35"
appVersion: "3.37"
version: "3.37"

11
k8s/helm_charts2/templates/filer-statefulset.yaml

@ -101,7 +101,12 @@ spec:
- "/bin/sh" - "/bin/sh"
- "-ec" - "-ec"
- | - |
exec /usr/bin/weed -logdir=/logs \
exec /usr/bin/weed \
{{- if eq .Values.filer.logs.type "hostPath" }}
-logdir=/logs \
{{- else }}
-logtostderr=true \
{{- end }}
{{- if .Values.filer.loggingOverrideLevel }} {{- if .Values.filer.loggingOverrideLevel }}
-v={{ .Values.filer.loggingOverrideLevel }} \ -v={{ .Values.filer.loggingOverrideLevel }} \
{{- else }} {{- else }}
@ -225,13 +230,13 @@ spec:
{{- if eq .Values.filer.logs.type "hostPath" }} {{- if eq .Values.filer.logs.type "hostPath" }}
- name: seaweedfs-filer-log-volume - name: seaweedfs-filer-log-volume
hostPath: hostPath:
path: /storage/logs/seaweedfs/filer
path: {{ .Values.filer.logs.hostPathPrefix }}/logs/seaweedfs/filer
type: DirectoryOrCreate type: DirectoryOrCreate
{{- end }} {{- end }}
{{- if eq .Values.filer.data.type "hostPath" }} {{- if eq .Values.filer.data.type "hostPath" }}
- name: data-filer - name: data-filer
hostPath: hostPath:
path: /storage/filer_store
path: {{ .Values.filer.data.hostPathPrefix }}/filer_store
type: DirectoryOrCreate type: DirectoryOrCreate
{{- end }} {{- end }}
- name: db-schema-config-volume - name: db-schema-config-volume

11
k8s/helm_charts2/templates/master-statefulset.yaml

@ -90,7 +90,12 @@ spec:
- "/bin/sh" - "/bin/sh"
- "-ec" - "-ec"
- | - |
exec /usr/bin/weed -logdir=/logs \
exec /usr/bin/weed \
{{- if eq .Values.master.logs.type "hostPath" }}
-logdir=/logs \
{{- else }}
-logtostderr=true \
{{- end }}
{{- if .Values.master.loggingOverrideLevel }} {{- if .Values.master.loggingOverrideLevel }}
-v={{ .Values.master.loggingOverrideLevel }} \ -v={{ .Values.master.loggingOverrideLevel }} \
{{- else }} {{- else }}
@ -196,13 +201,13 @@ spec:
{{- if eq .Values.master.logs.type "hostPath" }} {{- if eq .Values.master.logs.type "hostPath" }}
- name: seaweedfs-master-log-volume - name: seaweedfs-master-log-volume
hostPath: hostPath:
path: /storage/logs/seaweedfs/master
path: {{ .Values.master.logs.hostPathPrefix }}/logs/seaweedfs/master
type: DirectoryOrCreate type: DirectoryOrCreate
{{- end }} {{- end }}
{{- if eq .Values.master.data.type "hostPath" }} {{- if eq .Values.master.data.type "hostPath" }}
- name: data-{{ .Release.Namespace }} - name: data-{{ .Release.Namespace }}
hostPath: hostPath:
path: /ssd/seaweed-master/
path: {{ .Values.master.data.hostPathPrefix }}/seaweed-master/
type: DirectoryOrCreate type: DirectoryOrCreate
{{- end }} {{- end }}
{{- if .Values.global.enableSecurity }} {{- if .Values.global.enableSecurity }}

10
k8s/helm_charts2/templates/s3-deployment.yaml

@ -72,13 +72,19 @@ spec:
- "/bin/sh" - "/bin/sh"
- "-ec" - "-ec"
- | - |
exec /usr/bin/weed -logdir=/logs \
exec /usr/bin/weed \
{{- if eq .Values.s3.logs.type "hostPath" }}
-logdir=/logs \
{{- else }}
-logtostderr=true \
{{- end }}
{{- if .Values.s3.loggingOverrideLevel }} {{- if .Values.s3.loggingOverrideLevel }}
-v={{ .Values.s3.loggingOverrideLevel }} \ -v={{ .Values.s3.loggingOverrideLevel }} \
{{- else }} {{- else }}
-v={{ .Values.global.loggingLevel }} \ -v={{ .Values.global.loggingLevel }} \
{{- end }} {{- end }}
s3 \ s3 \
-ip.bind={{ .Values.s3.bindAddress }} \
-port={{ .Values.s3.port }} \ -port={{ .Values.s3.port }} \
{{- if .Values.s3.metricsPort }} {{- if .Values.s3.metricsPort }}
-metricsPort {{ .Values.s3.metricsPort }} \ -metricsPort {{ .Values.s3.metricsPort }} \
@ -167,7 +173,7 @@ spec:
{{- if eq .Values.s3.logs.type "hostPath" }} {{- if eq .Values.s3.logs.type "hostPath" }}
- name: logs - name: logs
hostPath: hostPath:
path: /storage/logs/seaweedfs/s3
path: {{ .Values.s3.logs.hostPathPrefix }}/logs/seaweedfs/s3
type: DirectoryOrCreate type: DirectoryOrCreate
{{- end }} {{- end }}
{{- if .Values.global.enableSecurity }} {{- if .Values.global.enableSecurity }}

13
k8s/helm_charts2/templates/volume-statefulset.yaml

@ -93,7 +93,12 @@ spec:
- "/bin/sh" - "/bin/sh"
- "-ec" - "-ec"
- | - |
exec /usr/bin/weed -logdir=/logs \
exec /usr/bin/weed \
{{- if eq .Values.volume.logs.type "hostPath" }}
-logdir=/logs \
{{- else }}
-logtostderr=true \
{{- end }}
{{- if .Values.volume.loggingOverrideLevel }} {{- if .Values.volume.loggingOverrideLevel }}
-v={{ .Values.volume.loggingOverrideLevel }} \ -v={{ .Values.volume.loggingOverrideLevel }} \
{{- else }} {{- else }}
@ -206,19 +211,19 @@ spec:
{{- if eq .Values.volume.data.type "hostPath" }} {{- if eq .Values.volume.data.type "hostPath" }}
- name: data - name: data
hostPath: hostPath:
path: /storage/object_store/
path: {{ .Values.volume.data.hostPathPrefix }}/object_store/
type: DirectoryOrCreate type: DirectoryOrCreate
{{- end }} {{- end }}
{{- if and (eq .Values.volume.idx.type "hostPath") .Values.volume.dir_idx }} {{- if and (eq .Values.volume.idx.type "hostPath") .Values.volume.dir_idx }}
- name: idx - name: idx
hostPath: hostPath:
path: /ssd/seaweedfs-volume-idx/
path: {{ .Values.volume.idx.hostPathPrefix }}/seaweedfs-volume-idx/
type: DirectoryOrCreate type: DirectoryOrCreate
{{- end }} {{- end }}
{{- if eq .Values.volume.logs.type "hostPath" }} {{- if eq .Values.volume.logs.type "hostPath" }}
- name: logs - name: logs
hostPath: hostPath:
path: /storage/logs/seaweedfs/volume
path: {{ .Values.volume.logs.hostPathPrefix }}/logs/seaweedfs/volume
type: DirectoryOrCreate type: DirectoryOrCreate
{{- end }} {{- end }}
{{- if .Values.global.enableSecurity }} {{- if .Values.global.enableSecurity }}

11
k8s/helm_charts2/values.yaml

@ -68,11 +68,13 @@ master:
type: "hostPath" type: "hostPath"
size: "" size: ""
storageClass: "" storageClass: ""
hostPathPrefix: /ssd
logs: logs:
type: "hostPath" type: "hostPath"
size: "" size: ""
storageClass: "" storageClass: ""
hostPathPrefix: /storage
initContainers: "" initContainers: ""
@ -176,15 +178,19 @@ volume:
type: "hostPath" type: "hostPath"
size: "" size: ""
storageClass: "" storageClass: ""
hostPathPrefix: /storage
idx: idx:
type: "hostPath" type: "hostPath"
size: "" size: ""
storageClass: "" storageClass: ""
hostPathPrefix: /ssd
logs: logs:
type: "hostPath" type: "hostPath"
size: "" size: ""
storageClass: "" storageClass: ""
hostPathPrefix: /storage
# limit background compaction or copying speed in mega bytes per second # limit background compaction or copying speed in mega bytes per second
compactionMBps: "50" compactionMBps: "50"
@ -308,11 +314,13 @@ filer:
type: "hostPath" type: "hostPath"
size: "" size: ""
storageClass: "" storageClass: ""
hostPathPrefix: /storage
logs: logs:
type: "hostPath" type: "hostPath"
size: "" size: ""
storageClass: "" storageClass: ""
hostPathPrefix: /storage
initContainers: "" initContainers: ""
@ -418,6 +426,7 @@ s3:
imageTag: null imageTag: null
restartPolicy: null restartPolicy: null
replicas: 1 replicas: 1
bindAddress: 0.0.0.0
port: 8333 port: 8333
metricsPort: 9327 metricsPort: 9327
loggingOverrideLevel: null loggingOverrideLevel: null
@ -463,7 +472,7 @@ s3:
type: "hostPath" type: "hostPath"
size: "" size: ""
storageClass: "" storageClass: ""
hostPathPrefix: /storage
certificates: certificates:
commonName: "SeaweedFS CA" commonName: "SeaweedFS CA"

22
weed/Makefile

@ -15,44 +15,44 @@ clean:
debug_shell: debug_shell:
go build -gcflags="all=-N -l" go build -gcflags="all=-N -l"
dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- shell
dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec ./weed -- shell
debug_mount: debug_mount:
go build -gcflags="all=-N -l" go build -gcflags="all=-N -l"
dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- -v=4 mount -dir=~/tmp/mm -cacheCapacityMB=0 -filer.path=/ -umask=000
dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec ./weed -- -v=4 mount -dir=~/tmp/mm -cacheCapacityMB=0 -filer.path=/ -umask=000
debug_server: debug_server:
go build -gcflags="all=-N -l" go build -gcflags="all=-N -l"
dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- server -dir=~/tmp/99 -filer -volume.port=8343 -s3 -volume.max=0 -master.volumeSizeLimitMB=1024 -volume.preStopSeconds=1
dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec ./weed -- server -dir=~/tmp/99 -filer -volume.port=8343 -s3 -volume.max=0 -master.volumeSizeLimitMB=1024 -volume.preStopSeconds=1
debug_volume: debug_volume:
go build -gcflags="all=-N -l" go build -gcflags="all=-N -l"
dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- volume -dir=~/tmp/100 -port 8564 -max=30 -preStopSeconds=2
dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec ./weed -- volume -dir=~/tmp/100 -port 8564 -max=30 -preStopSeconds=2
debug_webdav: debug_webdav:
go build -gcflags="all=-N -l" go build -gcflags="all=-N -l"
dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- -v=4 webdav
dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec ./weed -- -v=4 webdav
debug_s3: debug_s3:
go build -gcflags="all=-N -l" go build -gcflags="all=-N -l"
dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- -v=4 s3
dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec ./weed -- -v=4 s3
debug_filer_copy: debug_filer_copy:
go build -gcflags="all=-N -l" go build -gcflags="all=-N -l"
dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- -v=4 filer.backup -filer=localhost:8888 -filerProxy -timeAgo=10h
dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec ./weed -- -v=4 filer.backup -filer=localhost:8888 -filerProxy -timeAgo=10h
debug_filer_remote_sync_dir: debug_filer_remote_sync_dir:
go build -gcflags="all=-N -l" go build -gcflags="all=-N -l"
dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- -v=4 filer.remote.sync -filer="localhost:8888" -dir=/buckets/b2 -timeAgo=1h
dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec ./weed -- -v=4 filer.remote.sync -filer="localhost:8888" -dir=/buckets/b2 -timeAgo=1h
debug_filer_remote_sync_buckets: debug_filer_remote_sync_buckets:
go build -gcflags="all=-N -l" go build -gcflags="all=-N -l"
dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- -v=4 filer.remote.sync -filer="localhost:8888" -createBucketAt=cloud1 -timeAgo=1h
dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec ./weed -- -v=4 filer.remote.sync -filer="localhost:8888" -createBucketAt=cloud1 -timeAgo=1h
debug_master_follower: debug_master_follower:
go build -gcflags="all=-N -l" go build -gcflags="all=-N -l"
dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- -v=4 master.follower
dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec ./weed -- -v=4 master.follower
debug_filer_sync: debug_filer_sync:
go build -gcflags="all=-N -l" go build -gcflags="all=-N -l"
dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- -v=4 filer.sync -a 192.168.2.7:8888 -b 192.168.2.7:8889 -isActivePassive -b.debug
dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec ./weed -- -v=4 filer.sync -a 192.168.2.7:8888 -b 192.168.2.7:8889 -isActivePassive -b.debug

2
weed/command/filer.go

@ -162,7 +162,7 @@ func runFiler(cmd *Command, args []string) bool {
util.LoadConfiguration("security", false) util.LoadConfiguration("security", false)
go stats_collect.StartMetricsServer(*f.metricsHttpPort)
go stats_collect.StartMetricsServer(*f.bindIp, *f.metricsHttpPort)
filerAddress := util.JoinHostPort(*f.ip, *f.port) filerAddress := util.JoinHostPort(*f.ip, *f.port)
startDelay := time.Duration(2) startDelay := time.Duration(2)

22
weed/command/filer_sync.go

@ -43,6 +43,7 @@ type SyncOptions struct {
bFromTsMs *int64 bFromTsMs *int64
aProxyByFiler *bool aProxyByFiler *bool
bProxyByFiler *bool bProxyByFiler *bool
metricsHttpIp *string
metricsHttpPort *int metricsHttpPort *int
concurrency *int concurrency *int
clientId int32 clientId int32
@ -86,6 +87,7 @@ func init() {
syncOptions.concurrency = cmdFilerSynchronize.Flag.Int("concurrency", DefaultConcurrencyLimit, "The maximum number of files that will be synced concurrently.") syncOptions.concurrency = cmdFilerSynchronize.Flag.Int("concurrency", DefaultConcurrencyLimit, "The maximum number of files that will be synced concurrently.")
syncCpuProfile = cmdFilerSynchronize.Flag.String("cpuprofile", "", "cpu profile output file") syncCpuProfile = cmdFilerSynchronize.Flag.String("cpuprofile", "", "cpu profile output file")
syncMemProfile = cmdFilerSynchronize.Flag.String("memprofile", "", "memory profile output file") syncMemProfile = cmdFilerSynchronize.Flag.String("memprofile", "", "memory profile output file")
syncOptions.metricsHttpIp = cmdFilerSynchronize.Flag.String("metricsIp", "", "metrics listen ip")
syncOptions.metricsHttpPort = cmdFilerSynchronize.Flag.Int("metricsPort", 0, "metrics listen port") syncOptions.metricsHttpPort = cmdFilerSynchronize.Flag.Int("metricsPort", 0, "metrics listen port")
syncOptions.clientId = util.RandomInt32() syncOptions.clientId = util.RandomInt32()
} }
@ -119,7 +121,7 @@ func runFilerSynchronize(cmd *Command, args []string) bool {
filerB := pb.ServerAddress(*syncOptions.filerB) filerB := pb.ServerAddress(*syncOptions.filerB)
// start filer.sync metrics server // start filer.sync metrics server
go statsCollect.StartMetricsServer(*syncOptions.metricsHttpPort)
go statsCollect.StartMetricsServer(*syncOptions.metricsHttpIp, *syncOptions.metricsHttpPort)
// read a filer signature // read a filer signature
aFilerSignature, aFilerErr := replication.ReadFilerSignature(grpcDialOption, filerA) aFilerSignature, aFilerErr := replication.ReadFilerSignature(grpcDialOption, filerA)
@ -397,7 +399,11 @@ func genProcessFunction(sourcePath string, targetPath string, excludePaths []str
return nil return nil
} }
key := buildKey(dataSink, message, targetPath, sourceNewKey, sourcePath) key := buildKey(dataSink, message, targetPath, sourceNewKey, sourcePath)
return dataSink.CreateEntry(key, message.NewEntry, message.Signatures)
if err := dataSink.CreateEntry(key, message.NewEntry, message.Signatures); err != nil {
return fmt.Errorf("create entry1 : %v", err)
} else {
return nil
}
} }
// this is something special? // this is something special?
@ -425,7 +431,11 @@ func genProcessFunction(sourcePath string, targetPath string, excludePaths []str
} }
// create the new entry // create the new entry
newKey := buildKey(dataSink, message, targetPath, sourceNewKey, sourcePath) newKey := buildKey(dataSink, message, targetPath, sourceNewKey, sourcePath)
return dataSink.CreateEntry(newKey, message.NewEntry, message.Signatures)
if err := dataSink.CreateEntry(newKey, message.NewEntry, message.Signatures); err != nil {
return fmt.Errorf("create entry2 : %v", err)
} else {
return nil
}
} else { } else {
// new key is outside of the watched directory // new key is outside of the watched directory
@ -439,7 +449,11 @@ func genProcessFunction(sourcePath string, targetPath string, excludePaths []str
if strings.HasPrefix(string(sourceNewKey), sourcePath) { if strings.HasPrefix(string(sourceNewKey), sourcePath) {
// new key is in the watched directory // new key is in the watched directory
key := buildKey(dataSink, message, targetPath, sourceNewKey, sourcePath) key := buildKey(dataSink, message, targetPath, sourceNewKey, sourcePath)
return dataSink.CreateEntry(key, message.NewEntry, message.Signatures)
if err := dataSink.CreateEntry(key, message.NewEntry, message.Signatures); err != nil {
return fmt.Errorf("create entry3 : %v", err)
} else {
return nil
}
} else { } else {
// new key is also outside of the watched directory // new key is also outside of the watched directory
// skip // skip

2
weed/command/master.go

@ -119,7 +119,7 @@ func runMaster(cmd *Command, args []string) bool {
glog.Fatalf("volumeSizeLimitMB should be smaller than 30000") glog.Fatalf("volumeSizeLimitMB should be smaller than 30000")
} }
go stats_collect.StartMetricsServer(*m.metricsHttpPort)
go stats_collect.StartMetricsServer(*m.ipBind, *m.metricsHttpPort)
startMaster(m, masterWhiteList) startMaster(m, masterWhiteList)
return true return true

2
weed/command/s3.go

@ -144,7 +144,7 @@ func runS3(cmd *Command, args []string) bool {
util.LoadConfiguration("security", false) util.LoadConfiguration("security", false)
go stats_collect.StartMetricsServer(*s3StandaloneOptions.metricsHttpPort)
go stats_collect.StartMetricsServer(*s3StandaloneOptions.bindIp, *s3StandaloneOptions.metricsHttpPort)
return s3StandaloneOptions.startS3Server() return s3StandaloneOptions.startS3Server()

2
weed/command/server.go

@ -233,7 +233,7 @@ func runServer(cmd *Command, args []string) bool {
webdavOptions.filer = &filerAddress webdavOptions.filer = &filerAddress
mqBrokerOptions.filerGroup = filerOptions.filerGroup mqBrokerOptions.filerGroup = filerOptions.filerGroup
go stats_collect.StartMetricsServer(*serverMetricsHttpPort)
go stats_collect.StartMetricsServer(*serverBindIp, *serverMetricsHttpPort)
folders := strings.Split(*volumeDataFolders, ",") folders := strings.Split(*volumeDataFolders, ",")

2
weed/command/volume.go

@ -130,7 +130,7 @@ func runVolume(cmd *Command, args []string) bool {
grace.SetupProfiling(*v.cpuProfile, *v.memProfile) grace.SetupProfiling(*v.cpuProfile, *v.memProfile)
} }
go stats_collect.StartMetricsServer(*v.metricsHttpPort)
go stats_collect.StartMetricsServer(*v.bindIp, *v.metricsHttpPort)
minFreeSpaces := util.MustParseMinFreeSpace(*minFreeSpace, *minFreeSpacePercent) minFreeSpaces := util.MustParseMinFreeSpace(*minFreeSpace, *minFreeSpacePercent)
v.masters = pb.ServerAddresses(*v.mastersString).ToAddresses() v.masters = pb.ServerAddresses(*v.mastersString).ToAddresses()

5
weed/command/webdav.go

@ -13,7 +13,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/security" "github.com/seaweedfs/seaweedfs/weed/security"
"github.com/seaweedfs/seaweedfs/weed/server"
weed_server "github.com/seaweedfs/seaweedfs/weed/server"
"github.com/seaweedfs/seaweedfs/weed/util" "github.com/seaweedfs/seaweedfs/weed/util"
) )
@ -23,6 +23,7 @@ var (
type WebDavOption struct { type WebDavOption struct {
filer *string filer *string
filerRootPath *string
port *int port *int
collection *string collection *string
replication *string replication *string
@ -44,6 +45,7 @@ func init() {
webDavStandaloneOptions.tlsCertificate = cmdWebDav.Flag.String("cert.file", "", "path to the TLS certificate file") webDavStandaloneOptions.tlsCertificate = cmdWebDav.Flag.String("cert.file", "", "path to the TLS certificate file")
webDavStandaloneOptions.cacheDir = cmdWebDav.Flag.String("cacheDir", os.TempDir(), "local cache directory for file chunks") webDavStandaloneOptions.cacheDir = cmdWebDav.Flag.String("cacheDir", os.TempDir(), "local cache directory for file chunks")
webDavStandaloneOptions.cacheSizeMB = cmdWebDav.Flag.Int64("cacheCapacityMB", 0, "local cache capacity in MB") webDavStandaloneOptions.cacheSizeMB = cmdWebDav.Flag.Int64("cacheCapacityMB", 0, "local cache capacity in MB")
webDavStandaloneOptions.filerRootPath = cmdWebDav.Flag.String("filer.path", "/", "use this remote path from filer server")
} }
var cmdWebDav = &Command{ var cmdWebDav = &Command{
@ -104,6 +106,7 @@ func (wo *WebDavOption) startWebDav() bool {
ws, webdavServer_err := weed_server.NewWebDavServer(&weed_server.WebDavOption{ ws, webdavServer_err := weed_server.NewWebDavServer(&weed_server.WebDavOption{
Filer: filerAddress, Filer: filerAddress,
FilerRootPath: *wo.filerRootPath,
GrpcDialOption: grpcDialOption, GrpcDialOption: grpcDialOption,
Collection: *wo.collection, Collection: *wo.collection,
Replication: *wo.replication, Replication: *wo.replication,

3
weed/filer/filechunks.go

@ -42,6 +42,9 @@ func ETag(entry *filer_pb.Entry) (etag string) {
} }
func ETagEntry(entry *Entry) (etag string) { func ETagEntry(entry *Entry) (etag string) {
if entry.IsInRemoteOnly() {
return entry.Remote.RemoteETag
}
if entry.Attr.Md5 == nil { if entry.Attr.Md5 == nil {
return ETagChunks(entry.GetChunks()) return ETagChunks(entry.GetChunks())
} }

9
weed/filer/filer_notify.go

@ -5,6 +5,7 @@ import (
"fmt" "fmt"
"io" "io"
"math" "math"
"regexp"
"strings" "strings"
"time" "time"
@ -108,6 +109,10 @@ func (f *Filer) logFlushFunc(startTime, stopTime time.Time, buf []byte) {
} }
} }
var (
VolumeNotFoundPattern = regexp.MustCompile(`volume \d+? not found`)
)
func (f *Filer) ReadPersistedLogBuffer(startTime time.Time, stopTsNs int64, eachLogEntryFn func(logEntry *filer_pb.LogEntry) error) (lastTsNs int64, isDone bool, err error) { func (f *Filer) ReadPersistedLogBuffer(startTime time.Time, stopTsNs int64, eachLogEntryFn func(logEntry *filer_pb.LogEntry) error) (lastTsNs int64, isDone bool, err error) {
startTime = startTime.UTC() startTime = startTime.UTC()
@ -159,6 +164,10 @@ func (f *Filer) ReadPersistedLogBuffer(startTime time.Time, stopTsNs int64, each
if err == io.EOF { if err == io.EOF {
continue continue
} }
if VolumeNotFoundPattern.MatchString(err.Error()) {
glog.Warningf("skipping reading %s: %v", hourMinuteEntry.FullPath, err)
continue
}
return lastTsNs, isDone, fmt.Errorf("reading %s: %v", hourMinuteEntry.FullPath, err) return lastTsNs, isDone, fmt.Errorf("reading %s: %v", hourMinuteEntry.FullPath, err)
} }
chunkedFileReader.Close() chunkedFileReader.Close()

23
weed/filer/filerstore_wrapper.go

@ -2,13 +2,14 @@ package filer
import ( import (
"context" "context"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/viant/ptrie"
"io" "io"
"math" "math"
"strings" "strings"
"time" "time"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/viant/ptrie"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/stats" "github.com/seaweedfs/seaweedfs/weed/stats"
"github.com/seaweedfs/seaweedfs/weed/util" "github.com/seaweedfs/seaweedfs/weed/util"
@ -186,9 +187,12 @@ func (fsw *FilerStoreWrapper) DeleteEntry(ctx context.Context, fp util.FullPath)
} }
if len(existingEntry.HardLinkId) != 0 { if len(existingEntry.HardLinkId) != 0 {
// remove hard link // remove hard link
glog.V(4).Infof("DeleteHardLink %s", existingEntry.FullPath)
if err = fsw.DeleteHardLink(ctx, existingEntry.HardLinkId); err != nil {
return err
op := ctx.Value("OP")
if op != "MV" {
glog.V(4).Infof("DeleteHardLink %s", existingEntry.FullPath)
if err = fsw.DeleteHardLink(ctx, existingEntry.HardLinkId); err != nil {
return err
}
} }
} }
@ -206,9 +210,12 @@ func (fsw *FilerStoreWrapper) DeleteOneEntry(ctx context.Context, existingEntry
if len(existingEntry.HardLinkId) != 0 { if len(existingEntry.HardLinkId) != 0 {
// remove hard link // remove hard link
glog.V(4).Infof("DeleteHardLink %s", existingEntry.FullPath)
if err = fsw.DeleteHardLink(ctx, existingEntry.HardLinkId); err != nil {
return err
op := ctx.Value("OP")
if op != "MV" {
glog.V(4).Infof("DeleteHardLink %s", existingEntry.FullPath)
if err = fsw.DeleteHardLink(ctx, existingEntry.HardLinkId); err != nil {
return err
}
} }
} }

35
weed/glog/glog_file.go

@ -25,6 +25,7 @@ import (
"os" "os"
"os/user" "os/user"
"path/filepath" "path/filepath"
"sort"
"strings" "strings"
"sync" "sync"
"time" "time"
@ -32,6 +33,7 @@ import (
// MaxSize is the maximum size of a log file in bytes. // MaxSize is the maximum size of a log file in bytes.
var MaxSize uint64 = 1024 * 1024 * 1800 var MaxSize uint64 = 1024 * 1024 * 1800
var MaxFileCount = 5
// logDirs lists the candidate directories for new log files. // logDirs lists the candidate directories for new log files.
var logDirs []string var logDirs []string
@ -43,8 +45,9 @@ var logDir = flag.String("logdir", "", "If non-empty, write log files in this di
func createLogDirs() { func createLogDirs() {
if *logDir != "" { if *logDir != "" {
logDirs = append(logDirs, *logDir) logDirs = append(logDirs, *logDir)
} else {
logDirs = append(logDirs, os.TempDir())
} }
logDirs = append(logDirs, os.TempDir())
} }
var ( var (
@ -96,6 +99,15 @@ func logName(tag string, t time.Time) (name, link string) {
return name, program + "." + tag return name, program + "." + tag
} }
func prefix(tag string) string {
return fmt.Sprintf("%s.%s.%s.log.%s.",
program,
host,
userName,
tag,
)
}
var onceLogDirs sync.Once var onceLogDirs sync.Once
// create creates a new log file and returns the file and its filename, which // create creates a new log file and returns the file and its filename, which
@ -108,8 +120,29 @@ func create(tag string, t time.Time) (f *os.File, filename string, err error) {
return nil, "", errors.New("log: no log dirs") return nil, "", errors.New("log: no log dirs")
} }
name, link := logName(tag, t) name, link := logName(tag, t)
logPrefix := prefix(tag)
var lastErr error var lastErr error
for _, dir := range logDirs { for _, dir := range logDirs {
// remove old logs
entries, _ := os.ReadDir(dir)
var previousLogs []string
for _, entry := range entries {
if strings.HasPrefix(entry.Name(), logPrefix) {
previousLogs = append(previousLogs, entry.Name())
}
}
if len(previousLogs) >= MaxFileCount {
sort.Strings(previousLogs)
for i, entry := range previousLogs {
if i > len(previousLogs)-MaxFileCount {
break
}
os.Remove(filepath.Join(dir, entry))
}
}
// create new log file
fname := filepath.Join(dir, name) fname := filepath.Join(dir, name)
f, err := os.Create(fname) f, err := os.Create(fname)
if err == nil { if err == nil {

1
weed/mount/dirty_pages_chunked.go

@ -80,7 +80,6 @@ func (pages *ChunkedDirtyPages) saveChunkedFileIntervalToStorage(reader io.Reade
} }
chunk.ModifiedTsNs = mtime chunk.ModifiedTsNs = mtime
pages.fh.AddChunks([]*filer_pb.FileChunk{chunk}) pages.fh.AddChunks([]*filer_pb.FileChunk{chunk})
pages.fh.entryViewCache = nil
glog.V(3).Infof("%v saveToStorage %s [%d,%d)", fileFullPath, chunk.FileId, offset, offset+size) glog.V(3).Infof("%v saveToStorage %s [%d,%d)", fileFullPath, chunk.FileId, offset, offset+size)
} }

30
weed/mount/filehandle.go

@ -1,16 +1,14 @@
package mount package mount
import ( import (
"golang.org/x/sync/semaphore"
"math"
"sync"
"golang.org/x/exp/slices"
"github.com/seaweedfs/seaweedfs/weed/filer" "github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/glog" "github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util" "github.com/seaweedfs/seaweedfs/weed/util"
"golang.org/x/exp/slices"
"golang.org/x/sync/semaphore"
"math"
"sync"
) )
type FileHandleId uint64 type FileHandleId uint64
@ -18,7 +16,7 @@ type FileHandleId uint64
type FileHandle struct { type FileHandle struct {
fh FileHandleId fh FileHandleId
counter int64 counter int64
entry *filer_pb.Entry
entry *LockedEntry
entryLock sync.Mutex entryLock sync.Mutex
inode uint64 inode uint64
wfs *WFS wfs *WFS
@ -48,6 +46,9 @@ func newFileHandle(wfs *WFS, handleId FileHandleId, inode uint64, entry *filer_p
if entry != nil { if entry != nil {
entry.Attributes.FileSize = filer.FileSize(entry) entry.Attributes.FileSize = filer.FileSize(entry)
} }
fh.entry = &LockedEntry{
Entry: entry,
}
return fh return fh
} }
@ -58,22 +59,15 @@ func (fh *FileHandle) FullPath() util.FullPath {
} }
func (fh *FileHandle) GetEntry() *filer_pb.Entry { func (fh *FileHandle) GetEntry() *filer_pb.Entry {
fh.entryLock.Lock()
defer fh.entryLock.Unlock()
return fh.entry
return fh.entry.GetEntry()
} }
func (fh *FileHandle) SetEntry(entry *filer_pb.Entry) { func (fh *FileHandle) SetEntry(entry *filer_pb.Entry) {
fh.entryLock.Lock()
defer fh.entryLock.Unlock()
fh.entry = entry
fh.entry.SetEntry(entry)
} }
func (fh *FileHandle) UpdateEntry(fn func(entry *filer_pb.Entry)) *filer_pb.Entry { func (fh *FileHandle) UpdateEntry(fn func(entry *filer_pb.Entry)) *filer_pb.Entry {
fh.entryLock.Lock()
defer fh.entryLock.Unlock()
fn(fh.entry)
return fh.entry
return fh.entry.UpdateEntry(fn)
} }
func (fh *FileHandle) AddChunks(chunks []*filer_pb.FileChunk) { func (fh *FileHandle) AddChunks(chunks []*filer_pb.FileChunk) {
@ -107,7 +101,7 @@ func (fh *FileHandle) AddChunks(chunks []*filer_pb.FileChunk) {
glog.V(4).Infof("%s existing %d chunks adds %d more", fh.FullPath(), len(fh.entry.GetChunks()), len(chunks)) glog.V(4).Infof("%s existing %d chunks adds %d more", fh.FullPath(), len(fh.entry.GetChunks()), len(chunks))
fh.entry.Chunks = append(fh.entry.GetChunks(), newChunks...)
fh.entry.AppendChunks(newChunks)
fh.entryViewCache = nil fh.entryViewCache = nil
} }

2
weed/mount/filehandle_map.go

@ -50,7 +50,7 @@ func (i *FileHandleToInode) AcquireFileHandle(wfs *WFS, inode uint64, entry *fil
} else { } else {
fh.counter++ fh.counter++
} }
if fh.entry != entry {
if fh.GetEntry() != entry {
fh.SetEntry(entry) fh.SetEntry(entry)
} }
return fh return fh

2
weed/mount/filehandle_read.go

@ -26,7 +26,7 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
fileFullPath := fh.FullPath() fileFullPath := fh.FullPath()
entry := fh.entry
entry := fh.GetEntry()
if entry == nil { if entry == nil {
return 0, io.EOF return 0, io.EOF
} }

42
weed/mount/locked_entry.go

@ -0,0 +1,42 @@
package mount
import (
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"sync"
)
type LockedEntry struct {
*filer_pb.Entry
sync.RWMutex
}
func (le *LockedEntry) GetEntry() *filer_pb.Entry {
le.RLock()
defer le.RUnlock()
return le.Entry
}
func (le *LockedEntry) SetEntry(entry *filer_pb.Entry) {
le.Lock()
defer le.Unlock()
le.Entry = entry
}
func (le *LockedEntry) UpdateEntry(fn func(entry *filer_pb.Entry)) *filer_pb.Entry {
le.Lock()
defer le.Unlock()
fn(le.Entry)
return le.Entry
}
func (le *LockedEntry) GetChunks() []*filer_pb.FileChunk {
le.RLock()
defer le.RUnlock()
return le.Entry.Chunks
}
func (le *LockedEntry) AppendChunks(newChunks []*filer_pb.FileChunk) {
le.Lock()
defer le.Unlock()
le.Entry.Chunks = append(le.Entry.Chunks, newChunks...)
}

4
weed/mount/meta_cache/meta_cache.go

@ -2,12 +2,13 @@ package meta_cache
import ( import (
"context" "context"
"os"
"github.com/seaweedfs/seaweedfs/weed/filer" "github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/filer/leveldb" "github.com/seaweedfs/seaweedfs/weed/filer/leveldb"
"github.com/seaweedfs/seaweedfs/weed/glog" "github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util" "github.com/seaweedfs/seaweedfs/weed/util"
"os"
) )
// need to have logic similar to FilerStoreWrapper // need to have logic similar to FilerStoreWrapper
@ -76,6 +77,7 @@ func (mc *MetaCache) AtomicUpdateEntryFromFiler(ctx context.Context, oldPath uti
// skip the unnecessary deletion // skip the unnecessary deletion
// leave the update to the following InsertEntry operation // leave the update to the following InsertEntry operation
} else { } else {
ctx = context.WithValue(ctx, "OP", "MV")
glog.V(3).Infof("DeleteEntry %s", oldPath) glog.V(3).Infof("DeleteEntry %s", oldPath)
if err := mc.localStore.DeleteEntry(ctx, oldPath); err != nil { if err := mc.localStore.DeleteEntry(ctx, oldPath); err != nil {
return err return err

68
weed/mount/page_writer/upload_pipeline.go

@ -6,26 +6,24 @@ import (
"github.com/seaweedfs/seaweedfs/weed/util" "github.com/seaweedfs/seaweedfs/weed/util"
"sync" "sync"
"sync/atomic" "sync/atomic"
"time"
) )
type LogicChunkIndex int type LogicChunkIndex int
type UploadPipeline struct { type UploadPipeline struct {
uploaderCount int32
uploaderCountCond *sync.Cond
filepath util.FullPath
ChunkSize int64
writableChunks map[LogicChunkIndex]PageChunk
writableChunksLock sync.Mutex
sealedChunks map[LogicChunkIndex]*SealedChunk
sealedChunksLock sync.Mutex
uploaders *util.LimitedConcurrentExecutor
saveToStorageFn SaveToStorageFunc
activeReadChunks map[LogicChunkIndex]int
activeReadChunksLock sync.Mutex
writableChunkLimit int
swapFile *SwapFile
uploaderCount int32
uploaderCountCond *sync.Cond
filepath util.FullPath
ChunkSize int64
uploaders *util.LimitedConcurrentExecutor
saveToStorageFn SaveToStorageFunc
writableChunkLimit int
swapFile *SwapFile
chunksLock sync.Mutex
writableChunks map[LogicChunkIndex]PageChunk
sealedChunks map[LogicChunkIndex]*SealedChunk
activeReadChunks map[LogicChunkIndex]int
readerCountCond *sync.Cond
} }
type SealedChunk struct { type SealedChunk struct {
@ -42,7 +40,7 @@ func (sc *SealedChunk) FreeReference(messageOnFree string) {
} }
func NewUploadPipeline(writers *util.LimitedConcurrentExecutor, chunkSize int64, saveToStorageFn SaveToStorageFunc, bufferChunkLimit int, swapFileDir string) *UploadPipeline { func NewUploadPipeline(writers *util.LimitedConcurrentExecutor, chunkSize int64, saveToStorageFn SaveToStorageFunc, bufferChunkLimit int, swapFileDir string) *UploadPipeline {
return &UploadPipeline{
t := &UploadPipeline{
ChunkSize: chunkSize, ChunkSize: chunkSize,
writableChunks: make(map[LogicChunkIndex]PageChunk), writableChunks: make(map[LogicChunkIndex]PageChunk),
sealedChunks: make(map[LogicChunkIndex]*SealedChunk), sealedChunks: make(map[LogicChunkIndex]*SealedChunk),
@ -53,11 +51,13 @@ func NewUploadPipeline(writers *util.LimitedConcurrentExecutor, chunkSize int64,
writableChunkLimit: bufferChunkLimit, writableChunkLimit: bufferChunkLimit,
swapFile: NewSwapFile(swapFileDir, chunkSize), swapFile: NewSwapFile(swapFileDir, chunkSize),
} }
t.readerCountCond = sync.NewCond(&t.chunksLock)
return t
} }
func (up *UploadPipeline) SaveDataAt(p []byte, off int64, isSequential bool) (n int) { func (up *UploadPipeline) SaveDataAt(p []byte, off int64, isSequential bool) (n int) {
up.writableChunksLock.Lock()
defer up.writableChunksLock.Unlock()
up.chunksLock.Lock()
defer up.chunksLock.Unlock()
logicChunkIndex := LogicChunkIndex(off / up.ChunkSize) logicChunkIndex := LogicChunkIndex(off / up.ChunkSize)
@ -74,7 +74,6 @@ func (up *UploadPipeline) SaveDataAt(p []byte, off int64, isSequential bool) (n
} }
} }
up.moveToSealed(up.writableChunks[fullestChunkIndex], fullestChunkIndex) up.moveToSealed(up.writableChunks[fullestChunkIndex], fullestChunkIndex)
delete(up.writableChunks, fullestChunkIndex)
// fmt.Printf("flush chunk %d with %d bytes written\n", logicChunkIndex, fullness) // fmt.Printf("flush chunk %d with %d bytes written\n", logicChunkIndex, fullness)
} }
if isSequential && if isSequential &&
@ -95,13 +94,17 @@ func (up *UploadPipeline) SaveDataAt(p []byte, off int64, isSequential bool) (n
func (up *UploadPipeline) MaybeReadDataAt(p []byte, off int64) (maxStop int64) { func (up *UploadPipeline) MaybeReadDataAt(p []byte, off int64) (maxStop int64) {
logicChunkIndex := LogicChunkIndex(off / up.ChunkSize) logicChunkIndex := LogicChunkIndex(off / up.ChunkSize)
up.chunksLock.Lock()
defer func() {
up.readerCountCond.Signal()
up.chunksLock.Unlock()
}()
// read from sealed chunks first // read from sealed chunks first
up.sealedChunksLock.Lock()
sealedChunk, found := up.sealedChunks[logicChunkIndex] sealedChunk, found := up.sealedChunks[logicChunkIndex]
if found { if found {
sealedChunk.referenceCounter++ sealedChunk.referenceCounter++
} }
up.sealedChunksLock.Unlock()
if found { if found {
maxStop = sealedChunk.chunk.ReadDataAt(p, off) maxStop = sealedChunk.chunk.ReadDataAt(p, off)
glog.V(4).Infof("%s read sealed memchunk [%d,%d)", up.filepath, off, maxStop) glog.V(4).Infof("%s read sealed memchunk [%d,%d)", up.filepath, off, maxStop)
@ -109,8 +112,6 @@ func (up *UploadPipeline) MaybeReadDataAt(p []byte, off int64) (maxStop int64) {
} }
// read from writable chunks last // read from writable chunks last
up.writableChunksLock.Lock()
defer up.writableChunksLock.Unlock()
writableChunk, found := up.writableChunks[logicChunkIndex] writableChunk, found := up.writableChunks[logicChunkIndex]
if !found { if !found {
return return
@ -123,8 +124,8 @@ func (up *UploadPipeline) MaybeReadDataAt(p []byte, off int64) (maxStop int64) {
} }
func (up *UploadPipeline) FlushAll() { func (up *UploadPipeline) FlushAll() {
up.writableChunksLock.Lock()
defer up.writableChunksLock.Unlock()
up.chunksLock.Lock()
defer up.chunksLock.Unlock()
for logicChunkIndex, memChunk := range up.writableChunks { for logicChunkIndex, memChunk := range up.writableChunks {
up.moveToSealed(memChunk, logicChunkIndex) up.moveToSealed(memChunk, logicChunkIndex)
@ -143,8 +144,6 @@ func (up *UploadPipeline) moveToSealed(memChunk PageChunk, logicChunkIndex Logic
atomic.AddInt32(&up.uploaderCount, 1) atomic.AddInt32(&up.uploaderCount, 1)
glog.V(4).Infof("%s uploaderCount %d ++> %d", up.filepath, up.uploaderCount-1, up.uploaderCount) glog.V(4).Infof("%s uploaderCount %d ++> %d", up.filepath, up.uploaderCount-1, up.uploaderCount)
up.sealedChunksLock.Lock()
if oldMemChunk, found := up.sealedChunks[logicChunkIndex]; found { if oldMemChunk, found := up.sealedChunks[logicChunkIndex]; found {
oldMemChunk.FreeReference(fmt.Sprintf("%s replace chunk %d", up.filepath, logicChunkIndex)) oldMemChunk.FreeReference(fmt.Sprintf("%s replace chunk %d", up.filepath, logicChunkIndex))
} }
@ -155,8 +154,8 @@ func (up *UploadPipeline) moveToSealed(memChunk PageChunk, logicChunkIndex Logic
up.sealedChunks[logicChunkIndex] = sealedChunk up.sealedChunks[logicChunkIndex] = sealedChunk
delete(up.writableChunks, logicChunkIndex) delete(up.writableChunks, logicChunkIndex)
up.sealedChunksLock.Unlock()
// unlock before submitting the uploading jobs
up.chunksLock.Unlock()
up.uploaders.Execute(func() { up.uploaders.Execute(func() {
// first add to the file chunks // first add to the file chunks
sealedChunk.chunk.SaveContent(up.saveToStorageFn) sealedChunk.chunk.SaveContent(up.saveToStorageFn)
@ -172,24 +171,25 @@ func (up *UploadPipeline) moveToSealed(memChunk PageChunk, logicChunkIndex Logic
up.uploaderCountCond.L.Unlock() up.uploaderCountCond.L.Unlock()
// wait for readers // wait for readers
up.chunksLock.Lock()
defer up.chunksLock.Unlock()
for up.IsLocked(logicChunkIndex) { for up.IsLocked(logicChunkIndex) {
time.Sleep(59 * time.Millisecond)
up.readerCountCond.Wait()
} }
// then remove from sealed chunks // then remove from sealed chunks
up.sealedChunksLock.Lock()
defer up.sealedChunksLock.Unlock()
delete(up.sealedChunks, logicChunkIndex) delete(up.sealedChunks, logicChunkIndex)
sealedChunk.FreeReference(fmt.Sprintf("%s finished uploading chunk %d", up.filepath, logicChunkIndex)) sealedChunk.FreeReference(fmt.Sprintf("%s finished uploading chunk %d", up.filepath, logicChunkIndex))
}) })
up.chunksLock.Lock()
} }
func (up *UploadPipeline) Shutdown() { func (up *UploadPipeline) Shutdown() {
up.swapFile.FreeResource() up.swapFile.FreeResource()
up.sealedChunksLock.Lock()
defer up.sealedChunksLock.Unlock()
up.chunksLock.Lock()
defer up.chunksLock.Unlock()
for logicChunkIndex, sealedChunk := range up.sealedChunks { for logicChunkIndex, sealedChunk := range up.sealedChunks {
sealedChunk.FreeReference(fmt.Sprintf("%s uploadpipeline shutdown chunk %d", up.filepath, logicChunkIndex)) sealedChunk.FreeReference(fmt.Sprintf("%s uploadpipeline shutdown chunk %d", up.filepath, logicChunkIndex))
} }

10
weed/mount/page_writer/upload_pipeline_lock.go

@ -10,8 +10,8 @@ func (up *UploadPipeline) LockForRead(startOffset, stopOffset int64) {
if stopOffset%up.ChunkSize > 0 { if stopOffset%up.ChunkSize > 0 {
stopLogicChunkIndex += 1 stopLogicChunkIndex += 1
} }
up.activeReadChunksLock.Lock()
defer up.activeReadChunksLock.Unlock()
up.chunksLock.Lock()
defer up.chunksLock.Unlock()
for i := startLogicChunkIndex; i < stopLogicChunkIndex; i++ { for i := startLogicChunkIndex; i < stopLogicChunkIndex; i++ {
if count, found := up.activeReadChunks[i]; found { if count, found := up.activeReadChunks[i]; found {
up.activeReadChunks[i] = count + 1 up.activeReadChunks[i] = count + 1
@ -27,8 +27,8 @@ func (up *UploadPipeline) UnlockForRead(startOffset, stopOffset int64) {
if stopOffset%up.ChunkSize > 0 { if stopOffset%up.ChunkSize > 0 {
stopLogicChunkIndex += 1 stopLogicChunkIndex += 1
} }
up.activeReadChunksLock.Lock()
defer up.activeReadChunksLock.Unlock()
up.chunksLock.Lock()
defer up.chunksLock.Unlock()
for i := startLogicChunkIndex; i < stopLogicChunkIndex; i++ { for i := startLogicChunkIndex; i < stopLogicChunkIndex; i++ {
if count, found := up.activeReadChunks[i]; found { if count, found := up.activeReadChunks[i]; found {
if count == 1 { if count == 1 {
@ -41,8 +41,6 @@ func (up *UploadPipeline) UnlockForRead(startOffset, stopOffset int64) {
} }
func (up *UploadPipeline) IsLocked(logicChunkIndex LogicChunkIndex) bool { func (up *UploadPipeline) IsLocked(logicChunkIndex LogicChunkIndex) bool {
up.activeReadChunksLock.Lock()
defer up.activeReadChunksLock.Unlock()
if count, found := up.activeReadChunks[logicChunkIndex]; found { if count, found := up.activeReadChunks[logicChunkIndex]; found {
return count > 0 return count > 0
} }

2
weed/mount/weedfs_attr.go

@ -25,7 +25,7 @@ func (wfs *WFS) GetAttr(cancel <-chan struct{}, input *fuse.GetAttrIn, out *fuse
} else { } else {
if fh, found := wfs.fhmap.FindFileHandle(inode); found { if fh, found := wfs.fhmap.FindFileHandle(inode); found {
out.AttrValid = 1 out.AttrValid = 1
wfs.setAttrByPbEntry(&out.Attr, inode, fh.entry)
wfs.setAttrByPbEntry(&out.Attr, inode, fh.entry.GetEntry())
out.Nlink = 0 out.Nlink = 0
return fuse.OK return fuse.OK
} }

6
weed/mount/weedfs_dir_lookup.go

@ -59,9 +59,9 @@ func (wfs *WFS) Lookup(cancel <-chan struct{}, header *fuse.InHeader, name strin
if fh, found := wfs.fhmap.FindFileHandle(inode); found { if fh, found := wfs.fhmap.FindFileHandle(inode); found {
fh.entryLock.Lock() fh.entryLock.Lock()
if fh.entry != nil {
glog.V(4).Infof("lookup opened file %s size %d", dirPath.Child(localEntry.Name()), filer.FileSize(fh.entry))
localEntry = filer.FromPbEntry(string(dirPath), fh.entry)
if entry := fh.GetEntry(); entry != nil {
glog.V(4).Infof("lookup opened file %s size %d", dirPath.Child(localEntry.Name()), filer.FileSize(entry))
localEntry = filer.FromPbEntry(string(dirPath), entry)
} }
fh.entryLock.Unlock() fh.entryLock.Unlock()
} }

2
weed/mount/weedfs_dir_read.go

@ -173,7 +173,7 @@ func (wfs *WFS) doReadDirectory(input *fuse.ReadIn, out *fuse.DirEntryList, isPl
} }
if fh, found := wfs.fhmap.FindFileHandle(inode); found { if fh, found := wfs.fhmap.FindFileHandle(inode); found {
glog.V(4).Infof("readdir opened file %s", dirPath.Child(dirEntry.Name)) glog.V(4).Infof("readdir opened file %s", dirPath.Child(dirEntry.Name))
entry = filer.FromPbEntry(string(dirPath), fh.entry)
entry = filer.FromPbEntry(string(dirPath), fh.GetEntry())
} }
wfs.outputFilerEntry(entryOut, inode, entry) wfs.outputFilerEntry(entryOut, inode, entry)
} }

2
weed/mount/weedfs_file_lseek.go

@ -41,7 +41,7 @@ func (wfs *WFS) Lseek(cancel <-chan struct{}, in *fuse.LseekIn, out *fuse.LseekO
fh.entryLock.Lock() fh.entryLock.Lock()
defer fh.entryLock.Unlock() defer fh.entryLock.Unlock()
fileSize := int64(filer.FileSize(fh.entry))
fileSize := int64(filer.FileSize(fh.GetEntry()))
offset := max(int64(in.Offset), 0) offset := max(int64(in.Offset), 0)
glog.V(4).Infof( glog.V(4).Infof(

12
weed/mount/weedfs_file_sync.go

@ -55,9 +55,6 @@ func (wfs *WFS) Flush(cancel <-chan struct{}, in *fuse.FlushIn) fuse.Status {
return fuse.ENOENT return fuse.ENOENT
} }
fh.orderedMutex.Acquire(context.Background(), 1)
defer fh.orderedMutex.Release(1)
return wfs.doFlush(fh, in.Uid, in.Gid) return wfs.doFlush(fh, in.Uid, in.Gid)
} }
@ -87,14 +84,14 @@ func (wfs *WFS) Fsync(cancel <-chan struct{}, in *fuse.FsyncIn) (code fuse.Statu
return fuse.ENOENT return fuse.ENOENT
} }
fh.orderedMutex.Acquire(context.Background(), 1)
defer fh.orderedMutex.Release(1)
return wfs.doFlush(fh, in.Uid, in.Gid) return wfs.doFlush(fh, in.Uid, in.Gid)
} }
func (wfs *WFS) doFlush(fh *FileHandle, uid, gid uint32) fuse.Status { func (wfs *WFS) doFlush(fh *FileHandle, uid, gid uint32) fuse.Status {
fh.orderedMutex.Acquire(context.Background(), 1)
defer fh.orderedMutex.Release(1)
// flush works at fh level // flush works at fh level
fileFullPath := fh.FullPath() fileFullPath := fh.FullPath()
dir, name := fileFullPath.DirAndName() dir, name := fileFullPath.DirAndName()
@ -117,11 +114,10 @@ func (wfs *WFS) doFlush(fh *FileHandle, uid, gid uint32) fuse.Status {
} }
err := wfs.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { err := wfs.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
fh.entryLock.Lock() fh.entryLock.Lock()
defer fh.entryLock.Unlock() defer fh.entryLock.Unlock()
entry := fh.entry
entry := fh.GetEntry()
if entry == nil { if entry == nil {
return nil return nil
} }

2
weed/mount/weedfs_file_write.go

@ -49,7 +49,7 @@ func (wfs *WFS) Write(cancel <-chan struct{}, in *fuse.WriteIn, data []byte) (wr
fh.orderedMutex.Acquire(context.Background(), 1) fh.orderedMutex.Acquire(context.Background(), 1)
defer fh.orderedMutex.Release(1) defer fh.orderedMutex.Release(1)
entry := fh.entry
entry := fh.GetEntry()
if entry == nil { if entry == nil {
return 0, fuse.OK return 0, fuse.OK
} }

7
weed/mount/weedfs_rename.go

@ -235,8 +235,11 @@ func (wfs *WFS) handleRenameResponse(ctx context.Context, resp *filer_pb.StreamR
sourceInode, targetInode := wfs.inodeToPath.MovePath(oldPath, newPath) sourceInode, targetInode := wfs.inodeToPath.MovePath(oldPath, newPath)
if sourceInode != 0 { if sourceInode != 0 {
if fh, foundFh := wfs.fhmap.FindFileHandle(sourceInode); foundFh && fh.entry != nil {
fh.entry.Name = newName
fh, foundFh := wfs.fhmap.FindFileHandle(sourceInode)
if foundFh {
if entry := fh.GetEntry(); entry != nil {
entry.Name = newName
}
} }
// invalidate attr and data // invalidate attr and data
// wfs.fuseServer.InodeNotify(sourceInode, 0, -1) // wfs.fuseServer.InodeNotify(sourceInode, 0, -1)

3
weed/pb/volume_server.proto

@ -320,6 +320,8 @@ message ReadAllNeedlesResponse {
bool needle_blob_compressed = 6; bool needle_blob_compressed = 6;
uint64 last_modified = 7; uint64 last_modified = 7;
uint32 crc = 8; uint32 crc = 8;
bytes name = 9;
bytes mime = 10;
} }
message VolumeTailSenderRequest { message VolumeTailSenderRequest {
@ -530,6 +532,7 @@ message FetchAndWriteNeedleRequest {
remote_pb.RemoteStorageLocation remote_location = 16; remote_pb.RemoteStorageLocation remote_location = 16;
} }
message FetchAndWriteNeedleResponse { message FetchAndWriteNeedleResponse {
string e_tag = 1;
} }
// select on volume servers // select on volume servers

1407
weed/pb/volume_server_pb/volume_server.pb.go
File diff suppressed because it is too large
View File

39
weed/replication/sink/filersink/fetch_write.go

@ -2,9 +2,11 @@ package filersink
import ( import (
"fmt" "fmt"
"sync"
"github.com/schollz/progressbar/v3"
"github.com/seaweedfs/seaweedfs/weed/util" "github.com/seaweedfs/seaweedfs/weed/util"
"os"
"path/filepath"
"sync"
"google.golang.org/grpc" "google.golang.org/grpc"
@ -19,6 +21,20 @@ func (fs *FilerSink) replicateChunks(sourceChunks []*filer_pb.FileChunk, path st
return return
} }
// a simple progress bar. Not ideal. Fix me.
var bar *progressbar.ProgressBar
if len(sourceChunks) > 1 {
name := filepath.Base(path)
bar = progressbar.NewOptions64(int64(len(sourceChunks)),
progressbar.OptionClearOnFinish(),
progressbar.OptionOnCompletion(func() {
fmt.Fprint(os.Stderr, "\n")
}),
progressbar.OptionFullWidth(),
progressbar.OptionSetDescription(name),
)
}
replicatedChunks = make([]*filer_pb.FileChunk, len(sourceChunks)) replicatedChunks = make([]*filer_pb.FileChunk, len(sourceChunks))
var wg sync.WaitGroup var wg sync.WaitGroup
@ -27,12 +43,19 @@ func (fs *FilerSink) replicateChunks(sourceChunks []*filer_pb.FileChunk, path st
index, source := chunkIndex, sourceChunk index, source := chunkIndex, sourceChunk
fs.executor.Execute(func() { fs.executor.Execute(func() {
defer wg.Done() defer wg.Done()
replicatedChunk, e := fs.replicateOneChunk(source, path)
if e != nil {
err = e
return
}
replicatedChunks[index] = replicatedChunk
util.Retry("replicate chunks", func() error {
replicatedChunk, e := fs.replicateOneChunk(source, path)
if e != nil {
err = e
return e
}
replicatedChunks[index] = replicatedChunk
if bar != nil {
bar.Add(1)
}
err = nil
return nil
})
}) })
} }
wg.Wait() wg.Wait()

8
weed/replication/sink/filersink/filer_sink.go

@ -112,7 +112,7 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry, signatures [
Directory: dir, Directory: dir,
Name: name, Name: name,
} }
glog.V(1).Infof("lookup: %v", lookupRequest)
// glog.V(1).Infof("lookup: %v", lookupRequest)
if resp, err := filer_pb.LookupEntry(client, lookupRequest); err == nil { if resp, err := filer_pb.LookupEntry(client, lookupRequest); err == nil {
if filer.ETag(resp.Entry) == filer.ETag(entry) { if filer.ETag(resp.Entry) == filer.ETag(entry) {
glog.V(3).Infof("already replicated %s", key) glog.V(3).Infof("already replicated %s", key)
@ -125,9 +125,10 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry, signatures [
if err != nil { if err != nil {
// only warning here since the source chunk may have been deleted already // only warning here since the source chunk may have been deleted already
glog.Warningf("replicate entry chunks %s: %v", key, err) glog.Warningf("replicate entry chunks %s: %v", key, err)
return nil
} }
glog.V(4).Infof("replicated %s %+v ===> %+v", key, entry.GetChunks(), replicatedChunks)
// glog.V(4).Infof("replicated %s %+v ===> %+v", key, entry.GetChunks(), replicatedChunks)
request := &filer_pb.CreateEntryRequest{ request := &filer_pb.CreateEntryRequest{
Directory: dir, Directory: dir,
@ -205,7 +206,8 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParent
// replicate the chunks that are new in the source // replicate the chunks that are new in the source
replicatedChunks, err := fs.replicateChunks(newChunks, key) replicatedChunks, err := fs.replicateChunks(newChunks, key)
if err != nil { if err != nil {
return true, fmt.Errorf("replicate %s chunks error: %v", key, err)
glog.Warningf("replicate entry chunks %s: %v", key, err)
return true, nil
} }
existingEntry.Chunks = append(existingEntry.GetChunks(), replicatedChunks...) existingEntry.Chunks = append(existingEntry.GetChunks(), replicatedChunks...)
existingEntry.Attributes = newEntry.Attributes existingEntry.Attributes = newEntry.Attributes

11
weed/s3api/auth_credentials.go

@ -17,7 +17,10 @@ import (
"github.com/seaweedfs/seaweedfs/weed/s3api/s3err" "github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
) )
var IdentityAnonymous *Identity
var IdentityAnonymous = &Identity{
Name: s3account.AccountAnonymous.Name,
AccountId: s3account.AccountAnonymous.Id,
}
type Action string type Action string
@ -162,12 +165,6 @@ func (iam *IdentityAccessManagement) loadS3ApiConfiguration(config *iam_pb.S3Api
identities = append(identities, t) identities = append(identities, t)
} }
if IdentityAnonymous == nil {
IdentityAnonymous = &Identity{
Name: s3account.AccountAnonymous.Name,
AccountId: s3account.AccountAnonymous.Id,
}
}
iam.m.Lock() iam.m.Lock()
// atomically switch // atomically switch
iam.identities = identities iam.identities = identities

4
weed/s3api/auth_credentials_subscribe.go

@ -70,10 +70,10 @@ func (s3a *S3ApiServer) onBucketMetadataChange(dir string, oldEntry *filer_pb.En
if dir == s3a.option.BucketsPath { if dir == s3a.option.BucketsPath {
if newEntry != nil { if newEntry != nil {
s3a.bucketRegistry.LoadBucketMetadata(newEntry) s3a.bucketRegistry.LoadBucketMetadata(newEntry)
glog.V(0).Infof("updated bucketMetadata %s/%s", dir, newEntry)
glog.V(1).Infof("updated bucketMetadata %s/%s", dir, newEntry)
} else { } else {
s3a.bucketRegistry.RemoveBucketMetadata(oldEntry) s3a.bucketRegistry.RemoveBucketMetadata(oldEntry)
glog.V(0).Infof("remove bucketMetadata %s/%s", dir, newEntry)
glog.V(1).Infof("remove bucketMetadata %s/%s", dir, newEntry)
} }
} }
return nil return nil

70
weed/s3api/bucket_metadata.go

@ -107,7 +107,6 @@ func buildBucketMetadata(accountManager *s3account.AccountManager, entry *filer_
} }
} }
//access control policy
//owner //owner
acpOwnerBytes, ok := entry.Extended[s3_constants.ExtAmzOwnerKey] acpOwnerBytes, ok := entry.Extended[s3_constants.ExtAmzOwnerKey]
if ok && len(acpOwnerBytes) > 0 { if ok && len(acpOwnerBytes) > 0 {
@ -122,17 +121,31 @@ func buildBucketMetadata(accountManager *s3account.AccountManager, entry *filer_
} }
} }
} }
//grants //grants
acpGrantsBytes, ok := entry.Extended[s3_constants.ExtAmzAclKey] acpGrantsBytes, ok := entry.Extended[s3_constants.ExtAmzAclKey]
if ok && len(acpGrantsBytes) > 0 {
var grants []*s3.Grant
err := json.Unmarshal(acpGrantsBytes, &grants)
if err == nil {
bucketMetadata.Acl = grants
} else {
glog.Warningf("Unmarshal ACP grants: %s(%v), bucket: %s", string(acpGrantsBytes), err, bucketMetadata.Name)
if ok {
if len(acpGrantsBytes) > 0 {
var grants []*s3.Grant
err := json.Unmarshal(acpGrantsBytes, &grants)
if err == nil {
bucketMetadata.Acl = grants
} else {
glog.Warningf("Unmarshal ACP grants: %s(%v), bucket: %s", string(acpGrantsBytes), err, bucketMetadata.Name)
}
}
} else {
bucketMetadata.Acl = []*s3.Grant{
{
Grantee: &s3.Grantee{
Type: &s3_constants.GrantTypeCanonicalUser,
ID: bucketMetadata.Owner.ID,
},
Permission: &s3_constants.PermissionFullControl,
},
} }
} }
} }
return bucketMetadata return bucketMetadata
} }
@ -143,17 +156,12 @@ func (r *BucketRegistry) RemoveBucketMetadata(entry *filer_pb.Entry) {
} }
func (r *BucketRegistry) GetBucketMetadata(bucketName string) (*BucketMetaData, s3err.ErrorCode) { func (r *BucketRegistry) GetBucketMetadata(bucketName string) (*BucketMetaData, s3err.ErrorCode) {
r.metadataCacheLock.RLock()
bucketMetadata, ok := r.metadataCache[bucketName]
r.metadataCacheLock.RUnlock()
if ok {
bucketMetadata := r.getMetadataCache(bucketName)
if bucketMetadata != nil {
return bucketMetadata, s3err.ErrNone return bucketMetadata, s3err.ErrNone
} }
r.notFoundLock.RLock()
_, ok = r.notFound[bucketName]
r.notFoundLock.RUnlock()
if ok {
if r.isNotFound(bucketName) {
return nil, s3err.ErrNoSuchBucket return nil, s3err.ErrNoSuchBucket
} }
@ -172,10 +180,8 @@ func (r *BucketRegistry) LoadBucketMetadataFromFiler(bucketName string) (*Bucket
defer r.notFoundLock.Unlock() defer r.notFoundLock.Unlock()
//check if already exists //check if already exists
r.metadataCacheLock.RLock()
bucketMetaData, ok := r.metadataCache[bucketName]
r.metadataCacheLock.RUnlock()
if ok {
bucketMetaData := r.getMetadataCache(bucketName)
if bucketMetaData != nil {
return bucketMetaData, s3err.ErrNone return bucketMetaData, s3err.ErrNone
} }
@ -184,6 +190,7 @@ func (r *BucketRegistry) LoadBucketMetadataFromFiler(bucketName string) (*Bucket
if err != nil { if err != nil {
if err == filer_pb.ErrNotFound { if err == filer_pb.ErrNotFound {
// The bucket doesn't actually exist and should no longer loaded from the filer // The bucket doesn't actually exist and should no longer loaded from the filer
glog.Warning("bucket not found in filer: ", bucketName)
r.notFound[bucketName] = struct{}{} r.notFound[bucketName] = struct{}{}
return nil, s3err.ErrNoSuchBucket return nil, s3err.ErrNoSuchBucket
} }
@ -192,6 +199,15 @@ func (r *BucketRegistry) LoadBucketMetadataFromFiler(bucketName string) (*Bucket
return bucketMetadata, s3err.ErrNone return bucketMetadata, s3err.ErrNone
} }
func (r *BucketRegistry) getMetadataCache(bucket string) *BucketMetaData {
r.metadataCacheLock.RLock()
defer r.metadataCacheLock.RUnlock()
if cache, ok := r.metadataCache[bucket]; ok {
return cache
}
return nil
}
func (r *BucketRegistry) setMetadataCache(metadata *BucketMetaData) { func (r *BucketRegistry) setMetadataCache(metadata *BucketMetaData) {
r.metadataCacheLock.Lock() r.metadataCacheLock.Lock()
defer r.metadataCacheLock.Unlock() defer r.metadataCacheLock.Unlock()
@ -204,10 +220,11 @@ func (r *BucketRegistry) removeMetadataCache(bucket string) {
delete(r.metadataCache, bucket) delete(r.metadataCache, bucket)
} }
func (r *BucketRegistry) markNotFound(bucket string) {
r.notFoundLock.Lock()
defer r.notFoundLock.Unlock()
r.notFound[bucket] = struct{}{}
func (r *BucketRegistry) isNotFound(bucket string) bool {
r.notFoundLock.RLock()
defer r.notFoundLock.RUnlock()
_, ok := r.notFound[bucket]
return ok
} }
func (r *BucketRegistry) unMarkNotFound(bucket string) { func (r *BucketRegistry) unMarkNotFound(bucket string) {
@ -215,3 +232,8 @@ func (r *BucketRegistry) unMarkNotFound(bucket string) {
defer r.notFoundLock.Unlock() defer r.notFoundLock.Unlock()
delete(r.notFound, bucket) delete(r.notFound, bucket)
} }
func (r *BucketRegistry) ClearCache(bucket string) {
r.removeMetadataCache(bucket)
r.unMarkNotFound(bucket)
}

50
weed/s3api/bucket_metadata_test.go

@ -86,7 +86,7 @@ var tcs = []*BucketMetadataTestCase{
{ {
badEntry, &BucketMetaData{ badEntry, &BucketMetaData{
Name: badEntry.Name, Name: badEntry.Name,
ObjectOwnership: s3_constants.DefaultOwnershipForExists,
ObjectOwnership: s3_constants.DefaultObjectOwnership,
Owner: &s3.Owner{ Owner: &s3.Owner{
DisplayName: &s3account.AccountAdmin.Name, DisplayName: &s3account.AccountAdmin.Name,
ID: &s3account.AccountAdmin.Id, ID: &s3account.AccountAdmin.Id,
@ -108,12 +108,20 @@ var tcs = []*BucketMetadataTestCase{
{ {
ownershipEmptyStr, &BucketMetaData{ ownershipEmptyStr, &BucketMetaData{
Name: ownershipEmptyStr.Name, Name: ownershipEmptyStr.Name,
ObjectOwnership: s3_constants.DefaultOwnershipForExists,
ObjectOwnership: s3_constants.DefaultObjectOwnership,
Owner: &s3.Owner{ Owner: &s3.Owner{
DisplayName: &s3account.AccountAdmin.Name, DisplayName: &s3account.AccountAdmin.Name,
ID: &s3account.AccountAdmin.Id, ID: &s3account.AccountAdmin.Id,
}, },
Acl: nil,
Acl: []*s3.Grant{
{
Permission: &s3_constants.PermissionFullControl,
Grantee: &s3.Grantee{
Type: &s3_constants.GrantTypeCanonicalUser,
ID: &s3account.AccountAdmin.Id,
},
},
},
}, },
}, },
{ {
@ -124,35 +132,59 @@ var tcs = []*BucketMetadataTestCase{
DisplayName: &s3account.AccountAdmin.Name, DisplayName: &s3account.AccountAdmin.Name,
ID: &s3account.AccountAdmin.Id, ID: &s3account.AccountAdmin.Id,
}, },
Acl: nil,
Acl: []*s3.Grant{
{
Permission: &s3_constants.PermissionFullControl,
Grantee: &s3.Grantee{
Type: &s3_constants.GrantTypeCanonicalUser,
ID: &s3account.AccountAdmin.Id,
},
},
},
}, },
}, },
{ {
acpEmptyStr, &BucketMetaData{ acpEmptyStr, &BucketMetaData{
Name: acpEmptyStr.Name, Name: acpEmptyStr.Name,
ObjectOwnership: s3_constants.DefaultOwnershipForExists,
ObjectOwnership: s3_constants.DefaultObjectOwnership,
Owner: &s3.Owner{ Owner: &s3.Owner{
DisplayName: &s3account.AccountAdmin.Name, DisplayName: &s3account.AccountAdmin.Name,
ID: &s3account.AccountAdmin.Id, ID: &s3account.AccountAdmin.Id,
}, },
Acl: nil,
Acl: []*s3.Grant{
{
Permission: &s3_constants.PermissionFullControl,
Grantee: &s3.Grantee{
Type: &s3_constants.GrantTypeCanonicalUser,
ID: &s3account.AccountAdmin.Id,
},
},
},
}, },
}, },
{ {
acpEmptyObject, &BucketMetaData{ acpEmptyObject, &BucketMetaData{
Name: acpEmptyObject.Name, Name: acpEmptyObject.Name,
ObjectOwnership: s3_constants.DefaultOwnershipForExists,
ObjectOwnership: s3_constants.DefaultObjectOwnership,
Owner: &s3.Owner{ Owner: &s3.Owner{
DisplayName: &s3account.AccountAdmin.Name, DisplayName: &s3account.AccountAdmin.Name,
ID: &s3account.AccountAdmin.Id, ID: &s3account.AccountAdmin.Id,
}, },
Acl: nil,
Acl: []*s3.Grant{
{
Permission: &s3_constants.PermissionFullControl,
Grantee: &s3.Grantee{
Type: &s3_constants.GrantTypeCanonicalUser,
ID: &s3account.AccountAdmin.Id,
},
},
},
}, },
}, },
{ {
acpOwnerNil, &BucketMetaData{ acpOwnerNil, &BucketMetaData{
Name: acpOwnerNil.Name, Name: acpOwnerNil.Name,
ObjectOwnership: s3_constants.DefaultOwnershipForExists,
ObjectOwnership: s3_constants.DefaultObjectOwnership,
Owner: &s3.Owner{ Owner: &s3.Owner{
DisplayName: &s3account.AccountAdmin.Name, DisplayName: &s3account.AccountAdmin.Name,
ID: &s3account.AccountAdmin.Id, ID: &s3account.AccountAdmin.Id,

24
weed/s3api/filer_multipart.go

@ -5,6 +5,7 @@ import (
"encoding/xml" "encoding/xml"
"fmt" "fmt"
"github.com/google/uuid" "github.com/google/uuid"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3err" "github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
"golang.org/x/exp/slices" "golang.org/x/exp/slices"
"math" "math"
@ -27,7 +28,7 @@ type InitiateMultipartUploadResult struct {
s3.CreateMultipartUploadOutput s3.CreateMultipartUploadOutput
} }
func (s3a *S3ApiServer) createMultipartUpload(input *s3.CreateMultipartUploadInput) (output *InitiateMultipartUploadResult, code s3err.ErrorCode) {
func (s3a *S3ApiServer) createMultipartUpload(initiatorId string, input *s3.CreateMultipartUploadInput) (output *InitiateMultipartUploadResult, code s3err.ErrorCode) {
glog.V(2).Infof("createMultipartUpload input %v", input) glog.V(2).Infof("createMultipartUpload input %v", input)
@ -46,6 +47,7 @@ func (s3a *S3ApiServer) createMultipartUpload(input *s3.CreateMultipartUploadInp
if input.ContentType != nil { if input.ContentType != nil {
entry.Attributes.Mime = *input.ContentType entry.Attributes.Mime = *input.ContentType
} }
entry.Extended[s3_constants.ExtAmzMultipartInitiator] = []byte(initiatorId)
}); err != nil { }); err != nil {
glog.Errorf("NewMultipartUpload error: %v", err) glog.Errorf("NewMultipartUpload error: %v", err)
return nil, s3err.ErrInternalError return nil, s3err.ErrInternalError
@ -236,7 +238,7 @@ type ListMultipartUploadsResult struct {
Upload []*s3.MultipartUpload `locationName:"Upload" type:"list" flattened:"true"` Upload []*s3.MultipartUpload `locationName:"Upload" type:"list" flattened:"true"`
} }
func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput) (output *ListMultipartUploadsResult, code s3err.ErrorCode) {
func (s3a *S3ApiServer) listMultipartUploads(bucketMetaData *BucketMetaData, input *s3.ListMultipartUploadsInput) (output *ListMultipartUploadsResult, code s3err.ErrorCode) {
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html // https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html
glog.V(2).Infof("listMultipartUploads input %v", input) glog.V(2).Infof("listMultipartUploads input %v", input)
@ -267,9 +269,27 @@ func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput
if *input.Prefix != "" && !strings.HasPrefix(key, *input.Prefix) { if *input.Prefix != "" && !strings.HasPrefix(key, *input.Prefix) {
continue continue
} }
initiatorId := string(entry.Extended[s3_constants.ExtAmzMultipartInitiator])
if initiatorId == "" {
initiatorId = *bucketMetaData.Owner.ID
}
initiatorDisplayName := s3a.accountManager.IdNameMapping[initiatorId]
ownerId := string(entry.Extended[s3_constants.ExtAmzOwnerKey])
if ownerId == "" {
ownerId = *bucketMetaData.Owner.ID
}
ownerDisplayName := s3a.accountManager.IdNameMapping[ownerId]
output.Upload = append(output.Upload, &s3.MultipartUpload{ output.Upload = append(output.Upload, &s3.MultipartUpload{
Key: objectKey(aws.String(key)), Key: objectKey(aws.String(key)),
UploadId: aws.String(entry.Name), UploadId: aws.String(entry.Name),
Owner: &s3.Owner{
ID: &initiatorId,
DisplayName: &ownerDisplayName,
},
Initiator: &s3.Initiator{
ID: &initiatorId,
DisplayName: &initiatorDisplayName,
},
}) })
uploadsCount += 1 uploadsCount += 1
} }

8
weed/s3api/filer_multipart_test.go

@ -57,19 +57,19 @@ func Test_findByPartNumber(t *testing.T) {
} }
parts := []CompletedPart{ parts := []CompletedPart{
CompletedPart{
{
ETag: "xxx", ETag: "xxx",
PartNumber: 1, PartNumber: 1,
}, },
CompletedPart{
{
ETag: "lll", ETag: "lll",
PartNumber: 1, PartNumber: 1,
}, },
CompletedPart{
{
ETag: "yyy", ETag: "yyy",
PartNumber: 3, PartNumber: 3,
}, },
CompletedPart{
{
ETag: "zzz", ETag: "zzz",
PartNumber: 5, PartNumber: 5,
}, },

4
weed/s3api/s3_constants/acp_ownership.go

@ -4,9 +4,7 @@ var (
OwnershipBucketOwnerPreferred = "BucketOwnerPreferred" OwnershipBucketOwnerPreferred = "BucketOwnerPreferred"
OwnershipObjectWriter = "ObjectWriter" OwnershipObjectWriter = "ObjectWriter"
OwnershipBucketOwnerEnforced = "BucketOwnerEnforced" OwnershipBucketOwnerEnforced = "BucketOwnerEnforced"
DefaultOwnershipForCreate = OwnershipObjectWriter
DefaultOwnershipForExists = OwnershipBucketOwnerEnforced
DefaultObjectOwnership = OwnershipBucketOwnerEnforced
) )
func ValidateOwnership(ownership string) bool { func ValidateOwnership(ownership string) bool {

7
weed/s3api/s3_constants/extend_key.go

@ -1,7 +1,8 @@
package s3_constants package s3_constants
const ( const (
ExtAmzOwnerKey = "Seaweed-X-Amz-Owner"
ExtAmzAclKey = "Seaweed-X-Amz-Acl"
ExtOwnershipKey = "Seaweed-X-Amz-Ownership"
ExtAmzOwnerKey = "Seaweed-X-Amz-Owner"
ExtAmzMultipartInitiator = "Seaweed-X-Amz-Multipart-Initiator"
ExtAmzAclKey = "Seaweed-X-Amz-Acl"
ExtOwnershipKey = "Seaweed-X-Amz-Ownership"
) )

5
weed/s3api/s3_constants/header.go

@ -37,8 +37,9 @@ const (
AmzObjectTaggingDirective = "X-Amz-Tagging-Directive" AmzObjectTaggingDirective = "X-Amz-Tagging-Directive"
AmzTagCount = "x-amz-tagging-count" AmzTagCount = "x-amz-tagging-count"
X_SeaweedFS_Header_Directory_Key = "x-seaweedfs-is-directory-key"
XSeaweedFSHeaderAmzBucketOwnerId = "x-seaweedfs-amz-bucket-owner-id"
X_SeaweedFS_Header_Directory_Key = "x-seaweedfs-is-directory-key"
XSeaweedFSHeaderAmzBucketOwnerId = "x-seaweedfs-amz-bucket-owner-id"
XSeaweedFSHeaderAmzBucketAccessDenied = "x-seaweedfs-amz-bucket-access-denied"
// S3 ACL headers // S3 ACL headers
AmzCannedAcl = "X-Amz-Acl" AmzCannedAcl = "X-Amz-Acl"

381
weed/s3api/s3acl/acl_helper.go

@ -3,6 +3,7 @@ package s3acl
import ( import (
"encoding/json" "encoding/json"
"encoding/xml" "encoding/xml"
"fmt"
"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
"github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/s3"
"github.com/seaweedfs/seaweedfs/weed/filer" "github.com/seaweedfs/seaweedfs/weed/filer"
@ -16,6 +17,8 @@ import (
"strings" "strings"
) )
var customAclHeaders = []string{s3_constants.AmzAclFullControl, s3_constants.AmzAclRead, s3_constants.AmzAclReadAcp, s3_constants.AmzAclWrite, s3_constants.AmzAclWriteAcp}
// GetAccountId get AccountId from request headers, AccountAnonymousId will be return if not presen // GetAccountId get AccountId from request headers, AccountAnonymousId will be return if not presen
func GetAccountId(r *http.Request) string { func GetAccountId(r *http.Request) string {
id := r.Header.Get(s3_constants.AmzAccountId) id := r.Header.Get(s3_constants.AmzAccountId)
@ -26,11 +29,36 @@ func GetAccountId(r *http.Request) string {
} }
} }
// ExtractAcl extracts the acl from the request body, or from the header if request body is empty
func ExtractAcl(r *http.Request, accountManager *s3account.AccountManager, ownership, bucketOwnerId, ownerId, accountId string) (grants []*s3.Grant, errCode s3err.ErrorCode) {
if r.Body != nil && r.Body != http.NoBody {
defer util.CloseRequest(r)
// ValidateAccount validate weather request account id is allowed to access
func ValidateAccount(requestAccountId string, allowedAccounts ...string) bool {
for _, allowedAccount := range allowedAccounts {
if requestAccountId == allowedAccount {
return true
}
}
return false
}
// ExtractBucketAcl extracts the acl from the request body, or from the header if request body is empty
func ExtractBucketAcl(r *http.Request, accountManager *s3account.AccountManager, objectOwnership, bucketOwnerId, requestAccountId string, createBucket bool) (grants []*s3.Grant, errCode s3err.ErrorCode) {
cannedAclPresent := false
if r.Header.Get(s3_constants.AmzCannedAcl) != "" {
cannedAclPresent = true
}
customAclPresent := false
for _, customAclHeader := range customAclHeaders {
if r.Header.Get(customAclHeader) != "" {
customAclPresent = true
break
}
}
// AccessControlList body is not support when create object/bucket
if !createBucket && r.Body != nil && r.Body != http.NoBody {
defer util.CloseRequest(r)
if cannedAclPresent || customAclPresent {
return nil, s3err.ErrUnexpectedContent
}
var acp s3.AccessControlPolicy var acp s3.AccessControlPolicy
err := xmlutil.UnmarshalXML(&acp, xml.NewDecoder(r.Body), "") err := xmlutil.UnmarshalXML(&acp, xml.NewDecoder(r.Body), "")
if err != nil || acp.Owner == nil || acp.Owner.ID == nil { if err != nil || acp.Owner == nil || acp.Owner.ID == nil {
@ -38,116 +66,127 @@ func ExtractAcl(r *http.Request, accountManager *s3account.AccountManager, owner
} }
//owner should present && owner is immutable //owner should present && owner is immutable
if *acp.Owner.ID != ownerId {
glog.V(3).Infof("set acl denied! owner account is not consistent, request account id: %s, expect account id: %s", accountId, ownerId)
if *acp.Owner.ID == "" || *acp.Owner.ID != bucketOwnerId {
glog.V(3).Infof("set acl denied! owner account is not consistent, request account id: %s, expect account id: %s", *acp.Owner.ID, bucketOwnerId)
return nil, s3err.ErrAccessDenied return nil, s3err.ErrAccessDenied
} }
return ValidateAndTransferGrants(accountManager, acp.Grants)
grants = acp.Grants
} else { } else {
_, grants, errCode = ParseAndValidateAclHeadersOrElseDefault(r, accountManager, ownership, bucketOwnerId, accountId, true)
return grants, errCode
if cannedAclPresent && customAclPresent {
return nil, s3err.ErrInvalidRequest
}
if cannedAclPresent {
grants, errCode = ExtractBucketCannedAcl(r, requestAccountId)
} else if customAclPresent {
grants, errCode = ExtractCustomAcl(r)
}
if errCode != s3err.ErrNone {
return nil, errCode
}
} }
}
// ParseAndValidateAclHeadersOrElseDefault will callParseAndValidateAclHeaders to get Grants, if empty, it will return Grant that grant `accountId` with `FullControl` permission
func ParseAndValidateAclHeadersOrElseDefault(r *http.Request, accountManager *s3account.AccountManager, ownership, bucketOwnerId, accountId string, putAcl bool) (ownerId string, grants []*s3.Grant, errCode s3err.ErrorCode) {
ownerId, grants, errCode = ParseAndValidateAclHeaders(r, accountManager, ownership, bucketOwnerId, accountId, putAcl)
errCode = ValidateObjectOwnershipAndGrants(objectOwnership, bucketOwnerId, grants)
if errCode != s3err.ErrNone { if errCode != s3err.ErrNone {
return
return nil, errCode
} }
if len(grants) == 0 {
//if no acl(both customAcl and cannedAcl) specified, grant accountId(object writer) with full control permission
grants = append(grants, &s3.Grant{
Grantee: &s3.Grantee{
Type: &s3_constants.GrantTypeCanonicalUser,
ID: &accountId,
},
Permission: &s3_constants.PermissionFullControl,
})
grants, errCode = ValidateAndTransferGrants(accountManager, grants)
if errCode != s3err.ErrNone {
return nil, errCode
} }
return
return grants, s3err.ErrNone
} }
// ParseAndValidateAclHeaders parse and validate acl from header
func ParseAndValidateAclHeaders(r *http.Request, accountManager *s3account.AccountManager, ownership, bucketOwnerId, accountId string, putAcl bool) (ownerId string, grants []*s3.Grant, errCode s3err.ErrorCode) {
ownerId, grants, errCode = ParseAclHeaders(r, ownership, bucketOwnerId, accountId, putAcl)
if errCode != s3err.ErrNone {
return
// ExtractObjectAcl extracts the acl from the request body, or from the header if request body is empty
func ExtractObjectAcl(r *http.Request, accountManager *s3account.AccountManager, objectOwnership, bucketOwnerId, requestAccountId string, createObject bool) (ownerId string, grants []*s3.Grant, errCode s3err.ErrorCode) {
cannedAclPresent := false
if r.Header.Get(s3_constants.AmzCannedAcl) != "" {
cannedAclPresent = true
} }
if len(grants) > 0 {
grants, errCode = ValidateAndTransferGrants(accountManager, grants)
customAclPresent := false
for _, customAclHeader := range customAclHeaders {
if r.Header.Get(customAclHeader) != "" {
customAclPresent = true
break
}
} }
return
}
// ParseAclHeaders parse acl headers
// When `putAcl` is true, only `CannedAcl` is parsed, such as `PutBucketAcl` or `PutObjectAcl`
// is requested, `CustomAcl` is parsed from the request body not from headers, and only if the
// request body is empty, `CannedAcl` is parsed from the header, and will not parse `CustomAcl` from the header
//
// Since `CustomAcl` has higher priority, it will be parsed first; if `CustomAcl` does not exist, `CannedAcl` will be parsed
func ParseAclHeaders(r *http.Request, ownership, bucketOwnerId, accountId string, putAcl bool) (ownerId string, grants []*s3.Grant, errCode s3err.ErrorCode) {
if !putAcl {
errCode = ParseCustomAclHeaders(r, &grants)
// AccessControlList body is not support when create object/bucket
if !createObject && r.Body != nil && r.Body != http.NoBody {
defer util.CloseRequest(r)
if cannedAclPresent || customAclPresent {
return "", nil, s3err.ErrUnexpectedContent
}
var acp s3.AccessControlPolicy
err := xmlutil.UnmarshalXML(&acp, xml.NewDecoder(r.Body), "")
if err != nil || acp.Owner == nil || acp.Owner.ID == nil {
return "", nil, s3err.ErrInvalidRequest
}
//owner should present && owner is immutable
if *acp.Owner.ID == "" {
glog.V(1).Infof("Access denied! The owner id is required when specifying grants using AccessControlList")
return "", nil, s3err.ErrAccessDenied
}
ownerId = *acp.Owner.ID
grants = acp.Grants
} else {
if cannedAclPresent && customAclPresent {
return "", nil, s3err.ErrInvalidRequest
}
if cannedAclPresent {
ownerId, grants, errCode = ExtractObjectCannedAcl(r, objectOwnership, bucketOwnerId, requestAccountId, createObject)
} else {
grants, errCode = ExtractCustomAcl(r)
}
if errCode != s3err.ErrNone { if errCode != s3err.ErrNone {
return "", nil, errCode return "", nil, errCode
} }
} }
if len(grants) > 0 {
return accountId, grants, s3err.ErrNone
}
cannedAcl := r.Header.Get(s3_constants.AmzCannedAcl)
if len(cannedAcl) == 0 {
return accountId, grants, s3err.ErrNone
}
//if canned acl specified, parse cannedAcl (lower priority to custom acl)
ownerId, grants, errCode = ParseCannedAclHeader(ownership, bucketOwnerId, accountId, cannedAcl, putAcl)
errCode = ValidateObjectOwnershipAndGrants(objectOwnership, bucketOwnerId, grants)
if errCode != s3err.ErrNone { if errCode != s3err.ErrNone {
return "", nil, errCode return "", nil, errCode
} }
grants, errCode = ValidateAndTransferGrants(accountManager, grants)
return ownerId, grants, errCode return ownerId, grants, errCode
} }
func ParseCustomAclHeaders(r *http.Request, grants *[]*s3.Grant) s3err.ErrorCode {
customAclHeaders := []string{s3_constants.AmzAclFullControl, s3_constants.AmzAclRead, s3_constants.AmzAclReadAcp, s3_constants.AmzAclWrite, s3_constants.AmzAclWriteAcp}
func ExtractCustomAcl(r *http.Request) ([]*s3.Grant, s3err.ErrorCode) {
var errCode s3err.ErrorCode var errCode s3err.ErrorCode
var grants []*s3.Grant
for _, customAclHeader := range customAclHeaders { for _, customAclHeader := range customAclHeaders {
headerValue := r.Header.Get(customAclHeader) headerValue := r.Header.Get(customAclHeader)
switch customAclHeader { switch customAclHeader {
case s3_constants.AmzAclRead: case s3_constants.AmzAclRead:
errCode = ParseCustomAclHeader(headerValue, s3_constants.PermissionRead, grants)
errCode = ParseCustomAclHeader(headerValue, s3_constants.PermissionRead, &grants)
case s3_constants.AmzAclWrite: case s3_constants.AmzAclWrite:
errCode = ParseCustomAclHeader(headerValue, s3_constants.PermissionWrite, grants)
errCode = ParseCustomAclHeader(headerValue, s3_constants.PermissionWrite, &grants)
case s3_constants.AmzAclReadAcp: case s3_constants.AmzAclReadAcp:
errCode = ParseCustomAclHeader(headerValue, s3_constants.PermissionReadAcp, grants)
errCode = ParseCustomAclHeader(headerValue, s3_constants.PermissionReadAcp, &grants)
case s3_constants.AmzAclWriteAcp: case s3_constants.AmzAclWriteAcp:
errCode = ParseCustomAclHeader(headerValue, s3_constants.PermissionWriteAcp, grants)
errCode = ParseCustomAclHeader(headerValue, s3_constants.PermissionWriteAcp, &grants)
case s3_constants.AmzAclFullControl: case s3_constants.AmzAclFullControl:
errCode = ParseCustomAclHeader(headerValue, s3_constants.PermissionFullControl, grants)
errCode = ParseCustomAclHeader(headerValue, s3_constants.PermissionFullControl, &grants)
default:
errCode = s3err.ErrInvalidAclArgument
} }
if errCode != s3err.ErrNone { if errCode != s3err.ErrNone {
return errCode
return nil, errCode
} }
} }
return s3err.ErrNone
return grants, s3err.ErrNone
} }
func ParseCustomAclHeader(headerValue, permission string, grants *[]*s3.Grant) s3err.ErrorCode { func ParseCustomAclHeader(headerValue, permission string, grants *[]*s3.Grant) s3err.ErrorCode {
if len(headerValue) > 0 { if len(headerValue) > 0 {
split := strings.Split(headerValue, ", ")
split := strings.Split(headerValue, ",")
for _, grantStr := range split { for _, grantStr := range split {
kv := strings.Split(grantStr, "=") kv := strings.Split(grantStr, "=")
if len(kv) != 2 { if len(kv) != 2 {
return s3err.ErrInvalidRequest return s3err.ErrInvalidRequest
} }
switch kv[0] {
switch strings.TrimSpace(kv[0]) {
case "id": case "id":
var accountId string
_ = json.Unmarshal([]byte(kv[1]), &accountId)
accountId := decodeGranteeValue(kv[1])
*grants = append(*grants, &s3.Grant{ *grants = append(*grants, &s3.Grant{
Grantee: &s3.Grantee{ Grantee: &s3.Grantee{
Type: &s3_constants.GrantTypeCanonicalUser, Type: &s3_constants.GrantTypeCanonicalUser,
@ -156,8 +195,7 @@ func ParseCustomAclHeader(headerValue, permission string, grants *[]*s3.Grant) s
Permission: &permission, Permission: &permission,
}) })
case "emailAddress": case "emailAddress":
var emailAddress string
_ = json.Unmarshal([]byte(kv[1]), &emailAddress)
emailAddress := decodeGranteeValue(kv[1])
*grants = append(*grants, &s3.Grant{ *grants = append(*grants, &s3.Grant{
Grantee: &s3.Grantee{ Grantee: &s3.Grantee{
Type: &s3_constants.GrantTypeAmazonCustomerByEmail, Type: &s3_constants.GrantTypeAmazonCustomerByEmail,
@ -167,7 +205,7 @@ func ParseCustomAclHeader(headerValue, permission string, grants *[]*s3.Grant) s
}) })
case "uri": case "uri":
var groupName string var groupName string
_ = json.Unmarshal([]byte(kv[1]), &groupName)
groupName = decodeGranteeValue(kv[1])
*grants = append(*grants, &s3.Grant{ *grants = append(*grants, &s3.Grant{
Grantee: &s3.Grantee{ Grantee: &s3.Grantee{
Type: &s3_constants.GrantTypeGroup, Type: &s3_constants.GrantTypeGroup,
@ -179,17 +217,66 @@ func ParseCustomAclHeader(headerValue, permission string, grants *[]*s3.Grant) s
} }
} }
return s3err.ErrNone return s3err.ErrNone
}
func decodeGranteeValue(value string) (result string) {
if !strings.HasPrefix(value, "\"") {
return value
}
_ = json.Unmarshal([]byte(value), &result)
if result == "" {
result = value
}
return result
} }
func ParseCannedAclHeader(bucketOwnership, bucketOwnerId, accountId, cannedAcl string, putAcl bool) (ownerId string, grants []*s3.Grant, err s3err.ErrorCode) {
// ExtractBucketCannedAcl parse bucket canned acl, includes: 'private'|'public-read'|'public-read-write'|'authenticated-read'
func ExtractBucketCannedAcl(request *http.Request, requestAccountId string) (grants []*s3.Grant, err s3err.ErrorCode) {
cannedAcl := request.Header.Get(s3_constants.AmzCannedAcl)
if cannedAcl == "" {
return grants, s3err.ErrNone
}
err = s3err.ErrNone err = s3err.ErrNone
ownerId = accountId
objectWriterFullControl := &s3.Grant{
Grantee: &s3.Grantee{
ID: &requestAccountId,
Type: &s3_constants.GrantTypeCanonicalUser,
},
Permission: &s3_constants.PermissionFullControl,
}
switch cannedAcl {
case s3_constants.CannedAclPrivate:
grants = append(grants, objectWriterFullControl)
case s3_constants.CannedAclPublicRead:
grants = append(grants, objectWriterFullControl)
grants = append(grants, s3_constants.PublicRead...)
case s3_constants.CannedAclPublicReadWrite:
grants = append(grants, objectWriterFullControl)
grants = append(grants, s3_constants.PublicReadWrite...)
case s3_constants.CannedAclAuthenticatedRead:
grants = append(grants, objectWriterFullControl)
grants = append(grants, s3_constants.AuthenticatedRead...)
default:
err = s3err.ErrInvalidAclArgument
}
return
}
//objectWrite automatically has full control on current object
// ExtractObjectCannedAcl parse object canned acl, includes: 'private'|'public-read'|'public-read-write'|'authenticated-read'|'aws-exec-read'|'bucket-owner-read'|'bucket-owner-full-control'
func ExtractObjectCannedAcl(request *http.Request, objectOwnership, bucketOwnerId, requestAccountId string, createObject bool) (ownerId string, grants []*s3.Grant, errCode s3err.ErrorCode) {
if createObject {
ownerId = requestAccountId
}
cannedAcl := request.Header.Get(s3_constants.AmzCannedAcl)
if cannedAcl == "" {
return ownerId, grants, s3err.ErrNone
}
errCode = s3err.ErrNone
objectWriterFullControl := &s3.Grant{ objectWriterFullControl := &s3.Grant{
Grantee: &s3.Grantee{ Grantee: &s3.Grantee{
ID: &accountId,
ID: &requestAccountId,
Type: &s3_constants.GrantTypeCanonicalUser, Type: &s3_constants.GrantTypeCanonicalUser,
}, },
Permission: &s3_constants.PermissionFullControl, Permission: &s3_constants.PermissionFullControl,
@ -212,7 +299,7 @@ func ParseCannedAclHeader(bucketOwnership, bucketOwnerId, accountId, cannedAcl s
grants = append(grants, s3_constants.LogDeliveryWrite...) grants = append(grants, s3_constants.LogDeliveryWrite...)
case s3_constants.CannedAclBucketOwnerRead: case s3_constants.CannedAclBucketOwnerRead:
grants = append(grants, objectWriterFullControl) grants = append(grants, objectWriterFullControl)
if bucketOwnerId != "" && bucketOwnerId != accountId {
if requestAccountId != bucketOwnerId {
grants = append(grants, grants = append(grants,
&s3.Grant{ &s3.Grant{
Grantee: &s3.Grantee{ Grantee: &s3.Grantee{
@ -225,7 +312,7 @@ func ParseCannedAclHeader(bucketOwnership, bucketOwnerId, accountId, cannedAcl s
case s3_constants.CannedAclBucketOwnerFullControl: case s3_constants.CannedAclBucketOwnerFullControl:
if bucketOwnerId != "" { if bucketOwnerId != "" {
// if set ownership to 'BucketOwnerPreferred' when upload object, the bucket owner will be the object owner // if set ownership to 'BucketOwnerPreferred' when upload object, the bucket owner will be the object owner
if !putAcl && bucketOwnership == s3_constants.OwnershipBucketOwnerPreferred {
if createObject && objectOwnership == s3_constants.OwnershipBucketOwnerPreferred {
ownerId = bucketOwnerId ownerId = bucketOwnerId
grants = append(grants, grants = append(grants,
&s3.Grant{ &s3.Grant{
@ -237,7 +324,7 @@ func ParseCannedAclHeader(bucketOwnership, bucketOwnerId, accountId, cannedAcl s
}) })
} else { } else {
grants = append(grants, objectWriterFullControl) grants = append(grants, objectWriterFullControl)
if accountId != bucketOwnerId {
if requestAccountId != bucketOwnerId {
grants = append(grants, grants = append(grants,
&s3.Grant{ &s3.Grant{
Grantee: &s3.Grantee{ Grantee: &s3.Grantee{
@ -249,15 +336,13 @@ func ParseCannedAclHeader(bucketOwnership, bucketOwnerId, accountId, cannedAcl s
} }
} }
} }
case s3_constants.CannedAclAwsExecRead:
err = s3err.ErrNotImplemented
default: default:
err = s3err.ErrInvalidRequest
errCode = s3err.ErrInvalidAclArgument
} }
return return
} }
// ValidateAndTransferGrants validate grant & transfer Email-Grant to Id-Grant
// ValidateAndTransferGrants validate grant entity exists and transfer Email-Grant to Id-Grant
func ValidateAndTransferGrants(accountManager *s3account.AccountManager, grants []*s3.Grant) ([]*s3.Grant, s3err.ErrorCode) { func ValidateAndTransferGrants(accountManager *s3account.AccountManager, grants []*s3.Grant) ([]*s3.Grant, s3err.ErrorCode) {
var result []*s3.Grant var result []*s3.Grant
for _, grant := range grants { for _, grant := range grants {
@ -314,15 +399,43 @@ func ValidateAndTransferGrants(accountManager *s3account.AccountManager, grants
return result, s3err.ErrNone return result, s3err.ErrNone
} }
// DetermineReqGrants generates the grant set (Grants) according to accountId and reqPermission.
func DetermineReqGrants(accountId, aclAction string) (grants []*s3.Grant) {
// ValidateObjectOwnershipAndGrants validate if grants equals OwnerFullControl when 'ObjectOwnership' is 'BucketOwnerEnforced'
func ValidateObjectOwnershipAndGrants(objectOwnership, bucketOwnerId string, grants []*s3.Grant) s3err.ErrorCode {
if len(grants) == 0 {
return s3err.ErrNone
}
if objectOwnership == "" {
objectOwnership = s3_constants.DefaultObjectOwnership
}
if objectOwnership != s3_constants.OwnershipBucketOwnerEnforced {
return s3err.ErrNone
}
if len(grants) > 1 {
return s3err.AccessControlListNotSupported
}
bucketOwnerFullControlGrant := &s3.Grant{
Permission: &s3_constants.PermissionFullControl,
Grantee: &s3.Grantee{
Type: &s3_constants.GrantTypeCanonicalUser,
ID: &bucketOwnerId,
},
}
if GrantEquals(bucketOwnerFullControlGrant, grants[0]) {
return s3err.ErrNone
}
return s3err.AccessControlListNotSupported
}
// DetermineRequiredGrants generates the grant set (Grants) according to accountId and reqPermission.
func DetermineRequiredGrants(accountId, permission string) (grants []*s3.Grant) {
// group grantee (AllUsers) // group grantee (AllUsers)
grants = append(grants, &s3.Grant{ grants = append(grants, &s3.Grant{
Grantee: &s3.Grantee{ Grantee: &s3.Grantee{
Type: &s3_constants.GrantTypeGroup, Type: &s3_constants.GrantTypeGroup,
URI: &s3_constants.GranteeGroupAllUsers, URI: &s3_constants.GranteeGroupAllUsers,
}, },
Permission: &aclAction,
Permission: &permission,
}) })
grants = append(grants, &s3.Grant{ grants = append(grants, &s3.Grant{
Grantee: &s3.Grantee{ Grantee: &s3.Grantee{
@ -338,7 +451,7 @@ func DetermineReqGrants(accountId, aclAction string) (grants []*s3.Grant) {
Type: &s3_constants.GrantTypeCanonicalUser, Type: &s3_constants.GrantTypeCanonicalUser,
ID: &accountId, ID: &accountId,
}, },
Permission: &aclAction,
Permission: &permission,
}) })
grants = append(grants, &s3.Grant{ grants = append(grants, &s3.Grant{
Grantee: &s3.Grantee{ Grantee: &s3.Grantee{
@ -355,7 +468,7 @@ func DetermineReqGrants(accountId, aclAction string) (grants []*s3.Grant) {
Type: &s3_constants.GrantTypeGroup, Type: &s3_constants.GrantTypeGroup,
URI: &s3_constants.GranteeGroupAuthenticatedUsers, URI: &s3_constants.GranteeGroupAuthenticatedUsers,
}, },
Permission: &aclAction,
Permission: &permission,
}) })
grants = append(grants, &s3.Grant{ grants = append(grants, &s3.Grant{
Grantee: &s3.Grantee{ Grantee: &s3.Grantee{
@ -380,9 +493,9 @@ func GetAcpOwner(entryExtended map[string][]byte, defaultOwner string) string {
return defaultOwner return defaultOwner
} }
func SetAcpGrantsHeader(r *http.Request, acpGrants []*s3.Grant) {
if len(acpGrants) > 0 {
a, err := json.Marshal(acpGrants)
func SetAcpGrantsHeader(r *http.Request, grants []*s3.Grant) {
if len(grants) > 0 {
a, err := MarshalGrantsToJson(grants)
if err == nil { if err == nil {
r.Header.Set(s3_constants.ExtAmzAclKey, string(a)) r.Header.Set(s3_constants.ExtAmzAclKey, string(a))
} else { } else {
@ -392,7 +505,7 @@ func SetAcpGrantsHeader(r *http.Request, acpGrants []*s3.Grant) {
} }
// GetAcpGrants return grants parsed from entry // GetAcpGrants return grants parsed from entry
func GetAcpGrants(entryExtended map[string][]byte) []*s3.Grant {
func GetAcpGrants(ownerId *string, entryExtended map[string][]byte) []*s3.Grant {
acpBytes, ok := entryExtended[s3_constants.ExtAmzAclKey] acpBytes, ok := entryExtended[s3_constants.ExtAmzAclKey]
if ok && len(acpBytes) > 0 { if ok && len(acpBytes) > 0 {
var grants []*s3.Grant var grants []*s3.Grant
@ -400,31 +513,43 @@ func GetAcpGrants(entryExtended map[string][]byte) []*s3.Grant {
if err == nil { if err == nil {
return grants return grants
} }
glog.Warning("grants Unmarshal error", err)
}
if ownerId == nil {
return nil
}
return []*s3.Grant{
{
Grantee: &s3.Grantee{
Type: &s3_constants.GrantTypeCanonicalUser,
ID: ownerId,
},
Permission: &s3_constants.PermissionFullControl,
},
} }
return nil
} }
// AssembleEntryWithAcp fill entry with owner and grants // AssembleEntryWithAcp fill entry with owner and grants
func AssembleEntryWithAcp(objectEntry *filer_pb.Entry, objectOwner string, grants []*s3.Grant) s3err.ErrorCode {
if objectEntry.Extended == nil {
objectEntry.Extended = make(map[string][]byte)
func AssembleEntryWithAcp(filerEntry *filer_pb.Entry, ownerId string, grants []*s3.Grant) s3err.ErrorCode {
if filerEntry.Extended == nil {
filerEntry.Extended = make(map[string][]byte)
} }
if len(objectOwner) > 0 {
objectEntry.Extended[s3_constants.ExtAmzOwnerKey] = []byte(objectOwner)
if len(ownerId) > 0 {
filerEntry.Extended[s3_constants.ExtAmzOwnerKey] = []byte(ownerId)
} else { } else {
delete(objectEntry.Extended, s3_constants.ExtAmzOwnerKey)
delete(filerEntry.Extended, s3_constants.ExtAmzOwnerKey)
} }
if len(grants) > 0 {
grantsBytes, err := json.Marshal(grants)
if grants != nil {
grantsBytes, err := MarshalGrantsToJson(grants)
if err != nil { if err != nil {
glog.Warning("assemble acp to entry:", err) glog.Warning("assemble acp to entry:", err)
return s3err.ErrInvalidRequest return s3err.ErrInvalidRequest
} }
objectEntry.Extended[s3_constants.ExtAmzAclKey] = grantsBytes
filerEntry.Extended[s3_constants.ExtAmzAclKey] = grantsBytes
} else { } else {
delete(objectEntry.Extended, s3_constants.ExtAmzAclKey)
delete(filerEntry.Extended, s3_constants.ExtAmzAclKey)
} }
return s3err.ErrNone return s3err.ErrNone
@ -509,6 +634,46 @@ func GrantEquals(a, b *s3.Grant) bool {
return true return true
} }
func MarshalGrantsToJson(grants []*s3.Grant) ([]byte, error) {
if len(grants) == 0 {
return []byte{}, nil
}
var GrantsToMap []map[string]any
for _, grant := range grants {
grantee := grant.Grantee
switch *grantee.Type {
case s3_constants.GrantTypeGroup:
GrantsToMap = append(GrantsToMap, map[string]any{
"Permission": grant.Permission,
"Grantee": map[string]any{
"Type": grantee.Type,
"URI": grantee.URI,
},
})
case s3_constants.GrantTypeCanonicalUser:
GrantsToMap = append(GrantsToMap, map[string]any{
"Permission": grant.Permission,
"Grantee": map[string]any{
"Type": grantee.Type,
"ID": grantee.ID,
},
})
case s3_constants.GrantTypeAmazonCustomerByEmail:
GrantsToMap = append(GrantsToMap, map[string]any{
"Permission": grant.Permission,
"Grantee": map[string]any{
"Type": grantee.Type,
"EmailAddress": grantee.EmailAddress,
},
})
default:
return nil, fmt.Errorf("grantee type[%s] is not valid", *grantee.Type)
}
}
return json.Marshal(GrantsToMap)
}
func GrantWithFullControl(accountId string) *s3.Grant { func GrantWithFullControl(accountId string) *s3.Grant {
return &s3.Grant{ return &s3.Grant{
Permission: &s3_constants.PermissionFullControl, Permission: &s3_constants.PermissionFullControl,
@ -524,22 +689,22 @@ func CheckObjectAccessForReadObject(r *http.Request, w http.ResponseWriter, entr
return http.StatusOK, true return http.StatusOK, true
} }
accountId := GetAccountId(r)
if len(accountId) == 0 {
requestAccountId := GetAccountId(r)
if len(requestAccountId) == 0 {
glog.Warning("#checkObjectAccessForReadObject header[accountId] not exists!") glog.Warning("#checkObjectAccessForReadObject header[accountId] not exists!")
return http.StatusForbidden, false return http.StatusForbidden, false
} }
//owner access //owner access
objectOwner := GetAcpOwner(entry.Extended, bucketOwnerId) objectOwner := GetAcpOwner(entry.Extended, bucketOwnerId)
if accountId == objectOwner {
if ValidateAccount(requestAccountId, objectOwner) {
return http.StatusOK, true return http.StatusOK, true
} }
//find in Grants //find in Grants
acpGrants := GetAcpGrants(entry.Extended)
acpGrants := GetAcpGrants(nil, entry.Extended)
if acpGrants != nil { if acpGrants != nil {
reqGrants := DetermineReqGrants(accountId, s3_constants.PermissionRead)
reqGrants := DetermineRequiredGrants(requestAccountId, s3_constants.PermissionRead)
for _, requiredGrant := range reqGrants { for _, requiredGrant := range reqGrants {
for _, grant := range acpGrants { for _, grant := range acpGrants {
if GrantEquals(requiredGrant, grant) { if GrantEquals(requiredGrant, grant) {
@ -549,6 +714,6 @@ func CheckObjectAccessForReadObject(r *http.Request, w http.ResponseWriter, entr
} }
} }
glog.V(3).Infof("acl denied! request account id: %s", accountId)
glog.V(3).Infof("acl denied! request account id: %s", requestAccountId)
return http.StatusForbidden, false return http.StatusForbidden, false
} }

1391
weed/s3api/s3acl/acl_helper_test.go
File diff suppressed because it is too large
View File

333
weed/s3api/s3api_acp.go

@ -23,19 +23,19 @@ func getAccountId(r *http.Request) string {
} }
func (s3a *S3ApiServer) checkAccessByOwnership(r *http.Request, bucket string) s3err.ErrorCode { func (s3a *S3ApiServer) checkAccessByOwnership(r *http.Request, bucket string) s3err.ErrorCode {
metadata, errCode := s3a.bucketRegistry.GetBucketMetadata(bucket)
bucketMetadata, errCode := s3a.bucketRegistry.GetBucketMetadata(bucket)
if errCode != s3err.ErrNone { if errCode != s3err.ErrNone {
return errCode return errCode
} }
accountId := getAccountId(r)
if accountId == s3account.AccountAdmin.Id || accountId == *metadata.Owner.ID {
requestAccountId := getAccountId(r)
if s3acl.ValidateAccount(requestAccountId, *bucketMetadata.Owner.ID) {
return s3err.ErrNone return s3err.ErrNone
} }
return s3err.ErrAccessDenied return s3err.ErrAccessDenied
} }
//Check access for PutBucketAclHandler //Check access for PutBucketAclHandler
func (s3a *S3ApiServer) checkAccessForPutBucketAcl(accountId, bucket string) (*BucketMetaData, s3err.ErrorCode) {
func (s3a *S3ApiServer) checkAccessForPutBucketAcl(requestAccountId, bucket string) (*BucketMetaData, s3err.ErrorCode) {
bucketMetadata, errCode := s3a.bucketRegistry.GetBucketMetadata(bucket) bucketMetadata, errCode := s3a.bucketRegistry.GetBucketMetadata(bucket)
if errCode != s3err.ErrNone { if errCode != s3err.ErrNone {
return nil, errCode return nil, errCode
@ -45,12 +45,12 @@ func (s3a *S3ApiServer) checkAccessForPutBucketAcl(accountId, bucket string) (*B
return nil, s3err.AccessControlListNotSupported return nil, s3err.AccessControlListNotSupported
} }
if accountId == s3account.AccountAdmin.Id || accountId == *bucketMetadata.Owner.ID {
if s3acl.ValidateAccount(requestAccountId, *bucketMetadata.Owner.ID) {
return bucketMetadata, s3err.ErrNone return bucketMetadata, s3err.ErrNone
} }
if len(bucketMetadata.Acl) > 0 { if len(bucketMetadata.Acl) > 0 {
reqGrants := s3acl.DetermineReqGrants(accountId, s3_constants.PermissionWriteAcp)
reqGrants := s3acl.DetermineRequiredGrants(requestAccountId, s3_constants.PermissionWriteAcp)
for _, bucketGrant := range bucketMetadata.Acl { for _, bucketGrant := range bucketMetadata.Acl {
for _, reqGrant := range reqGrants { for _, reqGrant := range reqGrants {
if s3acl.GrantEquals(bucketGrant, reqGrant) { if s3acl.GrantEquals(bucketGrant, reqGrant) {
@ -59,7 +59,7 @@ func (s3a *S3ApiServer) checkAccessForPutBucketAcl(accountId, bucket string) (*B
} }
} }
} }
glog.V(3).Infof("acl denied! request account id: %s", accountId)
glog.V(3).Infof("acl denied! request account id: %s", requestAccountId)
return nil, s3err.ErrAccessDenied return nil, s3err.ErrAccessDenied
} }
@ -73,6 +73,7 @@ func updateBucketEntry(s3a *S3ApiServer, entry *filer_pb.Entry) error {
// - GetBucketAclHandler // - GetBucketAclHandler
// - ListObjectsV1Handler // - ListObjectsV1Handler
// - ListObjectsV2Handler // - ListObjectsV2Handler
// - ListMultipartUploadsHandler
func (s3a *S3ApiServer) checkAccessForReadBucket(r *http.Request, bucket, aclAction string) (*BucketMetaData, s3err.ErrorCode) { func (s3a *S3ApiServer) checkAccessForReadBucket(r *http.Request, bucket, aclAction string) (*BucketMetaData, s3err.ErrorCode) {
bucketMetadata, errCode := s3a.bucketRegistry.GetBucketMetadata(bucket) bucketMetadata, errCode := s3a.bucketRegistry.GetBucketMetadata(bucket)
if errCode != s3err.ErrNone { if errCode != s3err.ErrNone {
@ -83,13 +84,13 @@ func (s3a *S3ApiServer) checkAccessForReadBucket(r *http.Request, bucket, aclAct
return bucketMetadata, s3err.ErrNone return bucketMetadata, s3err.ErrNone
} }
accountId := s3acl.GetAccountId(r)
if accountId == s3account.AccountAdmin.Id || accountId == *bucketMetadata.Owner.ID {
requestAccountId := s3acl.GetAccountId(r)
if s3acl.ValidateAccount(requestAccountId, *bucketMetadata.Owner.ID) {
return bucketMetadata, s3err.ErrNone return bucketMetadata, s3err.ErrNone
} }
if len(bucketMetadata.Acl) > 0 { if len(bucketMetadata.Acl) > 0 {
reqGrants := s3acl.DetermineReqGrants(accountId, aclAction)
reqGrants := s3acl.DetermineRequiredGrants(requestAccountId, aclAction)
for _, bucketGrant := range bucketMetadata.Acl { for _, bucketGrant := range bucketMetadata.Acl {
for _, reqGrant := range reqGrants { for _, reqGrant := range reqGrants {
if s3acl.GrantEquals(bucketGrant, reqGrant) { if s3acl.GrantEquals(bucketGrant, reqGrant) {
@ -99,7 +100,7 @@ func (s3a *S3ApiServer) checkAccessForReadBucket(r *http.Request, bucket, aclAct
} }
} }
glog.V(3).Infof("acl denied! request account id: %s", accountId)
glog.V(3).Infof("acl denied! request account id: %s", requestAccountId)
return nil, s3err.ErrAccessDenied return nil, s3err.ErrAccessDenied
} }
@ -121,14 +122,12 @@ func (s3a *S3ApiServer) checkAccessForReadObjectAcl(r *http.Request, bucket, obj
return nil, s3err.ErrInternalError return nil, s3err.ErrInternalError
} }
} }
if entry.IsDirectory { if entry.IsDirectory {
return nil, s3err.ErrExistingObjectIsDirectory return nil, s3err.ErrExistingObjectIsDirectory
} }
acpOwnerId := s3acl.GetAcpOwner(entry.Extended, *bucketMetadata.Owner.ID) acpOwnerId := s3acl.GetAcpOwner(entry.Extended, *bucketMetadata.Owner.ID)
acpOwnerName := s3a.accountManager.IdNameMapping[acpOwnerId] acpOwnerName := s3a.accountManager.IdNameMapping[acpOwnerId]
acpGrants := s3acl.GetAcpGrants(entry.Extended)
acpGrants := s3acl.GetAcpGrants(&acpOwnerId, entry.Extended)
acp = &s3.AccessControlPolicy{ acp = &s3.AccessControlPolicy{
Owner: &s3.Owner{ Owner: &s3.Owner{
ID: &acpOwnerId, ID: &acpOwnerId,
@ -138,36 +137,29 @@ func (s3a *S3ApiServer) checkAccessForReadObjectAcl(r *http.Request, bucket, obj
} }
return acp, s3err.ErrNone return acp, s3err.ErrNone
} }
if bucketMetadata.ObjectOwnership == s3_constants.OwnershipBucketOwnerEnforced { if bucketMetadata.ObjectOwnership == s3_constants.OwnershipBucketOwnerEnforced {
return getAcpFunc() return getAcpFunc()
} else {
accountId := s3acl.GetAccountId(r)
acp, errCode := getAcpFunc()
if errCode != s3err.ErrNone {
return nil, errCode
}
if accountId == *acp.Owner.ID {
return acp, s3err.ErrNone
}
//find in Grants
if acp.Grants != nil {
reqGrants := s3acl.DetermineReqGrants(accountId, s3_constants.PermissionReadAcp)
for _, requiredGrant := range reqGrants {
for _, grant := range acp.Grants {
if s3acl.GrantEquals(requiredGrant, grant) {
return acp, s3err.ErrNone
}
}
requestAccountId := s3acl.GetAccountId(r)
acp, errCode = getAcpFunc()
if errCode != s3err.ErrNone {
return nil, errCode
}
if s3acl.ValidateAccount(requestAccountId, *acp.Owner.ID) {
return acp, s3err.ErrNone
}
if acp.Grants != nil {
reqGrants := s3acl.DetermineRequiredGrants(requestAccountId, s3_constants.PermissionReadAcp)
for _, requiredGrant := range reqGrants {
for _, grant := range acp.Grants {
if s3acl.GrantEquals(requiredGrant, grant) {
return acp, s3err.ErrNone
} }
} }
} }
glog.V(3).Infof("acl denied! request account id: %s", accountId)
return nil, s3err.ErrAccessDenied
} }
glog.V(3).Infof("CheckAccessForReadObjectAcl denied! request account id: %s", requestAccountId)
return nil, s3err.ErrAccessDenied
} }
// Check Object-Read related access // Check Object-Read related access
@ -184,6 +176,10 @@ func (s3a *S3ApiServer) checkBucketAccessForReadObject(r *http.Request, bucket s
if bucketMetadata.ObjectOwnership != s3_constants.OwnershipBucketOwnerEnforced { if bucketMetadata.ObjectOwnership != s3_constants.OwnershipBucketOwnerEnforced {
//offload object acl validation to filer layer //offload object acl validation to filer layer
_, defaultErrorCode := s3a.checkAccessForReadBucket(r, bucket, s3_constants.PermissionRead)
if defaultErrorCode != s3err.ErrNone {
r.Header.Set(s3_constants.XSeaweedFSHeaderAmzBucketAccessDenied, "true")
}
r.Header.Set(s3_constants.XSeaweedFSHeaderAmzBucketOwnerId, *bucketMetadata.Owner.ID) r.Header.Set(s3_constants.XSeaweedFSHeaderAmzBucketOwnerId, *bucketMetadata.Owner.ID)
} }
@ -193,67 +189,58 @@ func (s3a *S3ApiServer) checkBucketAccessForReadObject(r *http.Request, bucket s
// Check ObjectAcl-Write related access // Check ObjectAcl-Write related access
// includes: // includes:
// - PutObjectAclHandler // - PutObjectAclHandler
func (s3a *S3ApiServer) checkAccessForWriteObjectAcl(accountId, bucket, object string) (bucketMetadata *BucketMetaData, objectEntry *filer_pb.Entry, objectOwner string, errCode s3err.ErrorCode) {
bucketMetadata, errCode = s3a.bucketRegistry.GetBucketMetadata(bucket)
func (s3a *S3ApiServer) checkAccessForWriteObjectAcl(r *http.Request, bucket, object string) (*filer_pb.Entry, string, []*s3.Grant, s3err.ErrorCode) {
bucketMetadata, errCode := s3a.bucketRegistry.GetBucketMetadata(bucket)
if errCode != s3err.ErrNone { if errCode != s3err.ErrNone {
return nil, nil, "", errCode
return nil, "", nil, errCode
} }
if bucketMetadata.ObjectOwnership == s3_constants.OwnershipBucketOwnerEnforced {
return nil, nil, "", s3err.AccessControlListNotSupported
requestAccountId := s3acl.GetAccountId(r)
reqOwnerId, grants, errCode := s3acl.ExtractObjectAcl(r, s3a.accountManager, bucketMetadata.ObjectOwnership, *bucketMetadata.Owner.ID, requestAccountId, false)
if errCode != s3err.ErrNone {
return nil, "", nil, errCode
} }
//bucket acl
bucketAclAllowed := false
reqGrants := s3acl.DetermineReqGrants(accountId, s3_constants.PermissionWrite)
if accountId == *bucketMetadata.Owner.ID {
bucketAclAllowed = true
} else if bucketMetadata.Acl != nil {
bucketLoop:
for _, bucketGrant := range bucketMetadata.Acl {
for _, requiredGrant := range reqGrants {
if s3acl.GrantEquals(bucketGrant, requiredGrant) {
bucketAclAllowed = true
break bucketLoop
}
}
}
}
if !bucketAclAllowed {
return nil, nil, "", s3err.ErrAccessDenied
if bucketMetadata.ObjectOwnership == s3_constants.OwnershipBucketOwnerEnforced {
return nil, "", nil, s3err.AccessControlListNotSupported
} }
//object acl //object acl
objectEntry, err := getObjectEntry(s3a, bucket, object) objectEntry, err := getObjectEntry(s3a, bucket, object)
if err != nil { if err != nil {
if err == filer_pb.ErrNotFound { if err == filer_pb.ErrNotFound {
return nil, nil, "", s3err.ErrNoSuchKey
return nil, "", nil, s3err.ErrNoSuchKey
} }
return nil, nil, "", s3err.ErrInternalError
return nil, "", nil, s3err.ErrInternalError
} }
if objectEntry.IsDirectory { if objectEntry.IsDirectory {
return nil, nil, "", s3err.ErrExistingObjectIsDirectory
return nil, "", nil, s3err.ErrExistingObjectIsDirectory
} }
objectOwner = s3acl.GetAcpOwner(objectEntry.Extended, *bucketMetadata.Owner.ID)
if accountId == objectOwner {
return bucketMetadata, objectEntry, objectOwner, s3err.ErrNone
objectOwner := s3acl.GetAcpOwner(objectEntry.Extended, *bucketMetadata.Owner.ID)
//object owner is immutable
if reqOwnerId != "" && reqOwnerId != objectOwner {
return nil, "", nil, s3err.ErrAccessDenied
}
if s3acl.ValidateAccount(requestAccountId, objectOwner) {
return objectEntry, objectOwner, grants, s3err.ErrNone
} }
objectGrants := s3acl.GetAcpGrants(objectEntry.Extended)
objectGrants := s3acl.GetAcpGrants(nil, objectEntry.Extended)
if objectGrants != nil { if objectGrants != nil {
requiredGrants := s3acl.DetermineRequiredGrants(requestAccountId, s3_constants.PermissionWriteAcp)
for _, objectGrant := range objectGrants { for _, objectGrant := range objectGrants {
for _, requiredGrant := range reqGrants {
for _, requiredGrant := range requiredGrants {
if s3acl.GrantEquals(objectGrant, requiredGrant) { if s3acl.GrantEquals(objectGrant, requiredGrant) {
return bucketMetadata, objectEntry, objectOwner, s3err.ErrNone
return objectEntry, objectOwner, grants, s3err.ErrNone
} }
} }
} }
} }
glog.V(3).Infof("acl denied! request account id: %s", accountId)
return nil, nil, "", s3err.ErrAccessDenied
glog.V(3).Infof("checkAccessForWriteObjectAcl denied! request account id: %s", requestAccountId)
return nil, "", nil, s3err.ErrAccessDenied
} }
func updateObjectEntry(s3a *S3ApiServer, bucket, object string, entry *filer_pb.Entry) error { func updateObjectEntry(s3a *S3ApiServer, bucket, object string, entry *filer_pb.Entry) error {
@ -269,151 +256,159 @@ func (s3a *S3ApiServer) CheckAccessForPutObject(r *http.Request, bucket, object
return s3a.checkAccessForWriteObject(r, bucket, object, accountId) return s3a.checkAccessForWriteObject(r, bucket, object, accountId)
} }
// CheckAccessForNewMultipartUpload Check Acl for InitiateMultipartUploadResult API
// CheckAccessForPutObjectPartHandler Check Acl for Upload object part
// includes: // includes:
// - NewMultipartUploadHandler
func (s3a *S3ApiServer) CheckAccessForNewMultipartUpload(r *http.Request, bucket, object string) s3err.ErrorCode {
// - PutObjectPartHandler
func (s3a *S3ApiServer) CheckAccessForPutObjectPartHandler(r *http.Request, bucket string) s3err.ErrorCode {
bucketMetadata, errCode := s3a.bucketRegistry.GetBucketMetadata(bucket)
if errCode != s3err.ErrNone {
return errCode
}
if bucketMetadata.ObjectOwnership == s3_constants.OwnershipBucketOwnerEnforced {
return s3err.ErrNone
}
accountId := s3acl.GetAccountId(r) accountId := s3acl.GetAccountId(r)
if accountId == IdentityAnonymous.AccountId {
if !CheckBucketAccess(accountId, bucketMetadata, s3_constants.PermissionWrite) {
return s3err.ErrAccessDenied return s3err.ErrAccessDenied
} }
return s3a.checkAccessForWriteObject(r, bucket, object, accountId)
return s3err.ErrNone
} }
// CheckAccessForCompleteMultipartUpload Check Acl for CompleteMultipartUpload API
// CheckAccessForNewMultipartUpload Check Acl for API
// includes: // includes:
// - CompleteMultipartUploadHandler
// - NewMultipartUploadHandler
func (s3a *S3ApiServer) CheckAccessForNewMultipartUpload(r *http.Request, bucket, object string) (s3err.ErrorCode, string) {
accountId := s3acl.GetAccountId(r)
if accountId == IdentityAnonymous.AccountId {
return s3err.ErrAccessDenied, ""
}
errCode := s3a.checkAccessForWriteObject(r, bucket, object, accountId)
return errCode, accountId
}
func (s3a *S3ApiServer) CheckAccessForAbortMultipartUpload(r *http.Request, bucket, object string) s3err.ErrorCode {
return s3a.CheckAccessWithBucketOwnerAndInitiator(r, bucket, object)
}
func (s3a *S3ApiServer) CheckAccessForCompleteMultipartUpload(r *http.Request, bucket, object string) s3err.ErrorCode { func (s3a *S3ApiServer) CheckAccessForCompleteMultipartUpload(r *http.Request, bucket, object string) s3err.ErrorCode {
bucketMetadata, errCode := s3a.bucketRegistry.GetBucketMetadata(bucket) bucketMetadata, errCode := s3a.bucketRegistry.GetBucketMetadata(bucket)
if errCode != s3err.ErrNone { if errCode != s3err.ErrNone {
return errCode return errCode
} }
if bucketMetadata.ObjectOwnership != s3_constants.OwnershipBucketOwnerEnforced {
accountId := getAccountId(r)
if !CheckBucketAccess(accountId, bucketMetadata, s3_constants.PermissionWrite) {
return s3err.ErrAccessDenied
}
}
return s3err.ErrNone
}
func (s3a *S3ApiServer) CheckAccessForListMultipartUploadParts(r *http.Request, bucket, object string) s3err.ErrorCode {
return s3a.CheckAccessWithBucketOwnerAndInitiator(r, bucket, object)
}
// CheckAccessWithBucketOwnerAndInitiator Check Access Permission with 'bucketOwner' and 'multipartUpload initiator'
func (s3a *S3ApiServer) CheckAccessWithBucketOwnerAndInitiator(r *http.Request, bucket, object string) s3err.ErrorCode {
bucketMetadata, errCode := s3a.bucketRegistry.GetBucketMetadata(bucket)
if errCode != s3err.ErrNone {
return errCode
}
//bucket access allowed //bucket access allowed
accountId := s3acl.GetAccountId(r) accountId := s3acl.GetAccountId(r)
if accountId == *bucketMetadata.Owner.ID {
if s3acl.ValidateAccount(*bucketMetadata.Owner.ID, accountId) {
return s3err.ErrNone return s3err.ErrNone
}
//multipart initiator allowed
entry, err := getMultipartUpload(s3a, bucket, object)
if err != nil {
if err != filer_pb.ErrNotFound {
return s3err.ErrInternalError
}
} else { } else {
if len(bucketMetadata.Acl) > 0 {
reqGrants := s3acl.DetermineReqGrants(accountId, s3_constants.PermissionWrite)
for _, bucketGrant := range bucketMetadata.Acl {
for _, requiredGrant := range reqGrants {
if s3acl.GrantEquals(bucketGrant, requiredGrant) {
return s3err.ErrNone
}
}
}
uploadInitiator, ok := entry.Extended[s3_constants.ExtAmzMultipartInitiator]
if !ok || accountId == string(uploadInitiator) {
return s3err.ErrNone
} }
} }
glog.V(3).Infof("acl denied! request account id: %s", accountId)
glog.V(3).Infof("CheckAccessWithBucketOwnerAndInitiator denied! request account id: %s", accountId)
return s3err.ErrAccessDenied return s3err.ErrAccessDenied
} }
func (s3a *S3ApiServer) checkAccessForWriteObject(r *http.Request, bucket, object, accountId string) s3err.ErrorCode {
func (s3a *S3ApiServer) checkAccessForWriteObject(r *http.Request, bucket, object, requestAccountId string) s3err.ErrorCode {
bucketMetadata, errCode := s3a.bucketRegistry.GetBucketMetadata(bucket) bucketMetadata, errCode := s3a.bucketRegistry.GetBucketMetadata(bucket)
if errCode != s3err.ErrNone { if errCode != s3err.ErrNone {
return errCode return errCode
} }
requestOwnerId, grants, errCode := s3acl.ExtractObjectAcl(r, s3a.accountManager, bucketMetadata.ObjectOwnership, *bucketMetadata.Owner.ID, requestAccountId, true)
if errCode != s3err.ErrNone {
return errCode
}
if bucketMetadata.ObjectOwnership == s3_constants.OwnershipBucketOwnerEnforced { if bucketMetadata.ObjectOwnership == s3_constants.OwnershipBucketOwnerEnforced {
// validate grants (only bucketOwnerFullControl acl is allowed)
_, grants, errCode := s3acl.ParseAndValidateAclHeaders(r, s3a.accountManager, bucketMetadata.ObjectOwnership, *bucketMetadata.Owner.ID, accountId, false)
if errCode != s3err.ErrNone {
return errCode
}
if len(grants) > 1 {
return s3err.AccessControlListNotSupported
}
bucketOwnerFullControlGrant := &s3.Grant{
Permission: &s3_constants.PermissionFullControl,
Grantee: &s3.Grantee{
Type: &s3_constants.GrantTypeCanonicalUser,
ID: bucketMetadata.Owner.ID,
},
}
if len(grants) == 0 {
return s3err.ErrNone
}
return s3err.ErrNone
}
if s3acl.GrantEquals(bucketOwnerFullControlGrant, grants[0]) {
if !CheckBucketAccess(requestAccountId, bucketMetadata, s3_constants.PermissionWrite) {
return s3err.ErrAccessDenied
}
if requestOwnerId == "" {
requestOwnerId = requestAccountId
}
entry, err := getObjectEntry(s3a, bucket, object)
if err != nil {
if err == filer_pb.ErrNotFound {
s3acl.SetAcpOwnerHeader(r, requestOwnerId)
s3acl.SetAcpGrantsHeader(r, grants)
return s3err.ErrNone return s3err.ErrNone
} }
return s3err.ErrInternalError
}
return s3err.AccessControlListNotSupported
objectOwnerId := s3acl.GetAcpOwner(entry.Extended, *bucketMetadata.Owner.ID)
//object owner is immutable
if requestOwnerId != "" && objectOwnerId != requestOwnerId {
return s3err.ErrAccessDenied
} }
//bucket access allowed
bucketAclAllowed := false
if accountId == *bucketMetadata.Owner.ID {
bucketAclAllowed = true
//Only the owner of the bucket and the owner of the object can overwrite the object
if s3acl.ValidateAccount(requestOwnerId, objectOwnerId, *bucketMetadata.Owner.ID) {
glog.V(3).Infof("checkAccessForWriteObject denied! request account id: %s, expect account id: %s", requestAccountId, *bucketMetadata.Owner.ID)
return s3err.ErrAccessDenied
}
s3acl.SetAcpOwnerHeader(r, objectOwnerId)
s3acl.SetAcpGrantsHeader(r, grants)
return s3err.ErrNone
}
func CheckBucketAccess(requestAccountId string, bucketMetadata *BucketMetaData, permission string) bool {
if s3acl.ValidateAccount(requestAccountId, *bucketMetadata.Owner.ID) {
return true
} else { } else {
if len(bucketMetadata.Acl) > 0 { if len(bucketMetadata.Acl) > 0 {
reqGrants := s3acl.DetermineReqGrants(accountId, s3_constants.PermissionWrite)
bucketLoop:
reqGrants := s3acl.DetermineRequiredGrants(requestAccountId, permission)
for _, bucketGrant := range bucketMetadata.Acl { for _, bucketGrant := range bucketMetadata.Acl {
for _, requiredGrant := range reqGrants { for _, requiredGrant := range reqGrants {
if s3acl.GrantEquals(bucketGrant, requiredGrant) { if s3acl.GrantEquals(bucketGrant, requiredGrant) {
bucketAclAllowed = true
break bucketLoop
return true
} }
} }
} }
} }
} }
if !bucketAclAllowed {
glog.V(3).Infof("acl denied! request account id: %s", accountId)
return s3err.ErrAccessDenied
}
//object access allowed
entry, err := getObjectEntry(s3a, bucket, object)
if err != nil {
if err != filer_pb.ErrNotFound {
return s3err.ErrInternalError
}
} else {
if entry.IsDirectory {
return s3err.ErrExistingObjectIsDirectory
}
//Only the owner of the bucket and the owner of the object can overwrite the object
objectOwner := s3acl.GetAcpOwner(entry.Extended, *bucketMetadata.Owner.ID)
if accountId != objectOwner && accountId != *bucketMetadata.Owner.ID {
glog.V(3).Infof("acl denied! request account id: %s, expect account id: %s", accountId, *bucketMetadata.Owner.ID)
return s3err.ErrAccessDenied
}
}
ownerId, grants, errCode := s3acl.ParseAndValidateAclHeadersOrElseDefault(r, s3a.accountManager, bucketMetadata.ObjectOwnership, *bucketMetadata.Owner.ID, accountId, false)
if errCode != s3err.ErrNone {
return errCode
}
s3acl.SetAcpOwnerHeader(r, ownerId)
s3acl.SetAcpGrantsHeader(r, grants)
return s3err.ErrNone
glog.V(3).Infof("CheckBucketAccess denied! request account id: %s", requestAccountId)
return false
} }
func getObjectEntry(s3a *S3ApiServer, bucket, object string) (*filer_pb.Entry, error) { func getObjectEntry(s3a *S3ApiServer, bucket, object string) (*filer_pb.Entry, error) {
return s3a.getEntry(util.Join(s3a.option.BucketsPath, bucket), object) return s3a.getEntry(util.Join(s3a.option.BucketsPath, bucket), object)
} }
func (s3a *S3ApiServer) ExtractBucketAcp(r *http.Request) (owner string, grants []*s3.Grant, errCode s3err.ErrorCode) {
accountId := s3acl.GetAccountId(r)
ownership := s3_constants.DefaultOwnershipForCreate
if ownership == s3_constants.OwnershipBucketOwnerEnforced {
return accountId, []*s3.Grant{
{
Permission: &s3_constants.PermissionFullControl,
Grantee: &s3.Grantee{
Type: &s3_constants.GrantTypeCanonicalUser,
ID: &accountId,
},
},
}, s3err.ErrNone
} else {
return s3acl.ParseAndValidateAclHeadersOrElseDefault(r, s3a.accountManager, ownership, accountId, accountId, false)
}
func getMultipartUpload(s3a *S3ApiServer, bucket, object string) (*filer_pb.Entry, error) {
return s3a.getEntry(s3a.genUploadsFolder(bucket), object)
} }

57
weed/s3api/s3api_bucket_handlers.go

@ -123,7 +123,9 @@ func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request)
} }
} }
acpOwner, acpGrants, errCode := s3a.ExtractBucketAcp(r)
objectOwnership := r.Header.Get("X-Amz-Object-Ownership")
requestAccountId := s3acl.GetAccountId(r)
grants, errCode := s3acl.ExtractBucketAcl(r, s3a.accountManager, objectOwnership, requestAccountId, requestAccountId, true)
if errCode != s3err.ErrNone { if errCode != s3err.ErrNone {
s3err.WriteErrorResponse(w, r, errCode) s3err.WriteErrorResponse(w, r, errCode)
return return
@ -136,7 +138,13 @@ func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request)
} }
entry.Extended[s3_constants.AmzIdentityId] = []byte(identityId) entry.Extended[s3_constants.AmzIdentityId] = []byte(identityId)
} }
s3acl.AssembleEntryWithAcp(entry, acpOwner, acpGrants)
if objectOwnership != "" {
if entry.Extended == nil {
entry.Extended = make(map[string][]byte)
}
entry.Extended[s3_constants.ExtOwnershipKey] = []byte(objectOwnership)
}
s3acl.AssembleEntryWithAcp(entry, requestAccountId, grants)
} }
// create the folder for bucket, but lazily create actual collection // create the folder for bucket, but lazily create actual collection
@ -145,6 +153,10 @@ func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request)
s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
return return
} }
// clear cache
s3a.bucketRegistry.ClearCache(bucket)
w.Header().Set("Location", "/"+bucket) w.Header().Set("Location", "/"+bucket)
writeSuccessResponseEmpty(w, r) writeSuccessResponseEmpty(w, r)
} }
@ -265,7 +277,7 @@ func (s3a *S3ApiServer) PutBucketAclHandler(w http.ResponseWriter, r *http.Reque
return return
} }
grants, errCode := s3acl.ExtractAcl(r, s3a.accountManager, bucketMetadata.ObjectOwnership, "", *bucketMetadata.Owner.ID, accountId)
grants, errCode := s3acl.ExtractBucketAcl(r, s3a.accountManager, bucketMetadata.ObjectOwnership, *bucketMetadata.Owner.ID, accountId, false)
if errCode != s3err.ErrNone { if errCode != s3err.ErrNone {
s3err.WriteErrorResponse(w, r, errCode) s3err.WriteErrorResponse(w, r, errCode)
return return
@ -289,6 +301,8 @@ func (s3a *S3ApiServer) PutBucketAclHandler(w http.ResponseWriter, r *http.Reque
s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
return return
} }
//update local cache
bucketMetadata.Acl = grants
s3err.WriteEmptyResponse(w, r, http.StatusOK) s3err.WriteEmptyResponse(w, r, http.StatusOK)
} }
@ -398,9 +412,8 @@ func (s3a *S3ApiServer) PutBucketOwnershipControls(w http.ResponseWriter, r *htt
return return
} }
var v s3.OwnershipControls
defer util.CloseRequest(r) defer util.CloseRequest(r)
var v s3.OwnershipControls
err := xmlutil.UnmarshalXML(&v, xml.NewDecoder(r.Body), "") err := xmlutil.UnmarshalXML(&v, xml.NewDecoder(r.Body), "")
if err != nil { if err != nil {
s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRequest) s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRequest)
@ -413,12 +426,11 @@ func (s3a *S3ApiServer) PutBucketOwnershipControls(w http.ResponseWriter, r *htt
} }
printOwnership := true printOwnership := true
ownership := *v.Rules[0].ObjectOwnership
switch ownership {
newObjectOwnership := *v.Rules[0].ObjectOwnership
switch newObjectOwnership {
case s3_constants.OwnershipObjectWriter: case s3_constants.OwnershipObjectWriter:
case s3_constants.OwnershipBucketOwnerPreferred: case s3_constants.OwnershipBucketOwnerPreferred:
case s3_constants.OwnershipBucketOwnerEnforced: case s3_constants.OwnershipBucketOwnerEnforced:
printOwnership = false
default: default:
s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRequest) s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRequest)
return return
@ -435,29 +447,34 @@ func (s3a *S3ApiServer) PutBucketOwnershipControls(w http.ResponseWriter, r *htt
} }
oldOwnership, ok := bucketEntry.Extended[s3_constants.ExtOwnershipKey] oldOwnership, ok := bucketEntry.Extended[s3_constants.ExtOwnershipKey]
if !ok || string(oldOwnership) != ownership {
if !ok || string(oldOwnership) != newObjectOwnership {
// must reset bucket acl to default(bucket owner with full control permission) before setting ownership // must reset bucket acl to default(bucket owner with full control permission) before setting ownership
// to `OwnershipBucketOwnerEnforced` (bucket cannot have ACLs set with ObjectOwnership's BucketOwnerEnforced setting) // to `OwnershipBucketOwnerEnforced` (bucket cannot have ACLs set with ObjectOwnership's BucketOwnerEnforced setting)
if ownership == s3_constants.OwnershipBucketOwnerEnforced {
acpGrants := s3acl.GetAcpGrants(bucketEntry.Extended)
if len(acpGrants) != 1 {
s3err.WriteErrorResponse(w, r, s3err.InvalidBucketAclWithObjectOwnership)
return
}
bucketOwner := s3acl.GetAcpOwner(bucketEntry.Extended, s3account.AccountAdmin.Id)
expectGrant := s3acl.GrantWithFullControl(bucketOwner)
if s3acl.GrantEquals(acpGrants[0], expectGrant) {
if newObjectOwnership == s3_constants.OwnershipBucketOwnerEnforced {
acpGrants := s3acl.GetAcpGrants(nil, bucketEntry.Extended)
if len(acpGrants) > 1 {
s3err.WriteErrorResponse(w, r, s3err.InvalidBucketAclWithObjectOwnership) s3err.WriteErrorResponse(w, r, s3err.InvalidBucketAclWithObjectOwnership)
return return
} else if len(acpGrants) == 1 {
bucketOwner := s3acl.GetAcpOwner(bucketEntry.Extended, s3account.AccountAdmin.Id)
expectGrant := s3acl.GrantWithFullControl(bucketOwner)
if !s3acl.GrantEquals(acpGrants[0], expectGrant) {
s3err.WriteErrorResponse(w, r, s3err.InvalidBucketAclWithObjectOwnership)
return
}
} }
} }
if bucketEntry.Extended == nil { if bucketEntry.Extended == nil {
bucketEntry.Extended = make(map[string][]byte) bucketEntry.Extended = make(map[string][]byte)
} }
bucketEntry.Extended[s3_constants.ExtOwnershipKey] = []byte(ownership)
//update local cache
bucketMetadata, eCode := s3a.bucketRegistry.GetBucketMetadata(bucket)
if eCode == s3err.ErrNone {
bucketMetadata.ObjectOwnership = newObjectOwnership
}
bucketEntry.Extended[s3_constants.ExtOwnershipKey] = []byte(newObjectOwnership)
err = s3a.updateEntry(s3a.option.BucketsPath, bucketEntry) err = s3a.updateEntry(s3a.option.BucketsPath, bucketEntry)
if err != nil { if err != nil {
s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)

14
weed/s3api/s3api_object_handlers.go

@ -219,12 +219,16 @@ func (s3a *S3ApiServer) GetObjectAclHandler(w http.ResponseWriter, r *http.Reque
} }
func (s3a *S3ApiServer) HeadObjectHandler(w http.ResponseWriter, r *http.Request) { func (s3a *S3ApiServer) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
bucket, object := s3_constants.GetBucketAndObject(r) bucket, object := s3_constants.GetBucketAndObject(r)
glog.V(3).Infof("HeadObjectHandler %s %s", bucket, object) glog.V(3).Infof("HeadObjectHandler %s %s", bucket, object)
destUrl := s3a.toFilerUrl(bucket, object)
errCode := s3a.checkBucketAccessForReadObject(r, bucket)
if errCode != s3err.ErrNone {
s3err.WriteErrorResponse(w, r, errCode)
return
}
destUrl := s3a.toFilerUrl(bucket, object)
s3a.proxyToFiler(w, r, destUrl, false, passThroughResponse) s3a.proxyToFiler(w, r, destUrl, false, passThroughResponse)
} }
@ -599,20 +603,18 @@ func (s3a *S3ApiServer) maybeGetFilerJwtAuthorizationToken(isWrite bool) string
func (s3a *S3ApiServer) PutObjectAclHandler(w http.ResponseWriter, r *http.Request) { func (s3a *S3ApiServer) PutObjectAclHandler(w http.ResponseWriter, r *http.Request) {
bucket, object := s3_constants.GetBucketAndObject(r) bucket, object := s3_constants.GetBucketAndObject(r)
accountId := s3acl.GetAccountId(r)
bucketMetadata, objectEntry, objectOwner, errCode := s3a.checkAccessForWriteObjectAcl(accountId, bucket, object)
objectEntry, ownerId, grants, errCode := s3a.checkAccessForWriteObjectAcl(r, bucket, object)
if errCode != s3err.ErrNone { if errCode != s3err.ErrNone {
s3err.WriteErrorResponse(w, r, errCode) s3err.WriteErrorResponse(w, r, errCode)
return return
} }
grants, errCode := s3acl.ExtractAcl(r, s3a.accountManager, bucketMetadata.ObjectOwnership, *bucketMetadata.Owner.ID, objectOwner, accountId)
if errCode != s3err.ErrNone { if errCode != s3err.ErrNone {
s3err.WriteErrorResponse(w, r, errCode) s3err.WriteErrorResponse(w, r, errCode)
return return
} }
errCode = s3acl.AssembleEntryWithAcp(objectEntry, objectOwner, grants)
errCode = s3acl.AssembleEntryWithAcp(objectEntry, ownerId, grants)
if errCode != s3err.ErrNone { if errCode != s3err.ErrNone {
return return
} }

65
weed/s3api/s3api_object_multipart_handlers.go

@ -30,8 +30,7 @@ const (
func (s3a *S3ApiServer) NewMultipartUploadHandler(w http.ResponseWriter, r *http.Request) { func (s3a *S3ApiServer) NewMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
bucket, object := s3_constants.GetBucketAndObject(r) bucket, object := s3_constants.GetBucketAndObject(r)
//acl
errCode := s3a.CheckAccessForNewMultipartUpload(r, bucket, object)
errCode, initiatorId := s3a.CheckAccessForNewMultipartUpload(r, bucket, object)
if errCode != s3err.ErrNone { if errCode != s3err.ErrNone {
s3err.WriteErrorResponse(w, r, errCode) s3err.WriteErrorResponse(w, r, errCode)
return return
@ -52,7 +51,7 @@ func (s3a *S3ApiServer) NewMultipartUploadHandler(w http.ResponseWriter, r *http
if contentType != "" { if contentType != "" {
createMultipartUploadInput.ContentType = &contentType createMultipartUploadInput.ContentType = &contentType
} }
response, errCode := s3a.createMultipartUpload(createMultipartUploadInput)
response, errCode := s3a.createMultipartUpload(initiatorId, createMultipartUploadInput)
glog.V(2).Info("NewMultipartUploadHandler", string(s3err.EncodeXMLResponse(response)), errCode) glog.V(2).Info("NewMultipartUploadHandler", string(s3err.EncodeXMLResponse(response)), errCode)
@ -70,7 +69,6 @@ func (s3a *S3ApiServer) CompleteMultipartUploadHandler(w http.ResponseWriter, r
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html // https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html
bucket, object := s3_constants.GetBucketAndObject(r) bucket, object := s3_constants.GetBucketAndObject(r)
s3a.CheckAccessForCompleteMultipartUpload(r, bucket, object)
parts := &CompleteMultipartUpload{} parts := &CompleteMultipartUpload{}
if err := xmlDecoder(r.Body, parts, r.ContentLength); err != nil { if err := xmlDecoder(r.Body, parts, r.ContentLength); err != nil {
@ -86,6 +84,12 @@ func (s3a *S3ApiServer) CompleteMultipartUploadHandler(w http.ResponseWriter, r
return return
} }
errCode := s3a.CheckAccessForCompleteMultipartUpload(r, bucket, uploadID)
if errCode != s3err.ErrNone {
s3err.WriteErrorResponse(w, r, errCode)
return
}
response, errCode := s3a.completeMultipartUpload(&s3.CompleteMultipartUploadInput{ response, errCode := s3a.completeMultipartUpload(&s3.CompleteMultipartUploadInput{
Bucket: aws.String(bucket), Bucket: aws.String(bucket),
Key: objectKey(aws.String(object)), Key: objectKey(aws.String(object)),
@ -115,6 +119,24 @@ func (s3a *S3ApiServer) AbortMultipartUploadHandler(w http.ResponseWriter, r *ht
return return
} }
errCode := s3a.CheckAccessForAbortMultipartUpload(r, bucket, uploadID)
if errCode != s3err.ErrNone {
s3err.WriteErrorResponse(w, r, errCode)
return
}
exists, err := s3a.exists(s3a.genUploadsFolder(bucket), uploadID, true)
if err != nil {
glog.V(1).Infof("list parts error: %v, request url: %s", err, r.RequestURI)
s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchUpload)
return
}
if !exists {
glog.V(1).Infof("list parts not found, request url: %s", r.RequestURI)
s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchUpload)
return
}
response, errCode := s3a.abortMultipartUpload(&s3.AbortMultipartUploadInput{ response, errCode := s3a.abortMultipartUpload(&s3.AbortMultipartUploadInput{
Bucket: aws.String(bucket), Bucket: aws.String(bucket),
Key: objectKey(aws.String(object)), Key: objectKey(aws.String(object)),
@ -129,7 +151,7 @@ func (s3a *S3ApiServer) AbortMultipartUploadHandler(w http.ResponseWriter, r *ht
glog.V(2).Info("AbortMultipartUploadHandler", string(s3err.EncodeXMLResponse(response))) glog.V(2).Info("AbortMultipartUploadHandler", string(s3err.EncodeXMLResponse(response)))
//https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html //https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html
s3err.WriteXMLResponse(w, r, http.StatusNoContent, response)
s3err.WriteEmptyResponse(w, r, http.StatusNoContent)
s3err.PostLog(r, http.StatusNoContent, s3err.ErrNone) s3err.PostLog(r, http.StatusNoContent, s3err.ErrNone)
} }
@ -151,7 +173,12 @@ func (s3a *S3ApiServer) ListMultipartUploadsHandler(w http.ResponseWriter, r *ht
} }
} }
response, errCode := s3a.listMultipartUploads(&s3.ListMultipartUploadsInput{
bucketMetaData, errorCode := s3a.checkAccessForReadBucket(r, bucket, s3_constants.PermissionRead)
if errorCode != s3err.ErrNone {
s3err.WriteErrorResponse(w, r, errorCode)
return
}
response, errCode := s3a.listMultipartUploads(bucketMetaData, &s3.ListMultipartUploadsInput{
Bucket: aws.String(bucket), Bucket: aws.String(bucket),
Delimiter: aws.String(delimiter), Delimiter: aws.String(delimiter),
EncodingType: aws.String(encodingType), EncodingType: aws.String(encodingType),
@ -193,6 +220,23 @@ func (s3a *S3ApiServer) ListObjectPartsHandler(w http.ResponseWriter, r *http.Re
return return
} }
errCode := s3a.CheckAccessForListMultipartUploadParts(r, bucket, uploadID)
if errCode != s3err.ErrNone {
s3err.WriteErrorResponse(w, r, errCode)
return
}
exists, err := s3a.exists(s3a.genUploadsFolder(bucket), uploadID, true)
if err != nil {
glog.V(1).Infof("list parts error: %v, request url: %s", err, r.RequestURI)
s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchUpload)
return
}
if !exists {
glog.V(1).Infof("list parts not found, request url: %s", r.RequestURI)
s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchUpload)
return
}
response, errCode := s3a.listObjectParts(&s3.ListPartsInput{ response, errCode := s3a.listObjectParts(&s3.ListPartsInput{
Bucket: aws.String(bucket), Bucket: aws.String(bucket),
Key: objectKey(aws.String(object)), Key: objectKey(aws.String(object)),
@ -253,6 +297,12 @@ func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Requ
} }
defer dataReader.Close() defer dataReader.Close()
errorCode := s3a.CheckAccessForPutObjectPartHandler(r, bucket)
if errorCode != s3err.ErrNone {
s3err.WriteErrorResponse(w, r, errorCode)
return
}
glog.V(2).Infof("PutObjectPartHandler %s %s %04d", bucket, uploadID, partID) glog.V(2).Infof("PutObjectPartHandler %s %s %04d", bucket, uploadID, partID)
uploadUrl := fmt.Sprintf("http://%s%s/%s/%04d.part", uploadUrl := fmt.Sprintf("http://%s%s/%s/%04d.part",
s3a.option.Filer.ToHttpAddress(), s3a.genUploadsFolder(bucket), uploadID, partID) s3a.option.Filer.ToHttpAddress(), s3a.genUploadsFolder(bucket), uploadID, partID)
@ -260,9 +310,8 @@ func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Requ
if partID == 1 && r.Header.Get("Content-Type") == "" { if partID == 1 && r.Header.Get("Content-Type") == "" {
dataReader = mimeDetect(r, dataReader) dataReader = mimeDetect(r, dataReader)
} }
destination := fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object)
etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader, destination)
etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader, "")
if errCode != s3err.ErrNone { if errCode != s3err.ErrNone {
s3err.WriteErrorResponse(w, r, errCode) s3err.WriteErrorResponse(w, r, errCode)
return return

9
weed/s3api/s3api_objects_list_handlers.go

@ -18,6 +18,8 @@ import (
"github.com/seaweedfs/seaweedfs/weed/s3api/s3err" "github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
) )
const cutoffTimeNewEmptyDir = 3
type ListBucketResultV2 struct { type ListBucketResultV2 struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult"` XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult"`
Name string `xml:"Name"` Name string `xml:"Name"`
@ -419,11 +421,16 @@ func (s3a *S3ApiServer) ensureDirectoryAllEmpty(filerClient filer_pb.SeaweedFile
var startFrom string var startFrom string
var isExhausted bool var isExhausted bool
var foundEntry bool var foundEntry bool
cutOffTimeAtSec := time.Now().Unix() + cutoffTimeNewEmptyDir
for fileCounter == 0 && !isExhausted && err == nil { for fileCounter == 0 && !isExhausted && err == nil {
err = filer_pb.SeaweedList(filerClient, currentDir, "", func(entry *filer_pb.Entry, isLast bool) error { err = filer_pb.SeaweedList(filerClient, currentDir, "", func(entry *filer_pb.Entry, isLast bool) error {
foundEntry = true foundEntry = true
if entry.IsDirectory { if entry.IsDirectory {
subDirs = append(subDirs, entry.Name)
if entry.Attributes != nil && cutOffTimeAtSec >= entry.Attributes.GetCrtime() {
fileCounter++
} else {
subDirs = append(subDirs, entry.Name)
}
} else { } else {
fileCounter++ fileCounter++
} }

10
weed/s3api/s3api_server.go

@ -128,15 +128,15 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) {
// PutObjectPart // PutObjectPart
bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.Auth(withAcl(s3a.cb.Limit, s3a.PutObjectPartHandler, ACTION_WRITE)), "PUT")).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}") bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.Auth(withAcl(s3a.cb.Limit, s3a.PutObjectPartHandler, ACTION_WRITE)), "PUT")).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
// CompleteMultipartUpload // CompleteMultipartUpload
bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.CompleteMultipartUploadHandler, ACTION_WRITE)), "POST")).Queries("uploadId", "{uploadId:.*}")
bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(track(s3a.Auth(withAcl(s3a.cb.Limit, s3a.CompleteMultipartUploadHandler, ACTION_WRITE)), "POST")).Queries("uploadId", "{uploadId:.*}")
// NewMultipartUpload // NewMultipartUpload
bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.NewMultipartUploadHandler, ACTION_WRITE)), "POST")).Queries("uploads", "")
bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(track(s3a.Auth(withAcl(s3a.cb.Limit, s3a.NewMultipartUploadHandler, ACTION_WRITE)), "POST")).Queries("uploads", "")
// AbortMultipartUpload // AbortMultipartUpload
bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.AbortMultipartUploadHandler, ACTION_WRITE)), "DELETE")).Queries("uploadId", "{uploadId:.*}")
bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.Auth(withAcl(s3a.cb.Limit, s3a.AbortMultipartUploadHandler, ACTION_WRITE)), "DELETE")).Queries("uploadId", "{uploadId:.*}")
// ListObjectParts // ListObjectParts
bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.ListObjectPartsHandler, ACTION_READ)), "GET")).Queries("uploadId", "{uploadId:.*}")
bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.Auth(withAcl(s3a.cb.Limit, s3a.ListObjectPartsHandler, ACTION_READ)), "GET")).Queries("uploadId", "{uploadId:.*}")
// ListMultipartUploads // ListMultipartUploads
bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.ListMultipartUploadsHandler, ACTION_READ)), "GET")).Queries("uploads", "")
bucket.Methods("GET").HandlerFunc(track(s3a.Auth(withAcl(s3a.cb.Limit, s3a.ListMultipartUploadsHandler, ACTION_READ)), "GET")).Queries("uploads", "")
// GetObjectTagging // GetObjectTagging
bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetObjectTaggingHandler, ACTION_READ)), "GET")).Queries("tagging", "") bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetObjectTaggingHandler, ACTION_READ)), "GET")).Queries("tagging", "")

14
weed/s3api/s3err/s3api_errors.go

@ -111,6 +111,8 @@ const (
OwnershipControlsNotFoundError OwnershipControlsNotFoundError
InvalidBucketAclWithObjectOwnership InvalidBucketAclWithObjectOwnership
AccessControlListNotSupported AccessControlListNotSupported
ErrUnexpectedContent
ErrInvalidAclArgument
) )
// error code to APIError structure, these fields carry respective // error code to APIError structure, these fields carry respective
@ -426,13 +428,23 @@ var errorCodeResponse = map[ErrorCode]APIError{
InvalidBucketAclWithObjectOwnership: { InvalidBucketAclWithObjectOwnership: {
Code: "InvalidBucketAclWithObjectOwnership", Code: "InvalidBucketAclWithObjectOwnership",
Description: "Bucket cannot have ACLs set with ObjectOwnership's BucketOwnerEnforced setting", Description: "Bucket cannot have ACLs set with ObjectOwnership's BucketOwnerEnforced setting",
HTTPStatusCode: http.StatusNotFound,
HTTPStatusCode: http.StatusBadRequest,
}, },
AccessControlListNotSupported: { AccessControlListNotSupported: {
Code: "AccessControlListNotSupported", Code: "AccessControlListNotSupported",
Description: "The bucket does not allow ACLs", Description: "The bucket does not allow ACLs",
HTTPStatusCode: http.StatusBadRequest, HTTPStatusCode: http.StatusBadRequest,
}, },
ErrUnexpectedContent: {
Code: "UnexpectedContent",
Description: "This request does not support content",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidAclArgument: {
Code: "InvalidArgument",
Description: "ACL argument is invalid",
HTTPStatusCode: http.StatusBadRequest,
},
} }
// GetAPIError provides API Error for input API error code. // GetAPIError provides API Error for input API error code.

6
weed/s3api/tags.go

@ -5,6 +5,7 @@ import (
"fmt" "fmt"
"github.com/seaweedfs/seaweedfs/weed/util" "github.com/seaweedfs/seaweedfs/weed/util"
"regexp" "regexp"
"sort"
"strings" "strings"
) )
@ -39,6 +40,11 @@ func FromTags(tags map[string]string) (t *Tagging) {
Value: v, Value: v,
}) })
} }
if tagArr := t.TagSet.Tag; len(tagArr) > 0 {
sort.SliceStable(tagArr, func(i, j int) bool {
return tagArr[i].Key < tagArr[j].Key
})
}
return return
} }

2
weed/server/common.go

@ -103,7 +103,7 @@ func writeJson(w http.ResponseWriter, r *http.Request, httpStatus int, obj inter
// wrapper for writeJson - just logs errors // wrapper for writeJson - just logs errors
func writeJsonQuiet(w http.ResponseWriter, r *http.Request, httpStatus int, obj interface{}) { func writeJsonQuiet(w http.ResponseWriter, r *http.Request, httpStatus int, obj interface{}) {
if err := writeJson(w, r, httpStatus, obj); err != nil { if err := writeJson(w, r, httpStatus, obj); err != nil {
glog.V(0).Infof("error writing JSON status %d: %v", httpStatus, err)
glog.V(0).Infof("error writing JSON status %s %d: %v", r.URL, httpStatus, err)
glog.V(1).Infof("JSON content: %+v", obj) glog.V(1).Infof("JSON content: %+v", obj)
} }
} }

6
weed/server/filer_grpc_server_remote.go

@ -123,8 +123,9 @@ func (fs *FilerServer) CacheRemoteObjectToLocalCluster(ctx context.Context, req
// tell filer to tell volume server to download into needles // tell filer to tell volume server to download into needles
assignedServerAddress := pb.NewServerAddressWithGrpcPort(assignResult.Url, assignResult.GrpcPort) assignedServerAddress := pb.NewServerAddressWithGrpcPort(assignResult.Url, assignResult.GrpcPort)
var etag string
err = operation.WithVolumeServerClient(false, assignedServerAddress, fs.grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { err = operation.WithVolumeServerClient(false, assignedServerAddress, fs.grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
_, fetchAndWriteErr := volumeServerClient.FetchAndWriteNeedle(context.Background(), &volume_server_pb.FetchAndWriteNeedleRequest{
resp, fetchAndWriteErr := volumeServerClient.FetchAndWriteNeedle(context.Background(), &volume_server_pb.FetchAndWriteNeedleRequest{
VolumeId: uint32(fileId.VolumeId), VolumeId: uint32(fileId.VolumeId),
NeedleId: uint64(fileId.Key), NeedleId: uint64(fileId.Key),
Cookie: uint32(fileId.Cookie), Cookie: uint32(fileId.Cookie),
@ -141,6 +142,8 @@ func (fs *FilerServer) CacheRemoteObjectToLocalCluster(ctx context.Context, req
}) })
if fetchAndWriteErr != nil { if fetchAndWriteErr != nil {
return fmt.Errorf("volume server %s fetchAndWrite %s: %v", assignResult.Url, dest, fetchAndWriteErr) return fmt.Errorf("volume server %s fetchAndWrite %s: %v", assignResult.Url, dest, fetchAndWriteErr)
} else {
etag = resp.ETag
} }
return nil return nil
}) })
@ -155,6 +158,7 @@ func (fs *FilerServer) CacheRemoteObjectToLocalCluster(ctx context.Context, req
Offset: localOffset, Offset: localOffset,
Size: uint64(size), Size: uint64(size),
ModifiedTsNs: time.Now().Unix(), ModifiedTsNs: time.Now().Unix(),
ETag: etag,
Fid: &filer_pb.FileId{ Fid: &filer_pb.FileId{
VolumeId: uint32(fileId.VolumeId), VolumeId: uint32(fileId.VolumeId),
FileKey: uint64(fileId.Key), FileKey: uint64(fileId.Key),

1
weed/server/filer_grpc_server_rename.go

@ -202,6 +202,7 @@ func (fs *FilerServer) moveSelfEntry(ctx context.Context, stream filer_pb.Seawee
} }
// delete old entry // delete old entry
ctx = context.WithValue(ctx, "OP", "MV")
deleteErr := fs.filer.DeleteEntryMetaAndData(ctx, oldPath, false, false, false, false, signatures) deleteErr := fs.filer.DeleteEntryMetaAndData(ctx, oldPath, false, false, false, false, signatures)
if deleteErr != nil { if deleteErr != nil {
return deleteErr return deleteErr

17
weed/server/filer_server_handlers_read.go

@ -12,7 +12,7 @@ import (
"mime" "mime"
"net/http" "net/http"
"path/filepath" "path/filepath"
"strconv"
strconv "strconv"
"strings" "strings"
"time" "time"
@ -99,7 +99,11 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
if err == filer_pb.ErrNotFound { if err == filer_pb.ErrNotFound {
glog.V(2).Infof("Not found %s: %v", path, err) glog.V(2).Infof("Not found %s: %v", path, err)
stats.FilerRequestCounter.WithLabelValues(stats.ErrorReadNotFound).Inc() stats.FilerRequestCounter.WithLabelValues(stats.ErrorReadNotFound).Inc()
w.WriteHeader(http.StatusNotFound)
if r.Header.Get(s3_constants.XSeaweedFSHeaderAmzBucketAccessDenied) == "true" {
w.WriteHeader(http.StatusForbidden)
} else {
w.WriteHeader(http.StatusNotFound)
}
} else { } else {
glog.Errorf("Internal %s: %v", path, err) glog.Errorf("Internal %s: %v", path, err)
stats.FilerRequestCounter.WithLabelValues(stats.ErrorReadInternal).Inc() stats.FilerRequestCounter.WithLabelValues(stats.ErrorReadInternal).Inc()
@ -174,10 +178,15 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
// print out the header from extended properties // print out the header from extended properties
for k, v := range entry.Extended { for k, v := range entry.Extended {
if !strings.HasPrefix(k, "xattr-") {
if strings.HasPrefix(k, "xattr-") {
// "xattr-" prefix is set in filesys.XATTR_PREFIX // "xattr-" prefix is set in filesys.XATTR_PREFIX
w.Header().Set(k, string(v))
continue
}
if strings.HasPrefix(k, "Seaweed-X-") {
// key with "Seaweed-X-" prefix is builtin and should not expose to user
continue
} }
w.Header().Set(k, string(v))
} }
//Seaweed custom header are not visible to Vue or javascript //Seaweed custom header are not visible to Vue or javascript

4
weed/server/filer_server_handlers_write_autochunk.go

@ -356,6 +356,10 @@ func SaveAmzMetaData(r *http.Request, existing map[string][]byte, isReplace bool
metadata[s3_constants.AmzStorageClass] = []byte(sc) metadata[s3_constants.AmzStorageClass] = []byte(sc)
} }
if ce := r.Header.Get("Content-Encoding"); ce != "" {
metadata["Content-Encoding"] = []byte(ce)
}
if tags := r.Header.Get(s3_constants.AmzObjectTagging); tags != "" { if tags := r.Header.Get(s3_constants.AmzObjectTagging); tags != "" {
for _, v := range strings.Split(tags, "&") { for _, v := range strings.Split(tags, "&") {
tag := strings.Split(v, "=") tag := strings.Split(v, "=")

2
weed/server/filer_ui/filer.html

@ -337,7 +337,7 @@
} }
var url = basePath + encodeURIComponent(newName); var url = basePath + encodeURIComponent(newName);
var originPath = basePath + originName; var originPath = basePath + originName;
url += '?mv.from=' + originPath;
url += '?mv.from=' + encodeURIComponent(originPath);
var xhr = new XMLHttpRequest(); var xhr = new XMLHttpRequest();
xhr.open('POST', url, false); xhr.open('POST', url, false);
xhr.setRequestHeader('Content-Type', ''); xhr.setRequestHeader('Content-Type', '');

2
weed/server/volume_grpc_remote.go

@ -51,6 +51,8 @@ func (vs *VolumeServer) FetchAndWriteNeedle(ctx context.Context, req *volume_ser
if err == nil { if err == nil {
err = fmt.Errorf("local write needle %d size %d: %v", req.NeedleId, req.Size, err) err = fmt.Errorf("local write needle %d size %d: %v", req.NeedleId, req.Size, err)
} }
} else {
resp.ETag = n.Etag()
} }
}() }()
if len(req.Replicas) > 0 { if len(req.Replicas) > 0 {

12
weed/server/webdav_server.go

@ -26,6 +26,7 @@ import (
type WebDavOption struct { type WebDavOption struct {
Filer pb.ServerAddress Filer pb.ServerAddress
FilerRootPath string
DomainName string DomainName string
BucketsPath string BucketsPath string
GrpcDialOption grpc.DialOption GrpcDialOption grpc.DialOption
@ -58,6 +59,11 @@ func NewWebDavServer(option *WebDavOption) (ws *WebDavServer, err error) {
fs, _ := NewWebDavFileSystem(option) fs, _ := NewWebDavFileSystem(option)
// Fix no set filer.path , accessing "/" returns "//"
if option.FilerRootPath == "/" {
option.FilerRootPath = ""
}
ws = &WebDavServer{ ws = &WebDavServer{
option: option, option: option,
grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.filer"), grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.filer"),
@ -195,7 +201,8 @@ func (fs *WebDavFileSystem) Mkdir(ctx context.Context, fullDirPath string, perm
} }
func (fs *WebDavFileSystem) OpenFile(ctx context.Context, fullFilePath string, flag int, perm os.FileMode) (webdav.File, error) { func (fs *WebDavFileSystem) OpenFile(ctx context.Context, fullFilePath string, flag int, perm os.FileMode) (webdav.File, error) {
// Add filer.path
fullFilePath = fs.option.FilerRootPath + fullFilePath
glog.V(2).Infof("WebDavFileSystem.OpenFile %v %x", fullFilePath, flag) glog.V(2).Infof("WebDavFileSystem.OpenFile %v %x", fullFilePath, flag)
var err error var err error
@ -367,7 +374,8 @@ func (fs *WebDavFileSystem) stat(ctx context.Context, fullFilePath string) (os.F
} }
func (fs *WebDavFileSystem) Stat(ctx context.Context, name string) (os.FileInfo, error) { func (fs *WebDavFileSystem) Stat(ctx context.Context, name string) (os.FileInfo, error) {
// Add filer.path
name = fs.option.FilerRootPath + name
glog.V(2).Infof("WebDavFileSystem.Stat %v", name) glog.V(2).Infof("WebDavFileSystem.Stat %v", name)
return fs.stat(ctx, name) return fs.stat(ctx, name)

177
weed/shell/command_fs_verify.go

@ -0,0 +1,177 @@
package shell
import (
"context"
"flag"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/operation"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
"github.com/seaweedfs/seaweedfs/weed/storage"
"github.com/seaweedfs/seaweedfs/weed/util"
"io"
"math"
"strings"
"time"
)
// init registers the fs.verify command with the shell's command registry.
func init() {
	Commands = append(Commands, &commandFsVerify{})
}
// commandFsVerify implements the `fs.verify` shell command: it recursively
// checks that every chunk of every file under a directory still has a live
// needle on a volume server.
type commandFsVerify struct {
	env                *CommandEnv                   // shell environment; provides grpc dial options and filer access
	volumeIds          map[uint32][]pb.ServerAddress // volume id -> volume servers holding a copy (built by collectVolumeIds)
	verbose            *bool                         // -v flag: print per-file progress
	modifyTimeAgoAtSec int64                         // -modifyTimeAgo flag in seconds; 0 disables the mtime filter
	writer             io.Writer                     // destination for command output
}
// Name returns the shell command name used to invoke this command.
func (c *commandFsVerify) Name() string {
	return "fs.verify"
}
// Help returns the usage text shown by the shell's help system.
func (c *commandFsVerify) Help() string {
	return `recursively verify all files under a directory

	fs.verify [-v] [-modifyTimeAgo 1h] /buckets/dir

`
}
// Do parses the command flags, indexes the cluster's volume locations, then
// verifies every file chunk under the given directory tree, printing a
// summary of verified vs. errored files on success.
func (c *commandFsVerify) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
	c.env = commandEnv
	c.writer = writer

	fsVerifyCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
	c.verbose = fsVerifyCommand.Bool("v", false, "print out each processed files")
	modifyTimeAgo := fsVerifyCommand.Duration("modifyTimeAgo", 0, "only include files after this modify time to verify")
	if err = fsVerifyCommand.Parse(args); err != nil {
		return err
	}

	path, parseErr := commandEnv.parseUrl(findInputDirectory(fsVerifyCommand.Args()))
	if parseErr != nil {
		return parseErr
	}

	c.modifyTimeAgoAtSec = int64(modifyTimeAgo.Seconds())

	// Bug fix: the original returned parseErr here, which is always nil at
	// this point, silently swallowing collectVolumeIds failures.
	if err := c.collectVolumeIds(); err != nil {
		return err
	}

	fileCount, errCount, terr := c.verifyTraverseBfs(path)
	if terr == nil {
		fmt.Fprintf(writer, "verified %d files, error %d files \n", fileCount, errCount)
	}
	return terr
}
// collectVolumeIds builds the volume-id -> volume-server-addresses index from
// the current cluster topology, used later to locate each chunk's replicas.
func (c *commandFsVerify) collectVolumeIds() error {
	c.volumeIds = make(map[uint32][]pb.ServerAddress)

	topologyInfo, _, topologyErr := collectTopologyInfo(c.env, 0)
	if topologyErr != nil {
		return topologyErr
	}

	eachDataNode(topologyInfo, func(dc string, rack RackId, nodeInfo *master_pb.DataNodeInfo) {
		// One address per data node; reuse it for every volume it hosts.
		address := pb.NewServerAddressFromDataNode(nodeInfo)
		for _, disk := range nodeInfo.DiskInfos {
			for _, volume := range disk.VolumeInfos {
				c.volumeIds[volume.Id] = append(c.volumeIds[volume.Id], address)
			}
		}
	})
	return nil
}
// verifyEntry asks one volume server for the status of a single needle.
// A needle reported as deleted is treated as verified (not an error).
func (c *commandFsVerify) verifyEntry(fileId *filer_pb.FileId, volumeServer *pb.ServerAddress) error {
	statusErr := operation.WithVolumeServerClient(false, *volumeServer, c.env.option.GrpcDialOption,
		func(client volume_server_pb.VolumeServerClient) error {
			request := &volume_server_pb.VolumeNeedleStatusRequest{
				VolumeId: fileId.VolumeId,
				NeedleId: fileId.FileKey,
			}
			_, statusErr := client.VolumeNeedleStatus(context.Background(), request)
			return statusErr
		},
	)
	if statusErr != nil && !strings.Contains(statusErr.Error(), storage.ErrorDeleted.Error()) {
		return statusErr
	}
	if *c.verbose {
		// Progress dot per verified needle.
		fmt.Fprintf(c.writer, ".")
	}
	return nil
}
// ItemEntry is the unit of work passed from the traversal callback to the
// verification worker: one file path plus all of its resolved chunks.
type ItemEntry struct {
	chunks []*filer_pb.FileChunk // flattened data + manifest chunks of the file
	path   util.FullPath         // full path of the file the chunks belong to
}
// verifyTraverseBfs walks the directory tree breadth-first and verifies the
// needle status of every chunk of every file, returning the number of files
// verified and the number of files with at least one verification error.
func (c *commandFsVerify) verifyTraverseBfs(path string) (fileCount int64, errCount int64, err error) {
	timeNowAtSec := time.Now().Unix()
	// Bug fix: the original wrote `return fileCount, errCount,
	// doTraverseBfsAndSaving(...)`, reading the counters before the traversal
	// call had run, so the returned counts could be stale zeros. Run the
	// traversal first, then return the counters the callbacks updated.
	err = doTraverseBfsAndSaving(c.env, nil, path, false,
		func(entry *filer_pb.FullEntry, outputChan chan interface{}) error {
			// Honor -modifyTimeAgo: skip entries whose mtime is older than the cutoff.
			if c.modifyTimeAgoAtSec > 0 {
				if entry.Entry.Attributes != nil && c.modifyTimeAgoAtSec < timeNowAtSec-entry.Entry.Attributes.Mtime {
					return nil
				}
			}
			dataChunks, manifestChunks, resolveErr := filer.ResolveChunkManifest(filer.LookupFn(c.env), entry.Entry.GetChunks(), 0, math.MaxInt64)
			if resolveErr != nil {
				return fmt.Errorf("failed to ResolveChunkManifest: %+v", resolveErr)
			}
			dataChunks = append(dataChunks, manifestChunks...)
			if len(dataChunks) > 0 {
				outputChan <- &ItemEntry{
					chunks: dataChunks,
					path:   util.NewFullPath(entry.Dir, entry.Entry.Name),
				}
			}
			return nil
		},
		func(outputChan chan interface{}) {
			for itemEntry := range outputChan {
				i := itemEntry.(*ItemEntry)
				fileMsg := fmt.Sprintf("file:%s needle status ", i.path)
				if *c.verbose {
					// Fprint, not Fprintf: fileMsg is not a format string and
					// may contain '%' characters from the file path.
					fmt.Fprint(c.writer, fileMsg)
					fileMsg = ""
				}
				// Bug fix: use a per-file error so a later successful chunk
				// cannot reset the error of an earlier failed chunk, which
				// previously let a broken file be counted as verified.
				var itemErr error
				for _, chunk := range i.chunks {
					volumeServers, ok := c.volumeIds[chunk.Fid.VolumeId]
					if !ok {
						itemErr = fmt.Errorf("volumeId %d not found", chunk.Fid.VolumeId)
						fmt.Fprintf(c.writer, "%sfailed verify chunk %d:%d: %+v\n",
							fileMsg, chunk.Fid.VolumeId, chunk.Fid.FileKey, itemErr)
						break
					}
					for _, volumeServer := range volumeServers {
						if itemErr = c.verifyEntry(chunk.Fid, &volumeServer); itemErr != nil {
							fmt.Fprintf(c.writer, "%sfailed verify %d:%d: %+v\n",
								fileMsg, chunk.Fid.VolumeId, chunk.Fid.FileKey, itemErr)
							break
						}
					}
					if itemErr != nil {
						break
					}
				}
				if itemErr != nil {
					errCount++
					continue
				}
				if *c.verbose {
					// Typo fix: was " verifed\n".
					fmt.Fprintf(c.writer, " verified\n")
				}
				fileCount++
			}
		})
	return fileCount, errCount, err
}

15
weed/shell/command_volume_list.go

@ -18,6 +18,9 @@ func init() {
type commandVolumeList struct { type commandVolumeList struct {
collectionPattern *string collectionPattern *string
dataCenter *string
rack *string
dataNode *string
readonly *bool readonly *bool
volumeId *uint64 volumeId *uint64
} }
@ -41,6 +44,9 @@ func (c *commandVolumeList) Do(args []string, commandEnv *CommandEnv, writer io.
c.collectionPattern = volumeListCommand.String("collectionPattern", "", "match with wildcard characters '*' and '?'") c.collectionPattern = volumeListCommand.String("collectionPattern", "", "match with wildcard characters '*' and '?'")
c.readonly = volumeListCommand.Bool("readonly", false, "show only readonly") c.readonly = volumeListCommand.Bool("readonly", false, "show only readonly")
c.volumeId = volumeListCommand.Uint64("volumeId", 0, "show only volume id") c.volumeId = volumeListCommand.Uint64("volumeId", 0, "show only volume id")
c.dataCenter = volumeListCommand.String("dataCenter", "", "show volumes only from the specified data center")
c.rack = volumeListCommand.String("rack", "", "show volumes only from the specified rack")
c.dataNode = volumeListCommand.String("dataNode", "", "show volumes only from the specified data node")
if err = volumeListCommand.Parse(args); err != nil { if err = volumeListCommand.Parse(args); err != nil {
return nil return nil
@ -80,6 +86,9 @@ func (c *commandVolumeList) writeTopologyInfo(writer io.Writer, t *master_pb.Top
}) })
var s statistics var s statistics
for _, dc := range t.DataCenterInfos { for _, dc := range t.DataCenterInfos {
if *c.dataCenter != "" && *c.dataCenter != dc.Id {
continue
}
s = s.plus(c.writeDataCenterInfo(writer, dc, verbosityLevel)) s = s.plus(c.writeDataCenterInfo(writer, dc, verbosityLevel))
} }
output(verbosityLevel >= 0, writer, "%+v \n", s) output(verbosityLevel >= 0, writer, "%+v \n", s)
@ -93,6 +102,9 @@ func (c *commandVolumeList) writeDataCenterInfo(writer io.Writer, t *master_pb.D
return a.Id < b.Id return a.Id < b.Id
}) })
for _, r := range t.RackInfos { for _, r := range t.RackInfos {
if *c.rack != "" && *c.rack != r.Id {
continue
}
s = s.plus(c.writeRackInfo(writer, r, verbosityLevel)) s = s.plus(c.writeRackInfo(writer, r, verbosityLevel))
} }
output(verbosityLevel >= 1, writer, " DataCenter %s %+v \n", t.Id, s) output(verbosityLevel >= 1, writer, " DataCenter %s %+v \n", t.Id, s)
@ -106,6 +118,9 @@ func (c *commandVolumeList) writeRackInfo(writer io.Writer, t *master_pb.RackInf
return a.Id < b.Id return a.Id < b.Id
}) })
for _, dn := range t.DataNodeInfos { for _, dn := range t.DataNodeInfos {
if *c.dataNode != "" && *c.dataNode != dn.Id {
continue
}
s = s.plus(c.writeDataNodeInfo(writer, dn, verbosityLevel)) s = s.plus(c.writeDataNodeInfo(writer, dn, verbosityLevel))
} }
output(verbosityLevel >= 2, writer, " Rack %s %+v \n", t.Id, s) output(verbosityLevel >= 2, writer, " Rack %s %+v \n", t.Id, s)

4
weed/stats/metrics.go

@ -282,12 +282,12 @@ func LoopPushingMetric(name, instance, addr string, intervalSeconds int) {
} }
} }
func StartMetricsServer(port int) {
func StartMetricsServer(ip string, port int) {
if port == 0 { if port == 0 {
return return
} }
http.Handle("/metrics", promhttp.HandlerFor(Gather, promhttp.HandlerOpts{})) http.Handle("/metrics", promhttp.HandlerFor(Gather, promhttp.HandlerOpts{}))
log.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", port), nil))
log.Fatal(http.ListenAndServe(fmt.Sprintf("%s:%d", ip, port), nil))
} }
func SourceName(port uint32) string { func SourceName(port uint32) string {

2
weed/storage/volume_read_all.go

@ -37,6 +37,8 @@ func (scanner *VolumeFileScanner4ReadAll) VisitNeedle(n *needle.Needle, offset i
NeedleBlobCompressed: n.IsCompressed(), NeedleBlobCompressed: n.IsCompressed(),
LastModified: n.LastModified, LastModified: n.LastModified,
Crc: n.Checksum.Value(), Crc: n.Checksum.Value(),
Name: n.Name,
Mime: n.Mime,
}) })
if sendErr != nil { if sendErr != nil {
return sendErr return sendErr

2
weed/util/constants.go

@ -5,7 +5,7 @@ import (
) )
var ( var (
VERSION_NUMBER = fmt.Sprintf("%.02f", 3.35)
VERSION_NUMBER = fmt.Sprintf("%.02f", 3.37)
VERSION = sizeLimit + " " + VERSION_NUMBER VERSION = sizeLimit + " " + VERSION_NUMBER
COMMIT = "" COMMIT = ""
) )

3
weed/weed.go

@ -46,7 +46,8 @@ func init() {
} }
func main() { func main() {
glog.MaxSize = 1024 * 1024 * 32
glog.MaxSize = 1024 * 1024 * 10
glog.MaxFileCount = 5
rand.Seed(time.Now().UnixNano()) rand.Seed(time.Now().UnixNano())
flag.Usage = usage flag.Usage = usage

Loading…
Cancel
Save