diff --git a/.github/workflows/binaries_dev.yml b/.github/workflows/binaries_dev.yml index 3e46646d5..3665ec818 100644 --- a/.github/workflows/binaries_dev.yml +++ b/.github/workflows/binaries_dev.yml @@ -44,7 +44,7 @@ jobs: run: echo BUILD_TIME=$(date -u +%Y%m%d-%H%M) >> ${GITHUB_ENV} - name: Go Release Binaries Large Disk - uses: wangyoucao577/go-release-action@af4a9db7b0ee3cf602cb75541d72bf568a99da4f # v1.22 + uses: wangyoucao577/go-release-action@23b3194d60e5291b78d4ebf84705f5d6c5a74c0e # v1.22 with: github_token: ${{ secrets.GITHUB_TOKEN }} goos: ${{ matrix.goos }} @@ -60,7 +60,7 @@ jobs: asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}" - name: Go Release Binaries Normal Volume Size - uses: wangyoucao577/go-release-action@af4a9db7b0ee3cf602cb75541d72bf568a99da4f # v1.22 + uses: wangyoucao577/go-release-action@23b3194d60e5291b78d4ebf84705f5d6c5a74c0e # v1.22 with: github_token: ${{ secrets.GITHUB_TOKEN }} goos: ${{ matrix.goos }} @@ -93,7 +93,7 @@ jobs: run: echo BUILD_TIME=$(date -u +%Y%m%d-%H%M) >> ${GITHUB_ENV} - name: Go Release Binaries Large Disk - uses: wangyoucao577/go-release-action@af4a9db7b0ee3cf602cb75541d72bf568a99da4f # v1.22 + uses: wangyoucao577/go-release-action@23b3194d60e5291b78d4ebf84705f5d6c5a74c0e # v1.22 with: github_token: ${{ secrets.GITHUB_TOKEN }} goos: ${{ matrix.goos }} @@ -109,7 +109,7 @@ jobs: asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}" - name: Go Release Binaries Normal Volume Size - uses: wangyoucao577/go-release-action@af4a9db7b0ee3cf602cb75541d72bf568a99da4f # v1.22 + uses: wangyoucao577/go-release-action@23b3194d60e5291b78d4ebf84705f5d6c5a74c0e # v1.22 with: github_token: ${{ secrets.GITHUB_TOKEN }} goos: ${{ matrix.goos }} diff --git a/.github/workflows/binaries_release0.yml b/.github/workflows/binaries_release0.yml index 2e016bc53..ede93d34d 100644 --- a/.github/workflows/binaries_release0.yml +++ b/.github/workflows/binaries_release0.yml @@ -30,7 +30,7 @@ jobs: # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v2 - name: Go Release Binaries Normal Volume Size - uses: wangyoucao577/go-release-action@af4a9db7b0ee3cf602cb75541d72bf568a99da4f # v1.22 + uses: wangyoucao577/go-release-action@23b3194d60e5291b78d4ebf84705f5d6c5a74c0e # v1.22 with: github_token: ${{ secrets.GITHUB_TOKEN }} goos: ${{ matrix.goos }} @@ -44,7 +44,7 @@ jobs: binary_name: weed asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}" - name: Go Release Large Disk Binaries - uses: wangyoucao577/go-release-action@af4a9db7b0ee3cf602cb75541d72bf568a99da4f # v1.22 + uses: wangyoucao577/go-release-action@23b3194d60e5291b78d4ebf84705f5d6c5a74c0e # v1.22 with: github_token: ${{ secrets.GITHUB_TOKEN }} goos: ${{ matrix.goos }} diff --git a/.github/workflows/binaries_release1.yml b/.github/workflows/binaries_release1.yml index fb56afdb5..790882d35 100644 --- a/.github/workflows/binaries_release1.yml +++ b/.github/workflows/binaries_release1.yml @@ -30,7 +30,7 @@ jobs: # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v2 - name: Go Release Binaries Normal Volume Size - uses: wangyoucao577/go-release-action@af4a9db7b0ee3cf602cb75541d72bf568a99da4f # v1.22 + uses: wangyoucao577/go-release-action@23b3194d60e5291b78d4ebf84705f5d6c5a74c0e # v1.22 with: github_token: ${{ secrets.GITHUB_TOKEN }} goos: ${{ matrix.goos 
}} @@ -44,7 +44,7 @@ jobs: binary_name: weed asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}" - name: Go Release Large Disk Binaries - uses: wangyoucao577/go-release-action@af4a9db7b0ee3cf602cb75541d72bf568a99da4f # v1.22 + uses: wangyoucao577/go-release-action@23b3194d60e5291b78d4ebf84705f5d6c5a74c0e # v1.22 with: github_token: ${{ secrets.GITHUB_TOKEN }} goos: ${{ matrix.goos }} diff --git a/.github/workflows/binaries_release2.yml b/.github/workflows/binaries_release2.yml index 3ccb63c84..58ee39f8a 100644 --- a/.github/workflows/binaries_release2.yml +++ b/.github/workflows/binaries_release2.yml @@ -30,7 +30,7 @@ jobs: # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v2 - name: Go Release Binaries Normal Volume Size - uses: wangyoucao577/go-release-action@af4a9db7b0ee3cf602cb75541d72bf568a99da4f # v1.22 + uses: wangyoucao577/go-release-action@23b3194d60e5291b78d4ebf84705f5d6c5a74c0e # v1.22 with: github_token: ${{ secrets.GITHUB_TOKEN }} goos: ${{ matrix.goos }} @@ -44,7 +44,7 @@ jobs: binary_name: weed asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}" - name: Go Release Large Disk Binaries - uses: wangyoucao577/go-release-action@af4a9db7b0ee3cf602cb75541d72bf568a99da4f # v1.22 + uses: wangyoucao577/go-release-action@23b3194d60e5291b78d4ebf84705f5d6c5a74c0e # v1.22 with: github_token: ${{ secrets.GITHUB_TOKEN }} goos: ${{ matrix.goos }} diff --git a/.github/workflows/binaries_release3.yml b/.github/workflows/binaries_release3.yml index 27153fdaf..875dc2024 100644 --- a/.github/workflows/binaries_release3.yml +++ b/.github/workflows/binaries_release3.yml @@ -30,7 +30,7 @@ jobs: # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v2 - name: Go Release Binaries Normal Volume Size - uses: wangyoucao577/go-release-action@af4a9db7b0ee3cf602cb75541d72bf568a99da4f # v1.22 + uses: wangyoucao577/go-release-action@23b3194d60e5291b78d4ebf84705f5d6c5a74c0e # v1.22 with: github_token: ${{ secrets.GITHUB_TOKEN }} goos: ${{ matrix.goos }} @@ -44,7 +44,7 @@ jobs: binary_name: weed asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}" - name: Go Release Large Disk Binaries - uses: wangyoucao577/go-release-action@af4a9db7b0ee3cf602cb75541d72bf568a99da4f # v1.22 + uses: wangyoucao577/go-release-action@23b3194d60e5291b78d4ebf84705f5d6c5a74c0e # v1.22 with: github_token: ${{ secrets.GITHUB_TOKEN }} goos: ${{ matrix.goos }} diff --git a/.github/workflows/binaries_release4.yml b/.github/workflows/binaries_release4.yml index bfdbe9218..069914118 100644 --- a/.github/workflows/binaries_release4.yml +++ b/.github/workflows/binaries_release4.yml @@ -30,7 +30,7 @@ jobs: # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v2 - name: Go Release Binaries Normal Volume Size - uses: wangyoucao577/go-release-action@af4a9db7b0ee3cf602cb75541d72bf568a99da4f # v1.22 + uses: wangyoucao577/go-release-action@23b3194d60e5291b78d4ebf84705f5d6c5a74c0e # v1.22 with: github_token: ${{ secrets.GITHUB_TOKEN }} goos: ${{ matrix.goos }} @@ -45,7 +45,7 @@ jobs: binary_name: weed asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}_full" - name: Go Release Large Disk Binaries - uses: wangyoucao577/go-release-action@af4a9db7b0ee3cf602cb75541d72bf568a99da4f # v1.22 + uses: wangyoucao577/go-release-action@23b3194d60e5291b78d4ebf84705f5d6c5a74c0e # 
v1.22 with: github_token: ${{ secrets.GITHUB_TOKEN }} goos: ${{ matrix.goos }} diff --git a/.github/workflows/helm_chart_release.yml b/.github/workflows/helm_chart_release.yml index d646ff8e7..f532bea99 100644 --- a/.github/workflows/helm_chart_release.yml +++ b/.github/workflows/helm_chart_release.yml @@ -1,10 +1,8 @@ name: "helm: publish charts" on: push: - branches: - - master - paths: - - "k8s/charts/**" + tags: + - '*' permissions: contents: write diff --git a/go.mod b/go.mod index 0b4acddb3..afdcb4bbf 100644 --- a/go.mod +++ b/go.mod @@ -58,7 +58,7 @@ require ( github.com/karlseguin/ccache/v2 v2.0.8 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect github.com/klauspost/compress v1.17.3 // indirect - github.com/klauspost/reedsolomon v1.11.8 + github.com/klauspost/reedsolomon v1.12.1 github.com/kurin/blazer v0.5.3 github.com/lib/pq v1.10.9 github.com/linxGnu/grocksdb v1.8.11 @@ -75,7 +75,7 @@ require ( github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/posener/complete v1.2.3 github.com/pquerna/cachecontrol v0.2.0 - github.com/prometheus/client_golang v1.17.0 + github.com/prometheus/client_golang v1.18.0 github.com/prometheus/client_model v0.5.0 // indirect github.com/prometheus/common v0.45.0 // indirect github.com/prometheus/procfs v0.12.0 @@ -154,9 +154,9 @@ require ( github.com/puzpuzpuz/xsync/v2 v2.5.1 github.com/rabbitmq/amqp091-go v1.9.0 github.com/rclone/rclone v1.65.0 - github.com/rdleal/intervalst v1.2.1 + github.com/rdleal/intervalst v1.2.2 github.com/schollz/progressbar/v3 v3.14.1 - github.com/shirou/gopsutil/v3 v3.23.11 + github.com/shirou/gopsutil/v3 v3.23.12 github.com/tikv/client-go/v2 v2.0.7 github.com/ydb-platform/ydb-go-sdk-auth-environ v0.2.0 github.com/ydb-platform/ydb-go-sdk/v3 v3.54.3 @@ -254,7 +254,7 @@ require ( github.com/jtolio/eventkit v0.0.0-20231019094657-5d77ebb407d9 // indirect github.com/jtolio/noiseconn v0.0.0-20230621152802-afeab29449e0 // indirect github.com/jzelinskie/whirlpool v0.0.0-20201016144138-0675e54bb004 // indirect - github.com/klauspost/cpuid/v2 v2.2.5 // indirect + github.com/klauspost/cpuid/v2 v2.2.6 // indirect github.com/koofr/go-httpclient v0.0.0-20230225102643-5d51a2e9dea6 // indirect github.com/koofr/go-koofrclient v0.0.0-20221207135200-cbd7fc9ad6a6 // indirect github.com/kr/fs v0.1.0 // indirect diff --git a/go.sum b/go.sum index d7ca59db2..d27e11c0f 100644 --- a/go.sum +++ b/go.sum @@ -618,10 +618,10 @@ github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47e github.com/klauspost/compress v1.17.3 h1:qkRjuerhUU1EmXLYGkSH6EZL+vPSxIrYjLNAK4slzwA= github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= -github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= -github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= -github.com/klauspost/reedsolomon v1.11.8 h1:s8RpUW5TK4hjr+djiOpbZJB4ksx+TdYbRH7vHQpwPOY= -github.com/klauspost/reedsolomon v1.11.8/go.mod h1:4bXRN+cVzMdml6ti7qLouuYi32KHJ5MGv0Qd8a47h6A= +github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= +github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/reedsolomon v1.12.1 h1:NhWgum1efX1x58daOBGCFWcxtEhOhXKKl1HAPQUp03Q= +github.com/klauspost/reedsolomon v1.12.1/go.mod 
h1:nEi5Kjb6QqtbofI6s+cbG/j1da11c96IBYBSnVGtuBs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/koofr/go-httpclient v0.0.0-20230225102643-5d51a2e9dea6 h1:uF5FHZ/L5gvZTyBNhhcm55rRorL66DOs4KIeeVXZ8eI= github.com/koofr/go-httpclient v0.0.0-20230225102643-5d51a2e9dea6/go.mod h1:6HAT62hK6QH+ljNtZayJCKpbZy5hJIB12+1Ze1bFS7M= @@ -787,8 +787,8 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= -github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= +github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= +github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -820,8 +820,8 @@ github.com/rclone/rclone v1.65.0 h1:e4kV+MQ3v3XRPEzjMiI4L54k2lezQCR/ZxtTym49TkE= github.com/rclone/rclone v1.65.0/go.mod h1:EqoIENpN8BRBQ3DbN/Jeh2sZyCxH01LdsJbLMfDsYl0= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rdleal/intervalst v1.2.1 h1:Lw+Bwc6REG9bDSqxgIcuDZ4sX9u7+irwqNwlIJepeEU= -github.com/rdleal/intervalst v1.2.1/go.mod h1:xO89Z6BC+LQDH+IPQQw/OESt5UADgFD41tYMUINGpxQ= +github.com/rdleal/intervalst v1.2.2 h1:cw93gelL8qIAPuOwT+oHFewumFdrdW/QMFqgRhLUrlY= +github.com/rdleal/intervalst v1.2.2/go.mod h1:xO89Z6BC+LQDH+IPQQw/OESt5UADgFD41tYMUINGpxQ= github.com/redis/go-redis/v9 v9.0.2 h1:BA426Zqe/7r56kCcvxYLWe1mkaz71LKF77GwgFzSxfE= github.com/redis/go-redis/v9 v9.0.2/go.mod h1:/xDTe9EF1LM61hek62Poq2nzQSGj0xSrEtEHbBQevps= github.com/redis/rueidis v1.0.19 h1:s65oWtotzlIFN8eMPhyYwxlwLR1lUdhza2KtWprKYSo= @@ -854,8 +854,8 @@ github.com/seaweedfs/raft v1.1.3/go.mod h1:9cYlEBA+djJbnf/5tWsCybtbL7ICYpi+Uxcg3 github.com/secsy/goftp v0.0.0-20200609142545-aa2de14babf4 h1:PT+ElG/UUFMfqy5HrxJxNzj3QBOf7dZwupeVC+mG1Lo= github.com/secsy/goftp v0.0.0-20200609142545-aa2de14babf4/go.mod h1:MnkX001NG75g3p8bhFycnyIjeQoOjGL6CEIsdE/nKSY= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/shirou/gopsutil/v3 v3.23.11 h1:i3jP9NjCPUz7FiZKxlMnODZkdSIp2gnzfrvsu9CuWEQ= -github.com/shirou/gopsutil/v3 v3.23.11/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= +github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4= +github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= 
github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= diff --git a/k8s/charts/seaweedfs/templates/_helpers.tpl b/k8s/charts/seaweedfs/templates/_helpers.tpl index e9f35447d..f6f7b8991 100644 --- a/k8s/charts/seaweedfs/templates/_helpers.tpl +++ b/k8s/charts/seaweedfs/templates/_helpers.tpl @@ -142,7 +142,7 @@ Inject extra environment vars in the format key:value, if populated {{/* check if any InitContainers exist for Volumes */}} {{- define "volume.initContainers_exists" -}} -{{- if or (not (empty .Values.volume.dir_idx )) (not (empty .Values.volume.initContainers )) -}} +{{- if or (not (empty .Values.volume.idx )) (not (empty .Values.volume.initContainers )) -}} {{- printf "true" -}} {{- else -}} {{- printf "" -}} diff --git a/k8s/charts/seaweedfs/templates/filer-service-client.yaml b/k8s/charts/seaweedfs/templates/filer-service-client.yaml index 41251c897..d7618c4cb 100644 --- a/k8s/charts/seaweedfs/templates/filer-service-client.yaml +++ b/k8s/charts/seaweedfs/templates/filer-service-client.yaml @@ -1,3 +1,4 @@ +{{- if .Values.filer.enabled }} apiVersion: v1 kind: Service metadata: @@ -32,3 +33,4 @@ spec: selector: app.kubernetes.io/name: {{ template "seaweedfs.name" . }} app.kubernetes.io/component: filer +{{- end }} diff --git a/k8s/charts/seaweedfs/templates/filer-service.yaml b/k8s/charts/seaweedfs/templates/filer-service.yaml index 352a20676..ab7e98df8 100644 --- a/k8s/charts/seaweedfs/templates/filer-service.yaml +++ b/k8s/charts/seaweedfs/templates/filer-service.yaml @@ -1,3 +1,4 @@ +{{- if .Values.filer.enabled }} apiVersion: v1 kind: Service metadata: @@ -44,3 +45,4 @@ spec: selector: app.kubernetes.io/name: {{ template "seaweedfs.name" . }} app.kubernetes.io/component: filer +{{- end }} diff --git a/k8s/charts/seaweedfs/templates/filer-servicemonitor.yaml b/k8s/charts/seaweedfs/templates/filer-servicemonitor.yaml index 8035d7ffb..d0aa6c8b8 100644 --- a/k8s/charts/seaweedfs/templates/filer-servicemonitor.yaml +++ b/k8s/charts/seaweedfs/templates/filer-servicemonitor.yaml @@ -1,3 +1,4 @@ +{{- if .Values.filer.enabled }} {{- if .Values.filer.metricsPort }} {{- if .Values.global.monitoring.enabled }} apiVersion: monitoring.coreos.com/v1 @@ -21,4 +22,5 @@ spec: app.kubernetes.io/name: {{ template "seaweedfs.name" . 
}} app.kubernetes.io/component: filer {{- end }} -{{- end }} \ No newline at end of file +{{- end }} +{{- end }} diff --git a/k8s/charts/seaweedfs/templates/filer-statefulset.yaml b/k8s/charts/seaweedfs/templates/filer-statefulset.yaml index c8e10f21a..097a63cd3 100644 --- a/k8s/charts/seaweedfs/templates/filer-statefulset.yaml +++ b/k8s/charts/seaweedfs/templates/filer-statefulset.yaml @@ -185,7 +185,7 @@ spec: -s3.auditLogConfig=/etc/sw/filer_s3_auditLogConfig.json \ {{- end }} {{- end }} - -master={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master.{{ $.Release.Namespace }}:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }} + -master={{ if .Values.global.masterServer }}{{.Values.global.masterServer}}{{ else }}{{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master.{{ $.Release.Namespace }}:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}{{ end }} volumeMounts: {{- if eq .Values.filer.logs.type "hostPath" }} - name: seaweedfs-filer-log-volume diff --git a/k8s/charts/seaweedfs/templates/filler-ingress.yaml b/k8s/charts/seaweedfs/templates/filler-ingress.yaml index 5580bb188..e5cc9a275 100644 --- a/k8s/charts/seaweedfs/templates/filler-ingress.yaml +++ b/k8s/charts/seaweedfs/templates/filler-ingress.yaml @@ -1,3 +1,4 @@ +{{- if .Values.filer.enabled }} {{- if .Values.filer.ingress.enabled }} {{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }} apiVersion: networking.k8s.io/v1 @@ -43,4 +44,5 @@ spec: {{- if .Values.filer.ingress.host }} host: {{ .Values.filer.ingress.host }} {{- end }} -{{- end }} \ No newline at end of file +{{- end }} +{{- end }} diff --git a/k8s/charts/seaweedfs/templates/master-ingress.yaml b/k8s/charts/seaweedfs/templates/master-ingress.yaml index 8a72b5ea3..7bac95809 100644 --- a/k8s/charts/seaweedfs/templates/master-ingress.yaml +++ b/k8s/charts/seaweedfs/templates/master-ingress.yaml @@ -1,3 +1,4 @@ +{{- if .Values.master.enabled }} {{- if .Values.master.ingress.enabled }} {{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }} apiVersion: networking.k8s.io/v1 @@ -44,3 +45,4 @@ spec: host: {{ .Values.master.ingress.host }} {{- end }} {{- end }} +{{- end }} diff --git a/k8s/charts/seaweedfs/templates/master-service.yaml b/k8s/charts/seaweedfs/templates/master-service.yaml index 978f9c881..9e69f94e5 100644 --- a/k8s/charts/seaweedfs/templates/master-service.yaml +++ b/k8s/charts/seaweedfs/templates/master-service.yaml @@ -1,3 +1,4 @@ +{{- if .Values.master.enabled }} apiVersion: v1 kind: Service metadata: @@ -9,7 +10,7 @@ metadata: helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} app.kubernetes.io/managed-by: {{ .Release.Service }} annotations: - service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" spec: clusterIP: None publishNotReadyAddresses: true @@ -31,3 +32,4 @@ spec: selector: app.kubernetes.io/name: {{ template "seaweedfs.name" . 
}} app.kubernetes.io/component: master +{{- end }} diff --git a/k8s/charts/seaweedfs/templates/master-servicemonitor.yaml b/k8s/charts/seaweedfs/templates/master-servicemonitor.yaml index e9844fb8b..93715f031 100644 --- a/k8s/charts/seaweedfs/templates/master-servicemonitor.yaml +++ b/k8s/charts/seaweedfs/templates/master-servicemonitor.yaml @@ -1,3 +1,4 @@ +{{- if .Values.master.enabled }} {{- if .Values.master.metricsPort }} {{- if .Values.global.monitoring.enabled }} apiVersion: monitoring.coreos.com/v1 @@ -21,4 +22,5 @@ spec: app.kubernetes.io/name: {{ template "seaweedfs.name" . }} app.kubernetes.io/component: master {{- end }} -{{- end }} \ No newline at end of file +{{- end }} +{{- end }} diff --git a/k8s/charts/seaweedfs/templates/s3-service.yaml b/k8s/charts/seaweedfs/templates/s3-service.yaml index f111999fa..01d79ad74 100644 --- a/k8s/charts/seaweedfs/templates/s3-service.yaml +++ b/k8s/charts/seaweedfs/templates/s3-service.yaml @@ -1,3 +1,4 @@ +{{- if or .Values.s3.enabled .Values.filer.s3.enabled }} apiVersion: v1 kind: Service metadata: @@ -30,3 +31,4 @@ spec: selector: app.kubernetes.io/name: {{ template "seaweedfs.name" . }} app.kubernetes.io/component: {{ if .Values.s3.enabled }}s3{{ else }}filer{{ end }} +{{- end }} diff --git a/k8s/charts/seaweedfs/templates/s3-servicemonitor.yaml b/k8s/charts/seaweedfs/templates/s3-servicemonitor.yaml index 876f72dcd..7caf1a167 100644 --- a/k8s/charts/seaweedfs/templates/s3-servicemonitor.yaml +++ b/k8s/charts/seaweedfs/templates/s3-servicemonitor.yaml @@ -1,3 +1,4 @@ +{{- if or .Values.s3.enabled .Values.filer.s3.enabled }} {{- if .Values.s3.metricsPort }} {{- if .Values.global.monitoring.enabled }} apiVersion: monitoring.coreos.com/v1 @@ -21,4 +22,5 @@ spec: app: {{ template "seaweedfs.name" . }} component: s3 {{- end }} -{{- end }} \ No newline at end of file +{{- end }} +{{- end }} diff --git a/k8s/charts/seaweedfs/templates/secret-seaweedfs-db.yaml b/k8s/charts/seaweedfs/templates/secret-seaweedfs-db.yaml index 9a1aca91d..5b7a81038 100644 --- a/k8s/charts/seaweedfs/templates/secret-seaweedfs-db.yaml +++ b/k8s/charts/seaweedfs/templates/secret-seaweedfs-db.yaml @@ -1,3 +1,4 @@ +{{- if .Values.filer.enabled }} apiVersion: v1 kind: Secret type: Opaque @@ -17,3 +18,4 @@ stringData: password: "HardCodedPassword" # better to random generate and create in DB # password: {{ randAlphaNum 10 | sha256sum | b64enc | trunc 32 }} +{{- end }} diff --git a/k8s/charts/seaweedfs/templates/volume-service.yaml b/k8s/charts/seaweedfs/templates/volume-service.yaml index 67bfcacf1..1205f4fad 100644 --- a/k8s/charts/seaweedfs/templates/volume-service.yaml +++ b/k8s/charts/seaweedfs/templates/volume-service.yaml @@ -1,3 +1,4 @@ +{{- if .Values.volume.enabled }} apiVersion: v1 kind: Service metadata: @@ -29,3 +30,4 @@ spec: selector: app.kubernetes.io/name: {{ template "seaweedfs.name" . }} app.kubernetes.io/component: volume +{{- end }} diff --git a/k8s/charts/seaweedfs/templates/volume-servicemonitor.yaml b/k8s/charts/seaweedfs/templates/volume-servicemonitor.yaml index d2ee8b4c7..c23e4dec0 100644 --- a/k8s/charts/seaweedfs/templates/volume-servicemonitor.yaml +++ b/k8s/charts/seaweedfs/templates/volume-servicemonitor.yaml @@ -1,3 +1,4 @@ +{{- if .Values.volume.enabled }} {{- if .Values.volume.metricsPort }} {{- if .Values.global.monitoring.enabled }} apiVersion: monitoring.coreos.com/v1 @@ -21,4 +22,5 @@ spec: app.kubernetes.io/name: {{ template "seaweedfs.name" . 
}} app.kubernetes.io/component: volume {{- end }} -{{- end }} \ No newline at end of file +{{- end }} +{{- end }} diff --git a/k8s/charts/seaweedfs/templates/volume-statefulset.yaml b/k8s/charts/seaweedfs/templates/volume-statefulset.yaml index e03db4ba7..2bd058cff 100644 --- a/k8s/charts/seaweedfs/templates/volume-statefulset.yaml +++ b/k8s/charts/seaweedfs/templates/volume-statefulset.yaml @@ -54,17 +54,19 @@ spec: {{- $initContainers_exists := include "volume.initContainers_exists" . -}} {{- if $initContainers_exists }} initContainers: - {{- if .Values.volume.dir_idx }} + {{- if .Values.volume.idx }} - name: seaweedfs-vol-move-idx image: {{ template "volume.image" . }} imagePullPolicy: {{ .Values.global.imagePullPolicy | default "IfNotPresent" }} command: [ '/bin/sh', '-c' ] - args: [ 'if ls {{ .Values.volume.dir }}/*.idx >/dev/null 2>&1; then mv {{ .Values.volume.dir }}/*.idx {{ .Values.volume.dir_idx }}/; fi;' ] + args: [ '{{range $dir := .Values.volume.dataDirs }}if ls /{{$dir.name}}/*.idx >/dev/null 2>&1; then mv /{{$dir.name}}/*.idx /idx/ ; fi; {{end}}' ] volumeMounts: - name: idx - mountPath: {{ .Values.volume.dir_idx }} - - name: data - mountPath: {{ .Values.volume.dir }} + mountPath: /idx + {{- range $dir := .Values.volume.dataDirs }} + - name: {{ $dir.name }} + mountPath: /{{ $dir.name }} + {{- end }} {{- end }} {{- if .Values.volume.initContainers }} {{ tpl .Values.volume.initContainers . | nindent 8 | trim }} @@ -100,7 +102,7 @@ spec: - "-ec" - | exec /usr/bin/weed \ - {{- if eq .Values.volume.logs.type "hostPath" }} + {{- if .Values.volume.logs }} -logdir=/logs \ {{- else }} -logtostderr=true \ @@ -115,11 +117,11 @@ spec: {{- if .Values.volume.metricsPort }} -metricsPort={{ .Values.volume.metricsPort }} \ {{- end }} - -dir={{ .Values.volume.dir }} \ - {{- if .Values.volume.dir_idx }} - -dir.idx={{ .Values.volume.dir_idx }} \ + -dir {{range $index, $dir := .Values.volume.dataDirs }}{{if ne $index 0}},{{end}}/{{$dir.name}}{{end}} \ + {{- if .Values.volume.idx }} + -dir.idx=/idx \ {{- end }} - -max={{ .Values.volume.maxVolumes }} \ + -max {{range $index, $dir := .Values.volume.dataDirs }}{{if ne $index 0}},{{end}}{{$dir.maxVolumes}}{{end}} \ {{- if .Values.volume.rack }} -rack={{ .Values.volume.rack }} \ {{- end }} @@ -146,18 +148,20 @@ spec: -minFreeSpacePercent={{ .Values.volume.minFreeSpacePercent }} \ -ip=${POD_NAME}.${SEAWEEDFS_FULLNAME}-volume.{{ .Release.Namespace }} \ -compactionMBps={{ .Values.volume.compactionMBps }} \ - -mserver={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master.{{ $.Release.Namespace }}:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }} + -mserver={{ if .Values.global.masterServer }}{{.Values.global.masterServer}}{{ else }}{{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master.{{ $.Release.Namespace }}:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}{{ end }} volumeMounts: - - name: data - mountPath: "{{ .Values.volume.dir }}/" - {{- if .Values.volume.dir_idx }} - - name: idx - mountPath: "{{ .Values.volume.dir_idx }}/" + {{- range $dir := .Values.volume.dataDirs }} + - name: {{ $dir.name }} + mountPath: "/{{ $dir.name }}/" {{- end }} - {{- if eq .Values.volume.logs.type "hostPath" }} + {{- if .Values.volume.logs }} - name: logs mountPath: "/logs/" {{- end }} + {{- if .Values.volume.idx }} 
+ - name: idx + mountPath: "/idx/" + {{- end }} {{- if .Values.global.enableSecurity }} - name: security-config readOnly: true @@ -221,40 +225,51 @@ spec: {{- include "common.tplvalues.render" (dict "value" .Values.volume.sidecars "context" $) | nindent 8 }} {{- end }} volumes: - {{- if eq .Values.volume.data.type "hostPath" }} - - name: data + + {{- range $dir := .Values.volume.dataDirs }} + + {{- if eq $dir.type "hostPath" }} + - name: {{ $dir.name }} hostPath: - path: {{ .Values.volume.data.hostPathPrefix }}/object_store/ + path: {{ $dir.hostPathPrefix }}/object_store/ type: DirectoryOrCreate - {{- end }} - {{- if eq .Values.volume.data.type "existingClaim" }} - - name: data + {{- end }} + {{- if eq $dir.type "existingClaim" }} + - name: {{ $dir.name }} persistentVolumeClaim: - claimName: {{ .Values.volume.data.claimName }} - {{- end }} - {{- if and (eq .Values.volume.idx.type "hostPath") .Values.volume.dir_idx }} + claimName: {{ $dir.claimName }} + {{- end }} + + {{- end }} + + {{- if .Values.volume.idx }} + {{- if eq .Values.volume.idx.type "hostPath" }} - name: idx hostPath: path: {{ .Values.volume.idx.hostPathPrefix }}/seaweedfs-volume-idx/ type: DirectoryOrCreate - {{- end }} - {{- if eq .Values.volume.idx.type "existingClaim" }} + {{- end }} + {{- if eq .Values.volume.idx.type "existingClaim" }} - name: idx persistentVolumeClaim: - claimName: {{ .Values.volume.idx.claimName }} - {{- end }} - {{- if eq .Values.volume.logs.type "hostPath" }} + claimName: {{ .Values.volume.idx.claimName }} + {{- end }} + {{- end }} + + {{- if .Values.volume.logs }} + {{- if eq .Values.volume.logs.type "hostPath" }} - name: logs hostPath: path: {{ .Values.volume.logs.hostPathPrefix }}/logs/seaweedfs/volume type: DirectoryOrCreate - {{- end }} - {{- if eq .Values.volume.logs.type "existingClaim" }} + {{- end }} + {{- if eq .Values.volume.logs.type "existingClaim" }} - name: logs persistentVolumeClaim: - claimName: {{ .Values.volume.data.claimName }} - {{- end }} - {{- if .Values.global.enableSecurity }} + claimName: {{ .Values.volume.logs.claimName }} + {{- end }} + {{- end }} + {{- if .Values.global.enableSecurity }} - name: security-config configMap: name: {{ template "seaweedfs.name" . }}-security-config @@ -273,7 +288,7 @@ spec: - name: client-cert secret: secretName: {{ template "seaweedfs.name" . }}-client-cert - {{- end }} + {{- end }} {{- if .Values.volume.extraVolumes }} {{ tpl .Values.volume.extraVolumes . | indent 8 | trim }} {{- end }} @@ -281,24 +296,25 @@ spec: nodeSelector: {{ tpl .Values.volume.nodeSelector . | indent 8 | trim }} {{- end }} - {{- $pvc_exists := include "volume.pvc_exists" . -}} - {{- if $pvc_exists }} volumeClaimTemplates: - {{- if eq .Values.volume.data.type "persistentVolumeClaim"}} + {{- range $dir := .Values.volume.dataDirs }} + {{- if eq $dir.type "persistentVolumeClaim" }} - metadata: - name: data - {{- with .Values.volume.data.annotations }} + name: {{ $dir.name }} + {{- with $dir.annotations }} annotations: {{- toYaml . 
| nindent 10 }} {{- end }} spec: accessModes: [ "ReadWriteOnce" ] - storageClassName: {{ .Values.volume.data.storageClass }} + storageClassName: {{ $dir.storageClass }} resources: requests: - storage: {{ .Values.volume.data.size }} + storage: {{ $dir.size }} + {{- end }} {{- end }} - {{- if and (eq .Values.volume.idx.type "persistentVolumeClaim") .Values.volume.dir_idx }} + + {{- if and .Values.volume.idx (eq .Values.volume.idx.type "persistentVolumeClaim") }} - metadata: name: idx {{- with .Values.volume.idx.annotations }} @@ -312,7 +328,7 @@ spec: requests: storage: {{ .Values.volume.idx.size }} {{- end }} - {{- if eq .Values.volume.logs.type "persistentVolumeClaim" }} + {{- if and .Values.volume.logs (eq .Values.volume.logs.type "persistentVolumeClaim") }} - metadata: name: logs {{- with .Values.volume.logs.annotations }} @@ -326,5 +342,4 @@ spec: requests: storage: {{ .Values.volume.logs.size }} {{- end }} - {{- end }} -{{- end }} + {{- end }} diff --git a/k8s/charts/seaweedfs/values.yaml b/k8s/charts/seaweedfs/values.yaml index a0138d15c..61393e8e7 100644 --- a/k8s/charts/seaweedfs/values.yaml +++ b/k8s/charts/seaweedfs/values.yaml @@ -10,6 +10,7 @@ global: restartPolicy: Always loggingLevel: 1 enableSecurity: false + masterServer: null securityConfig: jwtSigning: volumeWrite: true @@ -89,7 +90,6 @@ master: # claimName: "my-pvc" data: type: "hostPath" - size: "" storageClass: "" hostPathPrefix: /ssd @@ -98,7 +98,7 @@ master: size: "" storageClass: "" hostPathPrefix: /storage - + ## @param master.sidecars Add additional sidecar containers to the master pod(s) ## e.g: ## sidecars: @@ -239,49 +239,63 @@ volume: # minimum free disk space(in percents). If free disk space lower this value - all volumes marks as ReadOnly minFreeSpacePercent: 7 - # You may use ANY storage-class, example with local-path-provisioner + # For each data disk you may use ANY storage-class, example with local-path-provisioner # Annotations are optional. - # data: - # type: "persistentVolumeClaim" - # size: "24Ti" - # storageClass: "local-path-provisioner" - # annotations: + # dataDirs: + # - name: data: + # type: "persistentVolumeClaim" + # size: "24Ti" + # storageClass: "local-path-provisioner" + # annotations: # "key": "value" + # maxVolumes: 0 # If set to zero on non-windows OS, the limit will be auto configured. (default "7") # # You may also spacify an existing claim: - # data: - # type: "existingClaim" - # claimName: "my-pvc" + # - name: data + # type: "existingClaim" + # claimName: "my-pvc" + # maxVolumes: 0 # If set to zero on non-windows OS, the limit will be auto configured. (default "7") - data: + dataDirs: + - name: data1 type: "hostPath" - size: "" - storageClass: "" - hostPathPrefix: /storage - - idx: - type: "hostPath" - size: "" - storageClass: "" hostPathPrefix: /ssd + maxVolumes: 0 - logs: - type: "hostPath" - size: "" - storageClass: "" - hostPathPrefix: /storage + # - name: data2 + # type: "persistentVolumeClaim" + # storageClass: "yourClassNameOfChoice" + # size: "800Gi" + # maxVolumes: 0 + + # idx can be defined by: + # + # idx: + # type: "hostPath" + # hostPathPrefix: /ssd + # + # or + # + # idx: + # type: "persistentVolumeClaim" + # size: "20Gi" + # storageClass: "local-path-provisioner" + # + # or + # + # idx: + # type: "existingClaim" + # claimName: "myClaim" + + # same applies to "logs" + + idx: "" + + logs: "" # limit background compaction or copying speed in mega bytes per second compactionMBps: "50" - # Directories to store data files. dir[,dir]... 
(default "/tmp") - dir: "/data" - # Directories to store index files. dir[,dir]... (default is the same as "dir") - dir_idx: null - - # Maximum numbers of volumes, count[,count]... - # If set to zero on non-windows OS, the limit will be auto configured. (default "7") - maxVolumes: "0" # Volume server's rack name rack: null @@ -297,7 +311,7 @@ volume: # Adjust jpg orientation when uploading. imagesFixOrientation: false - + ## @param volume.sidecars Add additional sidecar containers to the volume pod(s) ## e.g: ## sidecars: @@ -451,7 +465,7 @@ filer: size: "" storageClass: "" hostPathPrefix: /storage - + ## @param filer.sidecars Add additional sidecar containers to the filer pod(s) ## e.g: ## sidecars: @@ -644,7 +658,7 @@ s3: # Suffix of the host name, {bucket}.{domainName} domainName: "" - + ## @param s3.sidecars Add additional sidecar containers to the s3 pod(s) ## e.g: ## sidecars: diff --git a/weed/filer/filechunks_read.go b/weed/filer/filechunks_read.go index b8768ed63..11b297a3c 100644 --- a/weed/filer/filechunks_read.go +++ b/weed/filer/filechunks_read.go @@ -38,10 +38,10 @@ func readResolvedChunks(chunks []*filer_pb.FileChunk, startOffset int64, stopOff return int(a.ts - b.ts) } if a.isStart { - return -1 + return 1 } if b.isStart { - return 1 + return -1 } return 0 }) diff --git a/weed/filer/filechunks_test.go b/weed/filer/filechunks_test.go index b448950a9..7554b0080 100644 --- a/weed/filer/filechunks_test.go +++ b/weed/filer/filechunks_test.go @@ -553,3 +553,18 @@ func TestViewFromVisibleIntervals3(t *testing.T) { } } + +func TestCompactFileChunks3(t *testing.T) { + chunks := []*filer_pb.FileChunk{ + {Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 50}, + {Offset: 100, Size: 100, FileId: "ghi", ModifiedTsNs: 50}, + {Offset: 200, Size: 100, FileId: "jkl", ModifiedTsNs: 100}, + {Offset: 300, Size: 100, FileId: "def", ModifiedTsNs: 200}, + } + + compacted, _ := CompactFileChunks(nil, chunks) + + if len(compacted) != 4 { + t.Fatalf("unexpected compacted: %d", len(compacted)) + } +} diff --git a/weed/filer/stream.go b/weed/filer/stream.go index 51a82fb2e..2686fd833 100644 --- a/weed/filer/stream.go +++ b/weed/filer/stream.go @@ -3,13 +3,14 @@ package filer import ( "bytes" "fmt" - "golang.org/x/exp/slices" "io" "math" "strings" "sync" "time" + "golang.org/x/exp/slices" + "github.com/seaweedfs/seaweedfs/weed/glog" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/stats" @@ -66,13 +67,14 @@ func NewFileReader(filerClient filer_pb.FilerClient, entry *filer_pb.Entry) io.R return NewChunkStreamReader(filerClient, entry.GetChunks()) } -func StreamContent(masterClient wdclient.HasLookupFileIdFunction, writer io.Writer, chunks []*filer_pb.FileChunk, offset int64, size int64) error { - return StreamContentWithThrottler(masterClient, writer, chunks, offset, size, 0) -} +type DoStreamContent func(writer io.Writer) error -func StreamContentWithThrottler(masterClient wdclient.HasLookupFileIdFunction, writer io.Writer, chunks []*filer_pb.FileChunk, offset int64, size int64, downloadMaxBytesPs int64) error { +func PrepareStreamContent(masterClient wdclient.HasLookupFileIdFunction, chunks []*filer_pb.FileChunk, offset int64, size int64) (DoStreamContent, error) { + return PrepareStreamContentWithThrottler(masterClient, chunks, offset, size, 0) +} - glog.V(4).Infof("start to stream content for chunks: %d", len(chunks)) +func PrepareStreamContentWithThrottler(masterClient wdclient.HasLookupFileIdFunction, chunks []*filer_pb.FileChunk, offset int64, size int64, 
downloadMaxBytesPs int64) (DoStreamContent, error) { + glog.V(4).Infof("prepare to stream content for chunks: %d", len(chunks)) chunkViews := ViewFromChunks(masterClient.GetLookupFileIdFunction(), chunks, offset, size) fileId2Url := make(map[string][]string) @@ -91,52 +93,61 @@ func StreamContentWithThrottler(masterClient wdclient.HasLookupFileIdFunction, w } if err != nil { glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err) - return err + return nil, err } else if len(urlStrings) == 0 { errUrlNotFound := fmt.Errorf("operation LookupFileId %s failed, err: urls not found", chunkView.FileId) glog.Error(errUrlNotFound) - return errUrlNotFound + return nil, errUrlNotFound } fileId2Url[chunkView.FileId] = urlStrings } - downloadThrottler := util.NewWriteThrottler(downloadMaxBytesPs) - remaining := size - for x := chunkViews.Front(); x != nil; x = x.Next { - chunkView := x.Value - if offset < chunkView.ViewOffset { - gap := chunkView.ViewOffset - offset - remaining -= gap - glog.V(4).Infof("zero [%d,%d)", offset, chunkView.ViewOffset) - err := writeZero(writer, gap) + return func(writer io.Writer) error { + downloadThrottler := util.NewWriteThrottler(downloadMaxBytesPs) + remaining := size + for x := chunkViews.Front(); x != nil; x = x.Next { + chunkView := x.Value + if offset < chunkView.ViewOffset { + gap := chunkView.ViewOffset - offset + remaining -= gap + glog.V(4).Infof("zero [%d,%d)", offset, chunkView.ViewOffset) + err := writeZero(writer, gap) + if err != nil { + return fmt.Errorf("write zero [%d,%d)", offset, chunkView.ViewOffset) + } + offset = chunkView.ViewOffset + } + urlStrings := fileId2Url[chunkView.FileId] + start := time.Now() + err := retriedStreamFetchChunkData(writer, urlStrings, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.OffsetInChunk, int(chunkView.ViewSize)) + offset += int64(chunkView.ViewSize) + remaining -= int64(chunkView.ViewSize) + stats.FilerRequestHistogram.WithLabelValues("chunkDownload").Observe(time.Since(start).Seconds()) if err != nil { - return fmt.Errorf("write zero [%d,%d)", offset, chunkView.ViewOffset) + stats.FilerHandlerCounter.WithLabelValues("chunkDownloadError").Inc() + return fmt.Errorf("read chunk: %v", err) } - offset = chunkView.ViewOffset - } - urlStrings := fileId2Url[chunkView.FileId] - start := time.Now() - err := retriedStreamFetchChunkData(writer, urlStrings, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.OffsetInChunk, int(chunkView.ViewSize)) - offset += int64(chunkView.ViewSize) - remaining -= int64(chunkView.ViewSize) - stats.FilerRequestHistogram.WithLabelValues("chunkDownload").Observe(time.Since(start).Seconds()) - if err != nil { - stats.FilerHandlerCounter.WithLabelValues("chunkDownloadError").Inc() - return fmt.Errorf("read chunk: %v", err) + stats.FilerHandlerCounter.WithLabelValues("chunkDownload").Inc() + downloadThrottler.MaybeSlowdown(int64(chunkView.ViewSize)) } - stats.FilerHandlerCounter.WithLabelValues("chunkDownload").Inc() - downloadThrottler.MaybeSlowdown(int64(chunkView.ViewSize)) - } - if remaining > 0 { - glog.V(4).Infof("zero [%d,%d)", offset, offset+remaining) - err := writeZero(writer, remaining) - if err != nil { - return fmt.Errorf("write zero [%d,%d)", offset, offset+remaining) + if remaining > 0 { + glog.V(4).Infof("zero [%d,%d)", offset, offset+remaining) + err := writeZero(writer, remaining) + if err != nil { + return fmt.Errorf("write zero [%d,%d)", offset, offset+remaining) + } } - } - return nil + return 
nil + }, nil +} +func StreamContent(masterClient wdclient.HasLookupFileIdFunction, writer io.Writer, chunks []*filer_pb.FileChunk, offset int64, size int64) error { + streamFn, err := PrepareStreamContent(masterClient, chunks, offset, size) + if err != nil { + return err + } + return streamFn(writer) } // ---------------- ReadAllReader ---------------------------------- diff --git a/weed/s3api/s3api_object_handlers_postpolicy.go b/weed/s3api/s3api_object_handlers_postpolicy.go index 8dd3900ed..1e7861129 100644 --- a/weed/s3api/s3api_object_handlers_postpolicy.go +++ b/weed/s3api/s3api_object_handlers_postpolicy.go @@ -39,7 +39,7 @@ func (s3a *S3ApiServer) PostPolicyBucketHandler(w http.ResponseWriter, r *http.R } defer form.RemoveAll() - fileBody, fileName, fileSize, formValues, err := extractPostPolicyFormValues(form) + fileBody, fileName, fileContentType, fileSize, formValues, err := extractPostPolicyFormValues(form) if err != nil { s3err.WriteErrorResponse(w, r, s3err.ErrMalformedPOSTRequest) return @@ -115,6 +115,14 @@ func (s3a *S3ApiServer) PostPolicyBucketHandler(w http.ResponseWriter, r *http.R uploadUrl := fmt.Sprintf("http://%s%s/%s%s", s3a.option.Filer.ToHttpAddress(), s3a.option.BucketsPath, bucket, urlEscapeObject(object)) + // Get ContentType from post formData + // Otherwise from formFile ContentType + contentType := formValues.Get("Content-Type") + if contentType == "" { + contentType = fileContentType + } + r.Header.Set("Content-Type", contentType) + etag, errCode := s3a.putToFiler(r, uploadUrl, fileBody, "", bucket) if errCode != s3err.ErrNone { @@ -152,9 +160,10 @@ func (s3a *S3ApiServer) PostPolicyBucketHandler(w http.ResponseWriter, r *http.R } // Extract form fields and file data from a HTTP POST Policy -func extractPostPolicyFormValues(form *multipart.Form) (filePart io.ReadCloser, fileName string, fileSize int64, formValues http.Header, err error) { +func extractPostPolicyFormValues(form *multipart.Form) (filePart io.ReadCloser, fileName, fileContentType string, fileSize int64, formValues http.Header, err error) { // / HTML Form values fileName = "" + fileContentType = "" // Canonicalize the form values into http.Header. formValues = make(http.Header) @@ -164,7 +173,7 @@ func extractPostPolicyFormValues(form *multipart.Form) (filePart io.ReadCloser, // Validate form values. 
if err = validateFormFieldSize(formValues); err != nil { - return nil, "", 0, nil, err + return nil, "", "", 0, nil, err } // this means that filename="" was not specified for file key and Go has @@ -177,7 +186,7 @@ func extractPostPolicyFormValues(form *multipart.Form) (filePart io.ReadCloser, } fileSize = int64(b.Len()) filePart = io.NopCloser(b) - return filePart, fileName, fileSize, formValues, nil + return filePart, fileName, fileContentType, fileSize, formValues, nil } // Iterator until we find a valid File field and break @@ -185,32 +194,34 @@ func extractPostPolicyFormValues(form *multipart.Form) (filePart io.ReadCloser, canonicalFormName := http.CanonicalHeaderKey(k) if canonicalFormName == "File" { if len(v) == 0 { - return nil, "", 0, nil, errors.New("Invalid arguments specified") + return nil, "", "", 0, nil, errors.New("Invalid arguments specified") } // Fetch fileHeader which has the uploaded file information fileHeader := v[0] // Set filename fileName = fileHeader.Filename + // Set contentType + fileContentType = fileHeader.Header.Get("Content-Type") // Open the uploaded part filePart, err = fileHeader.Open() if err != nil { - return nil, "", 0, nil, err + return nil, "", "", 0, nil, err } // Compute file size fileSize, err = filePart.(io.Seeker).Seek(0, 2) if err != nil { - return nil, "", 0, nil, err + return nil, "", "", 0, nil, err } // Reset Seek to the beginning _, err = filePart.(io.Seeker).Seek(0, 0) if err != nil { - return nil, "", 0, nil, err + return nil, "", "", 0, nil, err } // File found and ready for reading break } } - return filePart, fileName, fileSize, formValues, nil + return filePart, fileName, fileContentType, fileSize, formValues, nil } // Validate form field size for s3 specification requirement. diff --git a/weed/server/common.go b/weed/server/common.go index d88298402..a7d67fb2e 100644 --- a/weed/server/common.go +++ b/weed/server/common.go @@ -6,7 +6,6 @@ import ( "encoding/json" "errors" "fmt" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" "io" "io/fs" "mime/multipart" @@ -18,6 +17,9 @@ import ( "sync" "time" + "github.com/seaweedfs/seaweedfs/weed/filer" + "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" + "google.golang.org/grpc" "github.com/seaweedfs/seaweedfs/weed/glog" @@ -282,7 +284,7 @@ func adjustHeaderContentDisposition(w http.ResponseWriter, r *http.Request, file } } -func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64, mimeType string, writeFn func(writer io.Writer, offset int64, size int64) error) error { +func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64, mimeType string, prepareWriteFn func(offset int64, size int64) (filer.DoStreamContent, error)) error { rangeReq := r.Header.Get("Range") bufferedWriter := writePool.Get().(*bufio.Writer) bufferedWriter.Reset(w) @@ -293,7 +295,13 @@ func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64 if rangeReq == "" { w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10)) - if err := writeFn(bufferedWriter, 0, totalSize); err != nil { + writeFn, err := prepareWriteFn(0, totalSize) + if err != nil { + glog.Errorf("processRangeRequest: %v", err) + http.Error(w, err.Error(), http.StatusInternalServerError) + return fmt.Errorf("processRangeRequest: %v", err) + } + if err = writeFn(bufferedWriter); err != nil { glog.Errorf("processRangeRequest: %v", err) http.Error(w, err.Error(), http.StatusInternalServerError) return fmt.Errorf("processRangeRequest: %v", err) @@ -335,8 +343,14 @@ 
func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64 w.Header().Set("Content-Length", strconv.FormatInt(ra.length, 10)) w.Header().Set("Content-Range", ra.contentRange(totalSize)) + writeFn, err := prepareWriteFn(ra.start, ra.length) + if err != nil { + glog.Errorf("processRangeRequest range[0]: %+v err: %v", w.Header(), err) + http.Error(w, err.Error(), http.StatusInternalServerError) + return fmt.Errorf("processRangeRequest: %v", err) + } w.WriteHeader(http.StatusPartialContent) - err = writeFn(bufferedWriter, ra.start, ra.length) + err = writeFn(bufferedWriter) if err != nil { glog.Errorf("processRangeRequest range[0]: %+v err: %v", w.Header(), err) http.Error(w, err.Error(), http.StatusInternalServerError) @@ -346,11 +360,20 @@ func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64 } // process multiple ranges - for _, ra := range ranges { + writeFnByRange := make(map[int](func(writer io.Writer) error)) + + for i, ra := range ranges { if ra.start > totalSize { http.Error(w, "Out of Range", http.StatusRequestedRangeNotSatisfiable) return fmt.Errorf("out of range: %v", err) } + writeFn, err := prepareWriteFn(ra.start, ra.length) + if err != nil { + glog.Errorf("processRangeRequest range[%d] err: %v", i, err) + http.Error(w, "Internal Error", http.StatusInternalServerError) + return fmt.Errorf("processRangeRequest range[%d] err: %v", i, err) + } + writeFnByRange[i] = writeFn } sendSize := rangesMIMESize(ranges, mimeType, totalSize) pr, pw := io.Pipe() @@ -359,13 +382,18 @@ func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64 sendContent := pr defer pr.Close() // cause writing goroutine to fail and exit if CopyN doesn't finish. go func() { - for _, ra := range ranges { + for i, ra := range ranges { part, e := mw.CreatePart(ra.mimeHeader(mimeType, totalSize)) if e != nil { pw.CloseWithError(e) return } - if e = writeFn(part, ra.start, ra.length); e != nil { + writeFn := writeFnByRange[i] + if writeFn == nil { + pw.CloseWithError(e) + return + } + if e = writeFn(part); e != nil { pw.CloseWithError(e) return } diff --git a/weed/server/filer_server_handlers_read.go b/weed/server/filer_server_handlers_read.go index 6bdd6a9dd..d1cd3beae 100644 --- a/weed/server/filer_server_handlers_read.go +++ b/weed/server/filer_server_handlers_read.go @@ -67,12 +67,14 @@ func checkPreconditions(w http.ResponseWriter, r *http.Request, entry *filer.Ent ifModifiedSinceHeader := r.Header.Get("If-Modified-Since") if ifNoneMatchETagHeader != "" { if util.CanonicalizeETag(etag) == util.CanonicalizeETag(ifNoneMatchETagHeader) { + setEtag(w, etag) w.WriteHeader(http.StatusNotModified) return true } } else if ifModifiedSinceHeader != "" { if t, parseError := time.Parse(http.TimeFormat, ifModifiedSinceHeader); parseError == nil { if !t.Before(entry.Attr.Mtime) { + setEtag(w, etag) w.WriteHeader(http.StatusNotModified) return true } @@ -147,11 +149,11 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) return } - etag := filer.ETagEntry(entry) if checkPreconditions(w, r, entry) { return } + etag := filer.ETagEntry(entry) w.Header().Set("Accept-Ranges", "bytes") // mime type @@ -229,14 +231,16 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) } } - processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error { + processRangeRequest(r, w, totalSize, mimeType, func(offset int64, size int64) (filer.DoStreamContent, error) { if offset+size <= 
int64(len(entry.Content)) { - _, err := writer.Write(entry.Content[offset : offset+size]) - if err != nil { - stats.FilerHandlerCounter.WithLabelValues(stats.ErrorWriteEntry).Inc() - glog.Errorf("failed to write entry content: %v", err) - } - return err + return func(writer io.Writer) error { + _, err := writer.Write(entry.Content[offset : offset+size]) + if err != nil { + stats.FilerHandlerCounter.WithLabelValues(stats.ErrorWriteEntry).Inc() + glog.Errorf("failed to write entry content: %v", err) + } + return err + }, nil } chunks := entry.GetChunks() if entry.IsInRemoteOnly() { @@ -247,17 +251,25 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) }); err != nil { stats.FilerHandlerCounter.WithLabelValues(stats.ErrorReadCache).Inc() glog.Errorf("CacheRemoteObjectToLocalCluster %s: %v", entry.FullPath, err) - return fmt.Errorf("cache %s: %v", entry.FullPath, err) + return nil, fmt.Errorf("cache %s: %v", entry.FullPath, err) } else { chunks = resp.Entry.GetChunks() } } - err = filer.StreamContentWithThrottler(fs.filer.MasterClient, writer, chunks, offset, size, fs.option.DownloadMaxBytesPs) + streamFn, err := filer.PrepareStreamContentWithThrottler(fs.filer.MasterClient, chunks, offset, size, fs.option.DownloadMaxBytesPs) if err != nil { stats.FilerHandlerCounter.WithLabelValues(stats.ErrorReadStream).Inc() - glog.Errorf("failed to stream content %s: %v", r.URL, err) + glog.Errorf("failed to prepare stream content %s: %v", r.URL, err) + return nil, err } - return err + return func(writer io.Writer) error { + err := streamFn(writer) + if err != nil { + stats.FilerHandlerCounter.WithLabelValues(stats.ErrorReadStream).Inc() + glog.Errorf("failed to stream content %s: %v", r.URL, err) + } + return err + }, nil }) } diff --git a/weed/server/volume_server_handlers_read.go b/weed/server/volume_server_handlers_read.go index 5f4beb77d..08e536811 100644 --- a/weed/server/volume_server_handlers_read.go +++ b/weed/server/volume_server_handlers_read.go @@ -15,6 +15,7 @@ import ( "sync/atomic" "time" + "github.com/seaweedfs/seaweedfs/weed/filer" "github.com/seaweedfs/seaweedfs/weed/storage/types" "github.com/seaweedfs/seaweedfs/weed/util/mem" @@ -382,12 +383,14 @@ func writeResponseContent(filename, mimeType string, rs io.ReadSeeker, w http.Re return nil } - return processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error { - if _, e = rs.Seek(offset, 0); e != nil { + return processRangeRequest(r, w, totalSize, mimeType, func(offset int64, size int64) (filer.DoStreamContent, error) { + return func(writer io.Writer) error { + if _, e = rs.Seek(offset, 0); e != nil { + return e + } + _, e = io.CopyN(writer, rs, size) return e - } - _, e = io.CopyN(writer, rs, size) - return e + }, nil }) } @@ -409,8 +412,10 @@ func (vs *VolumeServer) streamWriteResponseContent(filename string, mimeType str return } - processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error { - return vs.store.ReadVolumeNeedleDataInto(volumeId, n, readOption, writer, offset, size) + processRangeRequest(r, w, totalSize, mimeType, func(offset int64, size int64) (filer.DoStreamContent, error) { + return func(writer io.Writer) error { + return vs.store.ReadVolumeNeedleDataInto(volumeId, n, readOption, writer, offset, size) + }, nil }) } diff --git a/weed/shell/command_cluster_check.go b/weed/shell/command_cluster_check.go index 3fb72940f..03acca5b2 100644 --- a/weed/shell/command_cluster_check.go +++ 
b/weed/shell/command_cluster_check.go @@ -46,13 +46,13 @@ func (c *commandClusterCheck) Do(args []string, commandEnv *CommandEnv, writer i } fmt.Fprintf(writer, "Topology volumeSizeLimit:%d MB%s\n", volumeSizeLimitMb, diskInfosToString(topologyInfo.DiskInfos)) - emptyDiskTypeDiskInfo, emptyDiskTypeFound := topologyInfo.DiskInfos[""] - hddDiskTypeDiskInfo, hddDiskTypeFound := topologyInfo.DiskInfos["hdd"] - if !emptyDiskTypeFound && !hddDiskTypeFound { - return fmt.Errorf("Need to a hdd disk type!") + if len(topologyInfo.DiskInfos) == 0 { + return fmt.Errorf("no disk type defined") } - if emptyDiskTypeFound && emptyDiskTypeDiskInfo.MaxVolumeCount == 0 || hddDiskTypeFound && hddDiskTypeDiskInfo.MaxVolumeCount == 0 { - return fmt.Errorf("Need to a hdd disk type!") + for diskType, diskInfo := range topologyInfo.DiskInfos { + if diskInfo.MaxVolumeCount == 0 { + return fmt.Errorf("no volume available for \"%s\" disk type", diskType) + } } // collect filers @@ -73,6 +73,19 @@ func (c *commandClusterCheck) Do(args []string, commandEnv *CommandEnv, writer i } fmt.Fprintf(writer, "the cluster has %d filers: %+v\n", len(filers), filers) + if len(filers) > 0 { + genericDiskInfo, genericDiskInfoOk := topologyInfo.DiskInfos[""] + hddDiskInfo, hddDiskInfoOk := topologyInfo.DiskInfos["hdd"] + + if !genericDiskInfoOk && !hddDiskInfoOk { + return fmt.Errorf("filer metadata logs need generic or hdd disk type to be defined") + } + + if (genericDiskInfoOk && genericDiskInfo.MaxVolumeCount == 0) || (hddDiskInfoOk && hddDiskInfo.MaxVolumeCount == 0) { + return fmt.Errorf("filer metadata logs need generic or hdd volumes to be available") + } + } + // collect volume servers var volumeServers []pb.ServerAddress t, _, err := collectTopologyInfo(commandEnv, 0) @@ -90,9 +103,7 @@ func (c *commandClusterCheck) Do(args []string, commandEnv *CommandEnv, writer i // collect all masters var masters []pb.ServerAddress - for _, master := range commandEnv.MasterClient.GetMasters() { - masters = append(masters, master) - } + masters = append(masters, commandEnv.MasterClient.GetMasters()...) 
// check from master to volume servers for _, master := range masters { diff --git a/weed/shell/command_volume_balance.go b/weed/shell/command_volume_balance.go index 2284ceea6..cb201e064 100644 --- a/weed/shell/command_volume_balance.go +++ b/weed/shell/command_volume_balance.go @@ -1,6 +1,7 @@ package shell import ( + "cmp" "flag" "fmt" "io" @@ -79,7 +80,7 @@ func (c *commandVolumeBalance) Do(args []string, commandEnv *CommandEnv, writer } // collect topology information - topologyInfo, volumeSizeLimitMb, err := collectTopologyInfo(commandEnv, 15*time.Second) + topologyInfo, _, err := collectTopologyInfo(commandEnv, 15*time.Second) if err != nil { return err } @@ -94,16 +95,12 @@ func (c *commandVolumeBalance) Do(args []string, commandEnv *CommandEnv, writer return err } for _, c := range collections { - if err = balanceVolumeServers(commandEnv, diskTypes, volumeReplicas, volumeServers, volumeSizeLimitMb*1024*1024, c, *applyBalancing); err != nil { + if err = balanceVolumeServers(commandEnv, diskTypes, volumeReplicas, volumeServers, c, *applyBalancing); err != nil { return err } } - } else if *collection == "ALL_COLLECTIONS" { - if err = balanceVolumeServers(commandEnv, diskTypes, volumeReplicas, volumeServers, volumeSizeLimitMb*1024*1024, "ALL_COLLECTIONS", *applyBalancing); err != nil { - return err - } } else { - if err = balanceVolumeServers(commandEnv, diskTypes, volumeReplicas, volumeServers, volumeSizeLimitMb*1024*1024, *collection, *applyBalancing); err != nil { + if err = balanceVolumeServers(commandEnv, diskTypes, volumeReplicas, volumeServers, *collection, *applyBalancing); err != nil { return err } } @@ -111,10 +108,10 @@ func (c *commandVolumeBalance) Do(args []string, commandEnv *CommandEnv, writer return nil } -func balanceVolumeServers(commandEnv *CommandEnv, diskTypes []types.DiskType, volumeReplicas map[uint32][]*VolumeReplica, nodes []*Node, volumeSizeLimit uint64, collection string, applyBalancing bool) error { +func balanceVolumeServers(commandEnv *CommandEnv, diskTypes []types.DiskType, volumeReplicas map[uint32][]*VolumeReplica, nodes []*Node, collection string, applyBalancing bool) error { for _, diskType := range diskTypes { - if err := balanceVolumeServersByDiskType(commandEnv, diskType, volumeReplicas, nodes, volumeSizeLimit, collection, applyBalancing); err != nil { + if err := balanceVolumeServersByDiskType(commandEnv, diskType, volumeReplicas, nodes, collection, applyBalancing); err != nil { return err } } @@ -122,7 +119,7 @@ func balanceVolumeServers(commandEnv *CommandEnv, diskTypes []types.DiskType, vo } -func balanceVolumeServersByDiskType(commandEnv *CommandEnv, diskType types.DiskType, volumeReplicas map[uint32][]*VolumeReplica, nodes []*Node, volumeSizeLimit uint64, collection string, applyBalancing bool) error { +func balanceVolumeServersByDiskType(commandEnv *CommandEnv, diskType types.DiskType, volumeReplicas map[uint32][]*VolumeReplica, nodes []*Node, collection string, applyBalancing bool) error { for _, n := range nodes { n.selectVolumes(func(v *master_pb.VolumeInformationMessage) bool { @@ -164,7 +161,7 @@ func collectVolumeDiskTypes(t *master_pb.TopologyInfo) (diskTypes []types.DiskTy for _, dc := range t.DataCenterInfos { for _, r := range dc.RackInfos { for _, dn := range r.DataNodeInfos { - for diskType, _ := range dn.DiskInfos { + for diskType := range dn.DiskInfos { if _, found := knownTypes[diskType]; !found { knownTypes[diskType] = true } @@ -172,7 +169,7 @@ func collectVolumeDiskTypes(t *master_pb.TopologyInfo) (diskTypes 
diff --git a/weed/shell/command_volume_balance_test.go b/weed/shell/command_volume_balance_test.go
index 20c5abdf8..ce0aeb5ab 100644
--- a/weed/shell/command_volume_balance_test.go
+++ b/weed/shell/command_volume_balance_test.go
@@ -255,7 +255,7 @@ func TestBalance(t *testing.T) {
 	volumeReplicas, _ := collectVolumeReplicaLocations(topologyInfo)
 	diskTypes := collectVolumeDiskTypes(topologyInfo)
 
-	if err := balanceVolumeServers(nil, diskTypes, volumeReplicas, volumeServers, 30*1024*1024*1024, "ALL_COLLECTIONS", false); err != nil {
+	if err := balanceVolumeServers(nil, diskTypes, volumeReplicas, volumeServers, "ALL_COLLECTIONS", false); err != nil {
 		t.Errorf("balance: %v", err)
 	}
 
diff --git a/weed/storage/needle_map/memdb.go b/weed/storage/needle_map/memdb.go
index fb3d0130d..d3d47b605 100644
--- a/weed/storage/needle_map/memdb.go
+++ b/weed/storage/needle_map/memdb.go
@@ -1,11 +1,9 @@
 package needle_map
 
 import (
-	"bytes"
 	"fmt"
 	"io"
 	"os"
-	"sort"
 
 	"github.com/syndtr/goleveldb/leveldb"
 	"github.com/syndtr/goleveldb/leveldb/iterator"
@@ -36,6 +34,7 @@ func NewMemDb() *MemDb {
 }
 
 func (cm *MemDb) Set(key NeedleId, offset Offset, size Size) error {
+
 	bytes := ToBytes(key, offset, size)
 
 	if err := cm.db.Put(bytes[0:NeedleIdSize], bytes[NeedleIdSize:NeedleIdSize+OffsetSize+SizeSize], nil); err != nil {
@@ -77,31 +76,6 @@ func doVisit(iter iterator.Iterator, visit func(NeedleValue) error) (ret error)
 	return nil
 }
 
-func (cm *MemDb) AscendingVisitByOffset(visit func(NeedleValue) error) (ret error) {
-	var needles []NeedleValue
-	err := cm.AscendingVisit(func(value NeedleValue) error {
-		needles = append(needles, value)
-		return nil
-	})
-	if err != nil {
-		return err
-	}
-	sort.Slice(needles, func(i, j int) bool {
-		i_bytes := make([]byte, OffsetSize)
-		j_bytes := make([]byte, OffsetSize)
-		OffsetToBytes(i_bytes, needles[i].Offset)
-		OffsetToBytes(j_bytes, needles[j].Offset)
-		return bytes.Compare(i_bytes, j_bytes) < 0
-	})
-	for _, needle := range needles {
-		ret = visit(needle)
-		if ret != nil {
-			return ret
-		}
-	}
-	return nil
-}
-
 func (cm *MemDb) AscendingVisit(visit func(NeedleValue) error) (ret error) {
 	iter := cm.db.NewIterator(nil, nil)
 	if iter.First() {
@@ -148,7 +122,7 @@ func (cm *MemDb) SaveToIdx(idxName string) (ret error) {
 		idxFile.Close()
 	}()
 
-	return cm.AscendingVisitByOffset(func(value NeedleValue) error {
+	return cm.AscendingVisit(func(value NeedleValue) error {
 		if value.Offset.IsZero() || value.Size.IsDeleted() {
 			return nil
 		}
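With AscendingVisitByOffset removed, SaveToIdx streams entries in the order the in-memory leveldb iterator yields them, i.e. ascending needle-id (key) order, instead of collecting and re-sorting them by offset first. The sketch below shows that iterator-driven visitor pattern on an in-memory goleveldb store; it is a stand-alone illustration, not the MemDb implementation.

```go
// Visitor over an in-memory goleveldb store: entries come back in ascending
// key order regardless of insertion order. Illustrative only.
package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/storage"
)

func ascendingVisit(db *leveldb.DB, visit func(key, value []byte) error) error {
	iter := db.NewIterator(nil, nil)
	defer iter.Release()
	for ok := iter.First(); ok; ok = iter.Next() {
		if err := visit(iter.Key(), iter.Value()); err != nil {
			return err
		}
	}
	return iter.Error()
}

func main() {
	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
	defer db.Close()

	// Keys inserted out of order still come back sorted by key bytes.
	_ = db.Put([]byte{0x02}, []byte("b"), nil)
	_ = db.Put([]byte{0x01}, []byte("a"), nil)

	_ = ascendingVisit(db, func(key, value []byte) error {
		fmt.Printf("%x => %s\n", key, value) // 01 => a, then 02 => b
		return nil
	})
}
```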
diff --git a/weed/storage/volume_info/volume_info.go b/weed/storage/volume_info/volume_info.go
index 2c6896830..59c08a833 100644
--- a/weed/storage/volume_info/volume_info.go
+++ b/weed/storage/volume_info/volume_info.go
@@ -76,9 +76,8 @@ func SaveVolumeInfo(fileName string, volumeInfo *volume_server_pb.VolumeInfo) er
 		return fmt.Errorf("failed to marshal %s: %v", fileName, marshalErr)
 	}
 
-	writeErr := util.WriteFile(fileName, text, 0755)
-	if writeErr != nil {
-		return fmt.Errorf("failed to write %s: %v", fileName, writeErr)
+	if err := util.WriteFile(fileName, text, 0644); err != nil {
+		return fmt.Errorf("failed to write %s: %v", fileName, err)
 	}
 
 	return nil
diff --git a/weed/topology/topology.go b/weed/topology/topology.go
index 03d7570c1..0a4cb4050 100644
--- a/weed/topology/topology.go
+++ b/weed/topology/topology.go
@@ -19,6 +19,7 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/glog"
 	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
 	"github.com/seaweedfs/seaweedfs/weed/sequence"
+	"github.com/seaweedfs/seaweedfs/weed/stats"
 	"github.com/seaweedfs/seaweedfs/weed/storage"
 	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
 	"github.com/seaweedfs/seaweedfs/weed/storage/super_block"
@@ -273,6 +274,9 @@ func (t *Topology) RegisterVolumeLayout(v storage.VolumeInfo, dn *DataNode) {
 }
 func (t *Topology) UnRegisterVolumeLayout(v storage.VolumeInfo, dn *DataNode) {
 	glog.Infof("removing volume info: %+v from %v", v, dn.id)
+	if v.ReplicaPlacement.GetCopyCount() > 1 {
+		stats.MasterReplicaPlacementMismatch.WithLabelValues(v.Collection, v.Id.String()).Set(0)
+	}
 	diskType := types.ToDiskType(v.DiskType)
 	volumeLayout := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl, diskType)
 	volumeLayout.UnRegisterVolume(&v, dn)
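UnRegisterVolumeLayout now zeroes the replica-placement-mismatch metric for a replicated volume when it is removed, so a previously raised mismatch does not linger as a stale series. Assuming stats.MasterReplicaPlacementMismatch is a prometheus.GaugeVec labeled by collection and volume id (its definition is not part of this diff), the reset pattern looks like this sketch:

```go
// Hedged sketch of the gauge-reset pattern; the metric name, help text, and
// label names here are illustrative assumptions, not the SeaweedFS definitions.
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

var replicaPlacementMismatch = prometheus.NewGaugeVec(
	prometheus.GaugeOpts{
		Name: "master_replica_placement_mismatch",
		Help: "1 when a volume's replica count does not match its replica placement",
	},
	[]string{"collection", "id"},
)

func main() {
	// A mismatch was previously reported for volume 42 in collection "pics".
	replicaPlacementMismatch.WithLabelValues("pics", "42").Set(1)

	// On volume removal, clear it so the series no longer reports a mismatch.
	replicaPlacementMismatch.WithLabelValues("pics", "42").Set(0)

	// Inspect the series value directly (normally scraped via /metrics).
	var m dto.Metric
	_ = replicaPlacementMismatch.WithLabelValues("pics", "42").Write(&m)
	fmt.Println(m.GetGauge().GetValue()) // 0
}
```

An alternative would be dropping the series entirely with GaugeVec.DeleteLabelValues; the change above keeps the series and sets it to 0, matching the diff.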