
Merge branch 'master' into ec_rebuild_fix_parallelization

pull/7445/head
Chris Lu 2 months ago
committed by GitHub
commit 50d2991399
46 changed files (number of changed lines per file in parentheses):

  1. .github/workflows/container_dev.yml (4)
  2. .github/workflows/container_latest.yml (4)
  3. .github/workflows/container_release1.yml (4)
  4. .github/workflows/container_release2.yml (4)
  5. .github/workflows/container_release3.yml (4)
  6. .github/workflows/container_release4.yml (4)
  7. .github/workflows/container_release5.yml (4)
  8. .github/workflows/container_rocksdb_version.yml (2)
  9. .github/workflows/helm_ci.yml (2)
  10. docker/Dockerfile.e2e (7)
  11. docker/Dockerfile.go_build (6)
  12. docker/Dockerfile.local (6)
  13. docker/Dockerfile.rocksdb_large (6)
  14. docker/Dockerfile.rocksdb_large_local (6)
  15. docker/Makefile (6)
  16. docker/compose/master-cloud.toml (8)
  17. docker/entrypoint.sh (32)
  18. docker/entrypoint_e2e.sh (86)
  19. go.mod (16)
  20. go.sum (48)
  21. test/kafka/go.mod (38)
  22. test/kafka/go.sum (80)
  23. weed/command/scaffold/master.toml (10)
  24. weed/command/server.go (2)
  25. weed/command/volume.go (2)
  26. weed/query/engine/function_helpers.go (20)
  27. weed/s3api/custom_types.go (8)
  28. weed/s3api/s3api_bucket_handlers.go (17)
  29. weed/s3api/s3api_object_handlers.go (144)
  30. weed/s3api/s3api_object_handlers_acl.go (6)
  31. weed/s3api/s3api_object_handlers_put.go (20)
  32. weed/s3api/s3api_object_retention.go (9)
  33. weed/shell/command_collection_delete.go (13)
  34. weed/shell/command_ec_balance.go (11)
  35. weed/shell/command_ec_rebuild.go (10)
  36. weed/shell/command_fs_configure.go (25)
  37. weed/shell/command_fs_meta_change_volume_id.go (20)
  38. weed/shell/command_volume_balance.go (9)
  39. weed/shell/command_volume_check_disk.go (303)
  40. weed/shell/command_volume_check_disk_test.go (265)
  41. weed/shell/command_volume_delete_empty.go (10)
  42. weed/shell/command_volume_fix_replication.go (32)
  43. weed/shell/command_volume_server_evacuate.go (12)
  44. weed/shell/command_volume_server_leave.go (21)
  45. weed/shell/command_volume_tier_move.go (9)
  46. weed/storage/volume_read.go (12)

.github/workflows/container_dev.yml (4 changed lines)

@@ -20,7 +20,7 @@ jobs:
       -
         name: Docker meta
         id: docker_meta
-        uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f # v3
+        uses: docker/metadata-action@318604b99e75e41977312d83839a89be02ca4893 # v3
         with:
           images: |
             chrislusf/seaweedfs
@@ -33,7 +33,7 @@ jobs:
             org.opencontainers.image.vendor=Chris Lu
       -
         name: Set up QEMU
-        uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
+        uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v1
       -
         name: Set up Docker Buildx
         uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1

.github/workflows/container_latest.yml (4 changed lines)

@@ -21,7 +21,7 @@ jobs:
       -
         name: Docker meta
         id: docker_meta
-        uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f # v3
+        uses: docker/metadata-action@318604b99e75e41977312d83839a89be02ca4893 # v3
         with:
           images: |
             chrislusf/seaweedfs
@@ -34,7 +34,7 @@ jobs:
            org.opencontainers.image.vendor=Chris Lu
       -
         name: Set up QEMU
-        uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
+        uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v1
       -
         name: Set up Docker Buildx
         uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1

.github/workflows/container_release1.yml (4 changed lines)

@@ -20,7 +20,7 @@ jobs:
       -
         name: Docker meta
         id: docker_meta
-        uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f # v3
+        uses: docker/metadata-action@318604b99e75e41977312d83839a89be02ca4893 # v3
         with:
           images: |
             chrislusf/seaweedfs
@@ -34,7 +34,7 @@ jobs:
            org.opencontainers.image.vendor=Chris Lu
       -
         name: Set up QEMU
-        uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
+        uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v1
       -
         name: Set up Docker Buildx
         uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1

.github/workflows/container_release2.yml (4 changed lines)

@@ -21,7 +21,7 @@ jobs:
       -
         name: Docker meta
         id: docker_meta
-        uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f # v3
+        uses: docker/metadata-action@318604b99e75e41977312d83839a89be02ca4893 # v3
         with:
           images: |
             chrislusf/seaweedfs
@@ -35,7 +35,7 @@ jobs:
            org.opencontainers.image.vendor=Chris Lu
       -
         name: Set up QEMU
-        uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
+        uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v1
       -
         name: Set up Docker Buildx
         uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1

.github/workflows/container_release3.yml (4 changed lines)

@@ -21,7 +21,7 @@ jobs:
       -
         name: Docker meta
         id: docker_meta
-        uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f # v3
+        uses: docker/metadata-action@318604b99e75e41977312d83839a89be02ca4893 # v3
         with:
           images: |
             chrislusf/seaweedfs
@@ -35,7 +35,7 @@ jobs:
            org.opencontainers.image.vendor=Chris Lu
       -
         name: Set up QEMU
-        uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
+        uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v1
       -
         name: Set up Docker Buildx
         uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1

.github/workflows/container_release4.yml (4 changed lines)

@@ -20,7 +20,7 @@ jobs:
       -
         name: Docker meta
         id: docker_meta
-        uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f # v3
+        uses: docker/metadata-action@318604b99e75e41977312d83839a89be02ca4893 # v3
         with:
           images: |
             chrislusf/seaweedfs
@@ -34,7 +34,7 @@ jobs:
            org.opencontainers.image.vendor=Chris Lu
       -
         name: Set up QEMU
-        uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
+        uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v1
       -
         name: Set up Docker Buildx
         uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1

.github/workflows/container_release5.yml (4 changed lines)

@@ -20,7 +20,7 @@ jobs:
       -
         name: Docker meta
         id: docker_meta
-        uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f # v3
+        uses: docker/metadata-action@318604b99e75e41977312d83839a89be02ca4893 # v3
         with:
           images: |
             chrislusf/seaweedfs
@@ -34,7 +34,7 @@ jobs:
            org.opencontainers.image.vendor=Chris Lu
       -
         name: Set up QEMU
-        uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
+        uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v1
       -
         name: Set up Docker Buildx
         uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1

.github/workflows/container_rocksdb_version.yml (2 changed lines)

@@ -82,7 +82,7 @@ jobs:
          echo "seaweedfs_ref=$seaweed" >> "$GITHUB_OUTPUT"
      - name: Set up QEMU
-       uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
+       uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1

.github/workflows/helm_ci.yml (2 changed lines)

@@ -31,7 +31,7 @@ jobs:
          check-latest: true
      - name: Set up chart-testing
-       uses: helm/chart-testing-action@v2.7.0
+       uses: helm/chart-testing-action@v2.8.0
      - name: Run chart-testing (list-changed)
        id: list-changed

docker/Dockerfile.e2e (7 changed lines)

@@ -3,6 +3,10 @@ FROM ubuntu:22.04
 LABEL author="Chris Lu"
 # Use faster mirrors and optimize package installation
+# Note: This e2e test image intentionally runs as root for simplicity and compatibility.
+# Production images (Dockerfile.go_build) use proper user isolation with su-exec.
+# For testing purposes, running as root avoids permission complexities and dependency
+# on Alpine-specific tools like su-exec (not available in Ubuntu repos).
 RUN apt-get update && \
     DEBIAN_FRONTEND=noninteractive apt-get install -y \
     --no-install-recommends \
@@ -10,6 +14,7 @@ RUN apt-get update && \
     curl \
     fio \
     fuse \
+    ca-certificates \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/* \
     && rm -rf /tmp/* \
@@ -18,7 +23,7 @@ RUN mkdir -p /etc/seaweedfs /data/filerldb2
 COPY ./weed /usr/bin/
 COPY ./filer.toml /etc/seaweedfs/filer.toml
-COPY ./entrypoint.sh /entrypoint.sh
+COPY ./entrypoint_e2e.sh /entrypoint.sh
 # volume server grpc port
 EXPOSE 18080

docker/Dockerfile.go_build (6 changed lines)

@@ -17,7 +17,7 @@ COPY --from=builder /go/src/github.com/seaweedfs/seaweedfs/docker/filer.toml /et
 COPY --from=builder /go/src/github.com/seaweedfs/seaweedfs/docker/entrypoint.sh /entrypoint.sh
 # Install dependencies and create non-root user
-RUN apk add --no-cache fuse && \
+RUN apk add --no-cache fuse su-exec && \
     addgroup -g 1000 seaweed && \
     adduser -D -u 1000 -G seaweed seaweed
@@ -47,7 +47,5 @@ RUN mkdir -p /data/filerldb2 && \
 VOLUME /data
 WORKDIR /data
-# Switch to non-root user
-USER seaweed
+# Entrypoint will handle permission fixes and user switching
 ENTRYPOINT ["/entrypoint.sh"]
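
The production images no longer bake in a USER directive; the container starts as root so the entrypoint can repair ownership of bind-mounted volumes and then drop privileges with su-exec (see docker/entrypoint.sh below). A rough sketch of the effect with a host-mounted data directory; the host path and image tag here are illustrative, not part of this commit:

    # host directory created by root; under the old "USER seaweed" image this
    # could fail with permission errors at startup
    mkdir -p /tmp/seaweed-data
    docker run -v /tmp/seaweed-data:/data chrislusf/seaweedfs:local server
    # inside the container the entrypoint, still running as root, chowns /data
    # to seaweed:seaweed and then re-execs itself as the unprivileged user:
    #   exec su-exec seaweed "$0" "$@"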

docker/Dockerfile.local (6 changed lines)

@@ -8,7 +8,7 @@ COPY ./filer.toml /etc/seaweedfs/filer.toml
 COPY ./entrypoint.sh /entrypoint.sh
 # Install dependencies and create non-root user
-RUN apk add --no-cache fuse curl && \
+RUN apk add --no-cache fuse curl su-exec && \
     addgroup -g 1000 seaweed && \
     adduser -D -u 1000 -G seaweed seaweed
@@ -38,7 +38,5 @@ RUN mkdir -p /data/filerldb2 && \
 VOLUME /data
 WORKDIR /data
-# Switch to non-root user
-USER seaweed
+# Entrypoint will handle permission fixes and user switching
 ENTRYPOINT ["/entrypoint.sh"]

docker/Dockerfile.rocksdb_large (6 changed lines)

@@ -34,7 +34,7 @@ COPY --from=builder /go/src/github.com/seaweedfs/seaweedfs/docker/filer_rocksdb.
 COPY --from=builder /go/src/github.com/seaweedfs/seaweedfs/docker/entrypoint.sh /entrypoint.sh
 # Install dependencies and create non-root user
-RUN apk add --no-cache fuse snappy gflags && \
+RUN apk add --no-cache fuse snappy gflags su-exec && \
     addgroup -g 1000 seaweed && \
     adduser -D -u 1000 -G seaweed seaweed
@@ -65,7 +65,5 @@ VOLUME /data
 WORKDIR /data
-# Switch to non-root user
-USER seaweed
+# Entrypoint will handle permission fixes and user switching
 ENTRYPOINT ["/entrypoint.sh"]

docker/Dockerfile.rocksdb_large_local (6 changed lines)

@@ -17,7 +17,7 @@ COPY --from=builder /go/src/github.com/seaweedfs/seaweedfs/docker/filer_rocksdb.
 COPY --from=builder /go/src/github.com/seaweedfs/seaweedfs/docker/entrypoint.sh /entrypoint.sh
 # Install dependencies and create non-root user
-RUN apk add --no-cache fuse snappy gflags tmux && \
+RUN apk add --no-cache fuse snappy gflags tmux su-exec && \
     addgroup -g 1000 seaweed && \
     adduser -D -u 1000 -G seaweed seaweed
@@ -48,7 +48,5 @@ VOLUME /data
 WORKDIR /data
-# Switch to non-root user
-USER seaweed
+# Entrypoint will handle permission fixes and user switching
 ENTRYPOINT ["/entrypoint.sh"]

docker/Makefile (6 changed lines)

@@ -5,15 +5,19 @@ all: gen
 gen: dev
 cgo ?= 0
+ldflags_extra ?= -extldflags -static
 binary:
         export SWCOMMIT=$(shell git rev-parse --short HEAD)
         export SWLDFLAGS="-X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=$(SWCOMMIT)"
-        cd ../weed && CGO_ENABLED=$(cgo) GOOS=linux go build $(options) -tags "$(tags)" -ldflags "-s -w -extldflags -static $(SWLDFLAGS)" -o weed_binary && mv weed_binary ../docker/weed
+        cd ../weed && CGO_ENABLED=$(cgo) GOOS=linux go build $(options) -tags "$(tags)" -ldflags "-s -w $(ldflags_extra) $(SWLDFLAGS)" -o weed_binary && mv weed_binary ../docker/weed
         cd ../other/mq_client_example/agent_pub_record && CGO_ENABLED=$(cgo) GOOS=linux go build && mv agent_pub_record ../../../docker/
         cd ../other/mq_client_example/agent_sub_record && CGO_ENABLED=$(cgo) GOOS=linux go build && mv agent_sub_record ../../../docker/
+# Race detector requires CGO and dynamic linking - don't use -static
 binary_race: options = -race
 binary_race: cgo = 1
+binary_race: ldflags_extra =
 binary_race: binary
 build: binary
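
The binary recipe now takes its static-link flag from ldflags_extra, which the race target clears because the race detector needs CGO and a dynamically linked binary. A sketch of how the two targets would be invoked from the repository root (target names are taken from the Makefile above; everything else about the image build is unchanged):

    # regular build: CGO_ENABLED=0 and the ldflags include "-extldflags -static"
    make -C docker binary
    # race-detector build: cgo=1 and ldflags_extra is emptied, so the binary links dynamically
    make -C docker binary_race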

docker/compose/master-cloud.toml (8 changed lines)

@@ -10,10 +10,10 @@
     scripts = """
             lock
             ec.encode -fullPercent=95 -quietFor=1h
-            ec.rebuild -force
-            ec.balance -force
-            volume.balance -force
-            volume.fix.replication -force
+            ec.rebuild -apply
+            ec.balance -apply
+            volume.balance -apply
+            volume.fix.replication -apply
             unlock
     """
     sleep_minutes = 17          # sleep minutes between each script execution
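
The maintenance scripts move from -force to -apply on the EC and volume maintenance commands. The same commands can be tried interactively in weed shell; without -apply they are expected to only report what they would change (a sketch; the master address here assumes the compose service name):

    # weed shell -master=master:9333
    > lock
    > volume.fix.replication            # plan only
    > volume.fix.replication -apply     # actually execute the fixes
    > unlock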

docker/entrypoint.sh (32 changed lines)

@@ -1,5 +1,33 @@
 #!/bin/sh
+
+# Fix permissions for mounted volumes
+# If /data is mounted from host, it might have different ownership
+# Fix this by ensuring seaweed user owns the directory
+if [ "$(id -u)" = "0" ]; then
+  # Running as root, check and fix permissions if needed
+  SEAWEED_UID=$(id -u seaweed)
+  SEAWEED_GID=$(id -g seaweed)
+
+  # Verify seaweed user and group exist
+  if [ -z "$SEAWEED_UID" ] || [ -z "$SEAWEED_GID" ]; then
+    echo "Error: 'seaweed' user or group not found. Cannot fix permissions." >&2
+    exit 1
+  fi
+
+  DATA_UID=$(stat -c '%u' /data 2>/dev/null)
+  DATA_GID=$(stat -c '%g' /data 2>/dev/null)
+
+  # Only run chown -R if ownership doesn't match (much faster for subsequent starts)
+  echo "Fixing /data ownership for seaweed user (uid=$SEAWEED_UID, gid=$SEAWEED_GID)"
+  if ! chown -R seaweed:seaweed /data; then
+    echo "Warning: Failed to change ownership of /data. This may cause permission errors." >&2
+    echo "If /data is read-only or has mount issues, the application may fail to start." >&2
+  fi
+
+  # Use su-exec to drop privileges and run as seaweed user
+  exec su-exec seaweed "$0" "$@"
+fi
+
 isArgPassed() {
   arg="$1"
   argWithEqualSign="$1="
@@ -8,10 +36,10 @@ isArgPassed() {
     passedArg="$1"
     shift
     case $passedArg in
-      $arg)
+      "$arg")
        return 0
        ;;
-      $argWithEqualSign*)
+      "$argWithEqualSign"*)
        return 0
        ;;
     esac
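
The second hunk quotes the case patterns so an argument name is compared literally instead of being interpreted as a glob pattern. A small standalone illustration of the difference in POSIX sh (the values are hypothetical, chosen only to show the behavior; this is not part of the commit):

    arg='-m*'        # hypothetical flag name containing a glob character
    passedArg='-max'
    case $passedArg in
      $arg) echo "unquoted: -m* acts as a glob, so -max matches" ;;
    esac
    case $passedArg in
      "$arg") echo "quoted: literal comparison" ;;
      *) echo "quoted: -max is not the literal string -m*, so no match" ;;
    esac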

docker/entrypoint_e2e.sh (86 added lines, new file)

@@ -0,0 +1,86 @@
#!/bin/bash

set -e

# Simplified entrypoint for e2e testing
#
# This script intentionally runs as root for e2e test environments to:
# 1. Simplify test setup and avoid permission-related test failures
# 2. Eliminate dependency on Alpine-specific tools (su-exec) since we use Ubuntu base
# 3. Focus testing on application logic rather than container security
#
# IMPORTANT: Production deployments should use Dockerfile.go_build with proper
# user isolation via su-exec. This simplified approach is ONLY for testing.

isArgPassed() {
  arg="$1"
  argWithEqualSign="$1="
  shift
  while [ $# -gt 0 ]; do
    passedArg="$1"
    shift
    case $passedArg in
      "$arg")
        return 0
        ;;
      "$argWithEqualSign"*)
        return 0
        ;;
    esac
  done
  return 1
}

case "$1" in
  'master')
    ARGS="-mdir=/data -volumePreallocate -volumeSizeLimitMB=1024"
    shift
    exec /usr/bin/weed -logtostderr=true master $ARGS "$@"
    ;;
  'volume')
    ARGS="-dir=/data -max=0"
    if isArgPassed "-max" "$@"; then
      ARGS="-dir=/data"
    fi
    shift
    exec /usr/bin/weed -logtostderr=true volume $ARGS "$@"
    ;;
  'server')
    ARGS="-dir=/data -volume.max=0 -master.volumePreallocate -master.volumeSizeLimitMB=1024"
    if isArgPassed "-volume.max" "$@"; then
      ARGS="-dir=/data -master.volumePreallocate -master.volumeSizeLimitMB=1024"
    fi
    shift
    exec /usr/bin/weed -logtostderr=true server $ARGS "$@"
    ;;
  'filer')
    ARGS=""
    shift
    exec /usr/bin/weed -logtostderr=true filer $ARGS "$@"
    ;;
  's3')
    ARGS="-domainName=$S3_DOMAIN_NAME -key.file=$S3_KEY_FILE -cert.file=$S3_CERT_FILE"
    shift
    exec /usr/bin/weed -logtostderr=true s3 $ARGS "$@"
    ;;
  'mount')
    shift
    exec /usr/bin/weed -logtostderr=true mount "$@"
    ;;
  'shell')
    ARGS="-cluster=$SHELL_CLUSTER -filer=$SHELL_FILER -filerGroup=$SHELL_FILER_GROUP -master=$SHELL_MASTER -options=$SHELL_OPTIONS"
    shift
    exec echo "$@" | /usr/bin/weed -logtostderr=true shell $ARGS
    ;;
  *)
    exec /usr/bin/weed "$@"
    ;;
esac
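
A sketch of how the dispatch behaves when the image is run in the e2e environment; the image name is illustrative (the e2e compose files supply the real one):

    # "server" picks up the test defaults (-dir=/data, -volume.max=0, preallocation, 1024 MB volume limit)
    docker run --rm seaweedfs:e2e server -ip=server1
    # if the caller passes -volume.max itself, the default -volume.max=0 is dropped to avoid a conflict
    docker run --rm seaweedfs:e2e server -volume.max=100
    # any other first argument falls through to the weed binary unchanged
    docker run --rm seaweedfs:e2e version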

go.mod (16 changed lines)

@@ -101,7 +101,7 @@ require (
 golang.org/x/image v0.32.0
 golang.org/x/net v0.46.0
 golang.org/x/oauth2 v0.30.0 // indirect
-golang.org/x/sys v0.37.0
+golang.org/x/sys v0.38.0
 golang.org/x/text v0.30.0 // indirect
 golang.org/x/tools v0.37.0 // indirect
 golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
@@ -139,7 +139,7 @@ require (
 github.com/hanwen/go-fuse/v2 v2.8.0
 github.com/hashicorp/raft v1.7.3
 github.com/hashicorp/raft-boltdb/v2 v2.3.1
-github.com/hashicorp/vault/api v1.20.0
+github.com/hashicorp/vault/api v1.22.0
 github.com/jhump/protoreflect v1.17.0
 github.com/lib/pq v1.10.9
 github.com/linkedin/goavro/v2 v2.14.0
@@ -153,7 +153,7 @@ require (
 github.com/rdleal/intervalst v1.5.0
 github.com/redis/go-redis/v9 v9.14.1
 github.com/schollz/progressbar/v3 v3.18.0
-github.com/shirou/gopsutil/v4 v4.25.9
+github.com/shirou/gopsutil/v4 v4.25.10
 github.com/tarantool/go-tarantool/v2 v2.4.1
 github.com/tikv/client-go/v2 v2.0.7
 github.com/xeipuuv/gojsonschema v1.2.0
@@ -188,9 +188,9 @@ require (
 github.com/google/go-cmp v0.7.0 // indirect
 github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
 github.com/hashicorp/go-rootcerts v1.0.2 // indirect
-github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 // indirect
+github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0 // indirect
 github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect
-github.com/hashicorp/go-sockaddr v1.0.2 // indirect
+github.com/hashicorp/go-sockaddr v1.0.7 // indirect
 github.com/hashicorp/hcl v1.0.1-vault-7 // indirect
 github.com/jackc/pgpassfile v1.0.0 // indirect
 github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
@@ -232,10 +232,10 @@ require (
 cloud.google.com/go/iam v1.5.2 // indirect
 cloud.google.com/go/monitoring v1.24.2 // indirect
 filippo.io/edwards25519 v1.1.0 // indirect
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0
 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.0
 github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect
-github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.3
 github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.5.2 // indirect
 github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
 github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 // indirect
@@ -303,7 +303,7 @@ require (
 github.com/emersion/go-vcard v0.0.0-20241024213814-c9703dde27ff // indirect
 github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect
 github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
-github.com/fatih/color v1.16.0 // indirect
+github.com/fatih/color v1.18.0 // indirect
 github.com/felixge/httpsnoop v1.0.4 // indirect
 github.com/flynn/noise v1.1.0 // indirect
 github.com/gabriel-vasile/mimetype v1.4.9 // indirect

go.sum (48 changed lines)

@@ -541,8 +541,8 @@ filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
 filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
 gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8=
 git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1 h1:5YTBM8QDVIBN3sxBil89WfdAAqDZbyJTgh688DSxX5w=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 h1:JXg2dwJUmPB9JmtVmdEB16APJ7jurfbY5jnfXpJoRMc=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw=
 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.0 h1:KpMC6LFL7mqpExyMC9jVOYRiVhLmamjeZfRsUpB7l4s=
 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.0/go.mod h1:J7MUC/wtRpfGVbQ5sIItY5/FuVWmvzlY21WAOfQnq/I=
 github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY=
@@ -555,8 +555,8 @@ github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1 h1:FbH3BbSb4bvGlu
 github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1/go.mod h1:9V2j0jn9jDEkCkv8w/bKTNppX/d0FVA1ud77xCIP4KA=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1 h1:/Zt+cDPnpC3OVDm/JKLOs7M2DKmLRIIp3XIx9pHHiig=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1/go.mod h1:Ng3urmn6dYe8gnbCMoHHVl5APYz2txho3koEkV2o2HA=
-github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2 h1:FwladfywkNirM+FZYLBR2kBz5C8Tg0fw5w5Y7meRXWI=
-github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2/go.mod h1:vv5Ad0RrIoT1lJFdWBZwt4mB1+j+V8DUroixmKDTCdk=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.3 h1:ZJJNFaQ86GVKQ9ehwqyAFE6pIfyicpuJ8IkVaPBc6/4=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.3/go.mod h1:URuDvhmATVKqHBH9/0nOiNKk0+YcwfQ3WkK5PqHKxc8=
 github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.5.2 h1:l3SabZmNuXCMCbQUIeR4W6/N4j8SeH/lwX+a6leZhHo=
 github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.5.2/go.mod h1:k+mEZ4f1pVqZTRqtSDW2AhZ/3wT5qLpsUA75C/k7dtE=
 github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
@@ -659,7 +659,6 @@ github.com/arangodb/go-velocypack v0.0.0-20200318135517-5af53c29c67e h1:Xg+hGrY2
 github.com/arangodb/go-velocypack v0.0.0-20200318135517-5af53c29c67e/go.mod h1:mq7Shfa/CaixoDxiyAAc5jZ6CVBAyPaNQCGS7mkj4Ho=
 github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
 github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
-github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
 github.com/aws/aws-sdk-go v1.55.8 h1:JRmEUbU52aJQZ2AjX4q4Wu7t4uZjOu71uyNmaWlUkJQ=
@@ -713,7 +712,6 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
 github.com/biogo/store v0.0.0-20201120204734-aad293a2328f h1:+6okTAeUsUrdQr/qN7fIODzowrjjCrnJDg/gkYqcSXY=
 github.com/biogo/store v0.0.0-20201120204734-aad293a2328f/go.mod h1:z52shMwD6SGwRg2iYFjjDwX5Ene4ENTw6HfXraUy/08=
 github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 h1:mXoPYz/Ul5HYEDvkta6I8/rnYM5gSdSV2tJ6XbZuEtY=
@@ -902,10 +900,9 @@ github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpm
 github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0=
 github.com/fanixk/geohash v0.0.0-20150324002647-c1f9b5fa157a h1:Fyfh/dsHFrC6nkX7H7+nFdTd1wROlX/FxEIWVpKYf1U=
 github.com/fanixk/geohash v0.0.0-20150324002647-c1f9b5fa157a/go.mod h1:UgNw+PTmmGN8rV7RvjvnBMsoTU8ZXXnaT3hYsDTBlgQ=
-github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
 github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
-github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
-github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
+github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
+github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
 github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/fluent/fluent-logger-golang v1.10.1 h1:wu54iN1O2afll5oQrtTjhgZRwWcfOeFFzwRsEkABfFQ=
@@ -997,8 +994,8 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me
 github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
 github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
 github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
-github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw=
-github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
+github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U=
+github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
 github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
 github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
 github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
@@ -1216,13 +1213,12 @@ github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVU
 github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw=
 github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
 github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
-github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ=
-github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=
-github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U=
+github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0 h1:U+kC2dOhMFQctRfhK0gRctKAPTloZdMU5ZJxaesJ/VM=
+github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0/go.mod h1:Ll013mhdmsVDuoIXVfBtvgGJsXDYkTw1kooNcoCXuE0=
 github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts=
 github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
-github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
-github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
+github.com/hashicorp/go-sockaddr v1.0.7 h1:G+pTkSO01HpR5qCxg7lxfsFEZaG+C0VssTy/9dbT+Fw=
+github.com/hashicorp/go-sockaddr v1.0.7/go.mod h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0YgQaK/JakXqGyWw=
 github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
 github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
 github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
@@ -1241,8 +1237,8 @@ github.com/hashicorp/raft-boltdb v0.0.0-20230125174641-2a8082862702 h1:RLKEcCuKc
 github.com/hashicorp/raft-boltdb v0.0.0-20230125174641-2a8082862702/go.mod h1:nTakvJ4XYq45UXtn0DbwR4aU9ZdjlnIenpbs6Cd+FM0=
 github.com/hashicorp/raft-boltdb/v2 v2.3.1 h1:ackhdCNPKblmOhjEU9+4lHSJYFkJd6Jqyvj6eW9pwkc=
 github.com/hashicorp/raft-boltdb/v2 v2.3.1/go.mod h1:n4S+g43dXF1tqDT+yzcXHhXM6y7MrlUd3TTwGRcUvQE=
-github.com/hashicorp/vault/api v1.20.0 h1:KQMHElgudOsr+IbJgmbjHnCTxEpKs9LnozA1D3nozU4=
-github.com/hashicorp/vault/api v1.20.0/go.mod h1:GZ4pcjfzoOWpkJ3ijHNpEoAxKEsBJnVljyTe3jM2Sms=
+github.com/hashicorp/vault/api v1.22.0 h1:+HYFquE35/B74fHoIeXlZIP2YADVboaPjaSicHEZiH0=
+github.com/hashicorp/vault/api v1.22.0/go.mod h1:IUZA2cDvr4Ok3+NtK2Oq/r+lJeXkeCrHRmqdyWfpmGM=
 github.com/henrybear327/Proton-API-Bridge v1.0.0 h1:gjKAaWfKu++77WsZTHg6FUyPC5W0LTKWQciUm8PMZb0=
 github.com/henrybear327/Proton-API-Bridge v1.0.0/go.mod h1:gunH16hf6U74W2b9CGDaWRadiLICsoJ6KRkSt53zLts=
 github.com/henrybear327/go-proton-api v1.0.0 h1:zYi/IbjLwFAW7ltCeqXneUGJey0TN//Xo851a/BgLXw=
@@ -1380,12 +1376,10 @@ github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuz
 github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o=
 github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
-github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
 github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
 github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
 github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
 github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
-github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
 github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
 github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
 github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
@@ -1404,14 +1398,11 @@ github.com/minio/crc64nvme v1.1.1 h1:8dwx/Pz49suywbO+auHCBpCtlW1OfpcLN7wYgVR6wAI
 github.com/minio/crc64nvme v1.1.1/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg=
 github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g=
 github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
-github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
 github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ=
 github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw=
 github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
 github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
 github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
-github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 h1:BpfhmLKZf+SjVanKKhCgf3bg+511DmU9eDQTen7LLbY=
 github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
@@ -1534,7 +1525,6 @@ github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
 github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXqo=
 github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
 github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU=
@@ -1610,7 +1600,6 @@ github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0t
 github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
 github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w=
 github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk=
-github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
 github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
 github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
 github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 h1:OkMGxebDjyw0ULyrTYWeN0UNCCkmCWfjPnIA2W6oviI=
@@ -1633,8 +1622,8 @@ github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAm
 github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
 github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
 github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
-github.com/shirou/gopsutil/v4 v4.25.9 h1:JImNpf6gCVhKgZhtaAHJ0serfFGtlfIlSC08eaKdTrU=
-github.com/shirou/gopsutil/v4 v4.25.9/go.mod h1:gxIxoC+7nQRwUl/xNhutXlD8lq+jxTgpIkEf3rADHL8=
+github.com/shirou/gopsutil/v4 v4.25.10 h1:at8lk/5T1OgtuCp+AwrDofFRjnvosn0nkN2OLQ6g8tA=
+github.com/shirou/gopsutil/v4 v4.25.10/go.mod h1:+kSwyC8DRUD9XXEHCAFjK+0nuArFJM0lva+StQAcskM=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
@@ -2122,7 +2111,6 @@ golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
 golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
 golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -2228,8 +2216,8 @@ golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
-golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
+golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
 golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=

test/kafka/go.mod (38 changed lines)

@@ -19,10 +19,10 @@ require (
 cloud.google.com/go/auth v0.16.5 // indirect
 cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
 cloud.google.com/go/compute/metadata v0.8.0 // indirect
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1 // indirect
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 // indirect
 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.0 // indirect
 github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect
-github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2 // indirect
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.3 // indirect
 github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.5.2 // indirect
 github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
 github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 // indirect
@@ -43,24 +43,24 @@ require (
 github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc // indirect
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
 github.com/aws/aws-sdk-go v1.55.8 // indirect
-github.com/aws/aws-sdk-go-v2 v1.39.4 // indirect
-github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1 // indirect
+github.com/aws/aws-sdk-go-v2 v1.39.5 // indirect
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.2 // indirect
 github.com/aws/aws-sdk-go-v2/config v1.31.3 // indirect
-github.com/aws/aws-sdk-go-v2/credentials v1.18.19 // indirect
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.11 // indirect
+github.com/aws/aws-sdk-go-v2/credentials v1.18.20 // indirect
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.12 // indirect
 github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.18.4 // indirect
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.11 // indirect
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.11 // indirect
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.12 // indirect
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.12 // indirect
 github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.9 // indirect
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.12 // indirect
 github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.2 // indirect
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.9 // indirect
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.11 // indirect
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.9 // indirect
-github.com/aws/aws-sdk-go-v2/service/s3 v1.88.3 // indirect
-github.com/aws/aws-sdk-go-v2/service/sso v1.29.8 // indirect
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.3 // indirect
-github.com/aws/aws-sdk-go-v2/service/sts v1.38.9 // indirect
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.3 // indirect
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.12 // indirect
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.12 // indirect
+github.com/aws/aws-sdk-go-v2/service/s3 v1.89.1 // indirect
+github.com/aws/aws-sdk-go-v2/service/sso v1.30.0 // indirect
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.4 // indirect
+github.com/aws/aws-sdk-go-v2/service/sts v1.39.0 // indirect
 github.com/aws/smithy-go v1.23.1 // indirect
 github.com/beorn7/perks v1.0.1 // indirect
 github.com/bradenaw/juniper v0.15.3 // indirect
@@ -189,7 +189,7 @@ require (
 github.com/sagikazarmark/locafero v0.11.0 // indirect
 github.com/samber/lo v1.51.0 // indirect
 github.com/seaweedfs/goexif v1.0.3 // indirect
-github.com/shirou/gopsutil/v4 v4.25.9 // indirect
+github.com/shirou/gopsutil/v4 v4.25.10 // indirect
 github.com/sirupsen/logrus v1.9.3 // indirect
 github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
 github.com/smarty/assertions v1.16.0 // indirect
@@ -221,7 +221,7 @@ require (
 github.com/zeebo/errs v1.4.0 // indirect
 github.com/zeebo/xxh3 v1.0.2 // indirect
 go.etcd.io/bbolt v1.4.2 // indirect
-go.mongodb.org/mongo-driver v1.17.4 // indirect
+go.mongodb.org/mongo-driver v1.17.6 // indirect
 go.opentelemetry.io/auto/sdk v1.1.0 // indirect
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 // indirect
 go.opentelemetry.io/otel v1.37.0 // indirect
@@ -235,7 +235,7 @@ require (
 golang.org/x/net v0.46.0 // indirect
 golang.org/x/oauth2 v0.30.0 // indirect
 golang.org/x/sync v0.17.0 // indirect
-golang.org/x/sys v0.37.0 // indirect
+golang.org/x/sys v0.38.0 // indirect
 golang.org/x/term v0.36.0 // indirect
 golang.org/x/text v0.30.0 // indirect
 golang.org/x/time v0.12.0 // indirect

80
test/kafka/go.sum

@ -37,8 +37,8 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1 h1:5YTBM8QDVIBN3sxBil89WfdAAqDZbyJTgh688DSxX5w=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 h1:JXg2dwJUmPB9JmtVmdEB16APJ7jurfbY5jnfXpJoRMc=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.0 h1:KpMC6LFL7mqpExyMC9jVOYRiVhLmamjeZfRsUpB7l4s= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.0 h1:KpMC6LFL7mqpExyMC9jVOYRiVhLmamjeZfRsUpB7l4s=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.0/go.mod h1:J7MUC/wtRpfGVbQ5sIItY5/FuVWmvzlY21WAOfQnq/I= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.0/go.mod h1:J7MUC/wtRpfGVbQ5sIItY5/FuVWmvzlY21WAOfQnq/I=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY= github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY=
@ -47,8 +47,8 @@ github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDo
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI= github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1 h1:/Zt+cDPnpC3OVDm/JKLOs7M2DKmLRIIp3XIx9pHHiig= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1 h1:/Zt+cDPnpC3OVDm/JKLOs7M2DKmLRIIp3XIx9pHHiig=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1/go.mod h1:Ng3urmn6dYe8gnbCMoHHVl5APYz2txho3koEkV2o2HA= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1/go.mod h1:Ng3urmn6dYe8gnbCMoHHVl5APYz2txho3koEkV2o2HA=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2 h1:FwladfywkNirM+FZYLBR2kBz5C8Tg0fw5w5Y7meRXWI=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2/go.mod h1:vv5Ad0RrIoT1lJFdWBZwt4mB1+j+V8DUroixmKDTCdk=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.3 h1:ZJJNFaQ86GVKQ9ehwqyAFE6pIfyicpuJ8IkVaPBc6/4=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.3/go.mod h1:URuDvhmATVKqHBH9/0nOiNKk0+YcwfQ3WkK5PqHKxc8=
github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.5.2 h1:l3SabZmNuXCMCbQUIeR4W6/N4j8SeH/lwX+a6leZhHo= github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.5.2 h1:l3SabZmNuXCMCbQUIeR4W6/N4j8SeH/lwX+a6leZhHo=
github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.5.2/go.mod h1:k+mEZ4f1pVqZTRqtSDW2AhZ/3wT5qLpsUA75C/k7dtE= github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.5.2/go.mod h1:k+mEZ4f1pVqZTRqtSDW2AhZ/3wT5qLpsUA75C/k7dtE=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8= github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
@ -102,42 +102,42 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3d
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/aws/aws-sdk-go v1.55.8 h1:JRmEUbU52aJQZ2AjX4q4Wu7t4uZjOu71uyNmaWlUkJQ= github.com/aws/aws-sdk-go v1.55.8 h1:JRmEUbU52aJQZ2AjX4q4Wu7t4uZjOu71uyNmaWlUkJQ=
github.com/aws/aws-sdk-go v1.55.8/go.mod h1:ZkViS9AqA6otK+JBBNH2++sx1sgxrPKcSzPPvQkUtXk= github.com/aws/aws-sdk-go v1.55.8/go.mod h1:ZkViS9AqA6otK+JBBNH2++sx1sgxrPKcSzPPvQkUtXk=
github.com/aws/aws-sdk-go-v2 v1.39.4 h1:qTsQKcdQPHnfGYBBs+Btl8QwxJeoWcOcPcixK90mRhg=
github.com/aws/aws-sdk-go-v2 v1.39.4/go.mod h1:yWSxrnioGUZ4WVv9TgMrNUeLV3PFESn/v+6T/Su8gnM=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1 h1:i8p8P4diljCr60PpJp6qZXNlgX4m2yQFpYk+9ZT+J4E=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1/go.mod h1:ddqbooRZYNoJ2dsTwOty16rM+/Aqmk/GOXrK8cg7V00=
github.com/aws/aws-sdk-go-v2 v1.39.5 h1:e/SXuia3rkFtapghJROrydtQpfQaaUgd1cUvyO1mp2w=
github.com/aws/aws-sdk-go-v2 v1.39.5/go.mod h1:yWSxrnioGUZ4WVv9TgMrNUeLV3PFESn/v+6T/Su8gnM=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.2 h1:t9yYsydLYNBk9cJ73rgPhPWqOh/52fcWDQB5b1JsKSY=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.2/go.mod h1:IusfVNTmiSN3t4rhxWFaBAqn+mcNdwKtPcV16eYdgko=
github.com/aws/aws-sdk-go-v2/config v1.31.3 h1:RIb3yr/+PZ18YYNe6MDiG/3jVoJrPmdoCARwNkMGvco= github.com/aws/aws-sdk-go-v2/config v1.31.3 h1:RIb3yr/+PZ18YYNe6MDiG/3jVoJrPmdoCARwNkMGvco=
github.com/aws/aws-sdk-go-v2/config v1.31.3/go.mod h1:jjgx1n7x0FAKl6TnakqrpkHWWKcX3xfWtdnIJs5K9CE= github.com/aws/aws-sdk-go-v2/config v1.31.3/go.mod h1:jjgx1n7x0FAKl6TnakqrpkHWWKcX3xfWtdnIJs5K9CE=
github.com/aws/aws-sdk-go-v2/credentials v1.18.19 h1:Jc1zzwkSY1QbkEcLujwqRTXOdvW8ppND3jRBb/VhBQc=
github.com/aws/aws-sdk-go-v2/credentials v1.18.19/go.mod h1:DIfQ9fAk5H0pGtnqfqkbSIzky82qYnGvh06ASQXXg6A=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.11 h1:X7X4YKb+c0rkI6d4uJ5tEMxXgCZ+jZ/D6mvkno8c8Uw=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.11/go.mod h1:EqM6vPZQsZHYvC4Cai35UDg/f5NCEU+vp0WfbVqVcZc=
github.com/aws/aws-sdk-go-v2/credentials v1.18.20 h1:KFndAnHd9NUuzikHjQ8D5CfFVO+bgELkmcGY8yAw98Q=
github.com/aws/aws-sdk-go-v2/credentials v1.18.20/go.mod h1:9mCi28a+fmBHSQ0UM79omkz6JtN+PEsvLrnG36uoUv0=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.12 h1:VO3FIM2TDbm0kqp6sFNR0PbioXJb/HzCDW6NtIZpIWE=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.12/go.mod h1:6C39gB8kg82tx3r72muZSrNhHia9rjGkX7ORaS2GKNE=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.18.4 h1:0SzCLoPRSK3qSydsaFQWugP+lOBCTPwfcBOm6222+UA= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.18.4 h1:0SzCLoPRSK3qSydsaFQWugP+lOBCTPwfcBOm6222+UA=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.18.4/go.mod h1:JAet9FsBHjfdI+TnMBX4ModNNaQHAd3dc/Bk+cNsxeM= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.18.4/go.mod h1:JAet9FsBHjfdI+TnMBX4ModNNaQHAd3dc/Bk+cNsxeM=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.11 h1:7AANQZkF3ihM8fbdftpjhken0TP9sBzFbV/Ze/Y4HXA=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.11/go.mod h1:NTF4QCGkm6fzVwncpkFQqoquQyOolcyXfbpC98urj+c=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.11 h1:ShdtWUZT37LCAA4Mw2kJAJtzaszfSHFb5n25sdcv4YE=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.11/go.mod h1:7bUb2sSr2MZ3M/N+VyETLTQtInemHXb/Fl3s8CLzm0Y=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.12 h1:p/9flfXdoAnwJnuW9xHEAFY22R3A6skYkW19JFF9F+8=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.12/go.mod h1:ZTLHakoVCTtW8AaLGSwJ3LXqHD9uQKnOcv1TrpO6u2k=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.12 h1:2lTWFvRcnWFFLzHWmtddu5MTchc5Oj2OOey++99tPZ0=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.12/go.mod h1:hI92pK+ho8HVcWMHKHrK3Uml4pfG7wvL86FzO0LVtQQ=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.9 h1:w9LnHqTq8MEdlnyhV4Bwfizd65lfNCNgdlNC6mM5paE=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.9/go.mod h1:LGEP6EK4nj+bwWNdrvX/FnDTFowdBNwcSPuZu/ouFys=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.12 h1:itu4KHu8JK/N6NcLIISlf3LL1LccMqruLUXZ9y7yBZw=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.12/go.mod h1:i+6vTU3xziikTY3vcox23X8pPGW5X3wVgd1VZ7ha+x8=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.2 h1:xtuxji5CS0JknaXoACOunXOYOQzgfTvGAc9s2QdCJA4= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.2 h1:xtuxji5CS0JknaXoACOunXOYOQzgfTvGAc9s2QdCJA4=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.2/go.mod h1:zxwi0DIR0rcRcgdbl7E2MSOvxDyyXGBlScvBkARFaLQ= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.2/go.mod h1:zxwi0DIR0rcRcgdbl7E2MSOvxDyyXGBlScvBkARFaLQ=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.9 h1:by3nYZLR9l8bUH7kgaMU4dJgYFjyRdFEfORlDpPILB4=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.9/go.mod h1:IWjQYlqw4EX9jw2g3qnEPPWvCE6bS8fKzhMed1OK7c8=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.11 h1:GpMf3z2KJa4RnJ0ew3Hac+hRFYLZ9DDjfgXjuW+pB54=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.11/go.mod h1:6MZP3ZI4QQsgUCFTwMZA2V0sEriNQ8k2hmoHF3qjimQ=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.9 h1:wuZ5uW2uhJR63zwNlqWH2W4aL4ZjeJP3o92/W+odDY4=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.9/go.mod h1:/G58M2fGszCrOzvJUkDdY8O9kycodunH4VdT5oBAqls=
github.com/aws/aws-sdk-go-v2/service/s3 v1.88.3 h1:P18I4ipbk+b/3dZNq5YYh+Hq6XC0vp5RWkLp1tJldDA=
github.com/aws/aws-sdk-go-v2/service/s3 v1.88.3/go.mod h1:Rm3gw2Jov6e6kDuamDvyIlZJDMYk97VeCZ82wz/mVZ0=
github.com/aws/aws-sdk-go-v2/service/sso v1.29.8 h1:M5nimZmugcZUO9wG7iVtROxPhiqyZX6ejS1lxlDPbTU=
github.com/aws/aws-sdk-go-v2/service/sso v1.29.8/go.mod h1:mbef/pgKhtKRwrigPPs7SSSKZgytzP8PQ6P6JAAdqyM=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.3 h1:S5GuJZpYxE0lKeMHKn+BRTz6PTFpgThyJ+5mYfux7BM=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.3/go.mod h1:X4OF+BTd7HIb3L+tc4UlWHVrpgwZZIVENU15pRDVTI0=
github.com/aws/aws-sdk-go-v2/service/sts v1.38.9 h1:Ekml5vGg6sHSZLZJQJagefnVe6PmqC2oiRkBq4F7fU0=
github.com/aws/aws-sdk-go-v2/service/sts v1.38.9/go.mod h1:/e15V+o1zFHWdH3u7lpI3rVBcxszktIKuHKCY2/py+k=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.3 h1:NEe7FaViguRQEm8zl8Ay/kC/QRsMtWUiCGZajQIsLdc=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.3/go.mod h1:JLuCKu5VfiLBBBl/5IzZILU7rxS0koQpHzMOCzycOJU=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.12 h1:MM8imH7NZ0ovIVX7D2RxfMDv7Jt9OiUXkcQ+GqywA7M=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.12/go.mod h1:gf4OGwdNkbEsb7elw2Sy76odfhwNktWII3WgvQgQQ6w=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.12 h1:R3uW0iKl8rgNEXNjVGliW/oMEh9fO/LlUEV8RvIFr1I=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.12/go.mod h1:XEttbEr5yqsw8ebi7vlDoGJJjMXRez4/s9pibpJyL5s=
github.com/aws/aws-sdk-go-v2/service/s3 v1.89.1 h1:Dq82AV+Qxpno/fG162eAhnD8d48t9S+GZCfz7yv1VeA=
github.com/aws/aws-sdk-go-v2/service/s3 v1.89.1/go.mod h1:MbKLznDKpf7PnSonNRUVYZzfP0CeLkRIUexeblgKcU4=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.0 h1:xHXvxst78wBpJFgDW07xllOx0IAzbryrSdM4nMVQ4Dw=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.0/go.mod h1:/e8m+AO6HNPPqMyfKRtzZ9+mBF5/x1Wk8QiDva4m07I=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.4 h1:tBw2Qhf0kj4ZwtsVpDiVRU3zKLvjvjgIjHMKirxXg8M=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.4/go.mod h1:Deq4B7sRM6Awq/xyOBlxBdgW8/Z926KYNNaGMW2lrkA=
github.com/aws/aws-sdk-go-v2/service/sts v1.39.0 h1:C+BRMnasSYFcgDw8o9H5hzehKzXyAb9GY5v/8bP9DUY=
github.com/aws/aws-sdk-go-v2/service/sts v1.39.0/go.mod h1:4EjU+4mIx6+JqKQkruye+CaigV7alL3thVPfDd9VlMs=
github.com/aws/smithy-go v1.23.1 h1:sLvcH6dfAFwGkHLZ7dGiYF7aK6mg4CgKA/iDKjLDt9M= github.com/aws/smithy-go v1.23.1 h1:sLvcH6dfAFwGkHLZ7dGiYF7aK6mg4CgKA/iDKjLDt9M=
github.com/aws/smithy-go v1.23.1/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0= github.com/aws/smithy-go v1.23.1/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@ -218,8 +218,8 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg= github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
@ -569,8 +569,8 @@ github.com/seaweedfs/goexif v1.0.3/go.mod h1:Oni780Z236sXpIQzk1XoJlTwqrJ02smEin9
github.com/segmentio/kafka-go v0.4.49 h1:GJiNX1d/g+kG6ljyJEoi9++PUMdXGAxb7JGPiDCuNmk= github.com/segmentio/kafka-go v0.4.49 h1:GJiNX1d/g+kG6ljyJEoi9++PUMdXGAxb7JGPiDCuNmk=
github.com/segmentio/kafka-go v0.4.49/go.mod h1:Y1gn60kzLEEaW28YshXyk2+VCUKbJ3Qr6DrnT3i4+9E= github.com/segmentio/kafka-go v0.4.49/go.mod h1:Y1gn60kzLEEaW28YshXyk2+VCUKbJ3Qr6DrnT3i4+9E=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/shirou/gopsutil/v4 v4.25.9 h1:JImNpf6gCVhKgZhtaAHJ0serfFGtlfIlSC08eaKdTrU=
github.com/shirou/gopsutil/v4 v4.25.9/go.mod h1:gxIxoC+7nQRwUl/xNhutXlD8lq+jxTgpIkEf3rADHL8=
github.com/shirou/gopsutil/v4 v4.25.10 h1:at8lk/5T1OgtuCp+AwrDofFRjnvosn0nkN2OLQ6g8tA=
github.com/shirou/gopsutil/v4 v4.25.10/go.mod h1:+kSwyC8DRUD9XXEHCAFjK+0nuArFJM0lva+StQAcskM=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
@ -685,8 +685,8 @@ github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0=
github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
go.etcd.io/bbolt v1.4.2 h1:IrUHp260R8c+zYx/Tm8QZr04CX+qWS5PGfPdevhdm1I= go.etcd.io/bbolt v1.4.2 h1:IrUHp260R8c+zYx/Tm8QZr04CX+qWS5PGfPdevhdm1I=
go.etcd.io/bbolt v1.4.2/go.mod h1:Is8rSHO/b4f3XigBC0lL0+4FwAQv3HXEEIgFMuKHceM= go.etcd.io/bbolt v1.4.2/go.mod h1:Is8rSHO/b4f3XigBC0lL0+4FwAQv3HXEEIgFMuKHceM=
go.mongodb.org/mongo-driver v1.17.4 h1:jUorfmVzljjr0FLzYQsGP8cgN/qzzxlY9Vh0C9KFXVw=
go.mongodb.org/mongo-driver v1.17.4/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
go.mongodb.org/mongo-driver v1.17.6 h1:87JUG1wZfWsr6rIz3ZmpH90rL5tea7O3IHuSwHUpsss=
go.mongodb.org/mongo-driver v1.17.6/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
@ -894,8 +894,8 @@ golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=

10
weed/command/scaffold/master.toml

@ -9,11 +9,11 @@
scripts = """ scripts = """
lock lock
ec.encode -fullPercent=95 -quietFor=1h ec.encode -fullPercent=95 -quietFor=1h
ec.rebuild -force
ec.balance -force
volume.deleteEmpty -quietFor=24h -force
volume.balance -force
volume.fix.replication -force
ec.rebuild -apply
ec.balance -apply
volume.deleteEmpty -quietFor=24h -apply
volume.balance -apply
volume.fix.replication -apply
s3.clean.uploads -timeAgo=24h s3.clean.uploads -timeAgo=24h
unlock unlock
""" """

2
weed/command/server.go

@ -143,7 +143,7 @@ func init() {
serverOptions.v.concurrentDownloadLimitMB = cmdServer.Flag.Int("volume.concurrentDownloadLimitMB", 64, "limit total concurrent download size") serverOptions.v.concurrentDownloadLimitMB = cmdServer.Flag.Int("volume.concurrentDownloadLimitMB", 64, "limit total concurrent download size")
serverOptions.v.publicUrl = cmdServer.Flag.String("volume.publicUrl", "", "publicly accessible address") serverOptions.v.publicUrl = cmdServer.Flag.String("volume.publicUrl", "", "publicly accessible address")
serverOptions.v.preStopSeconds = cmdServer.Flag.Int("volume.preStopSeconds", 10, "number of seconds between stop send heartbeats and stop volume server") serverOptions.v.preStopSeconds = cmdServer.Flag.Int("volume.preStopSeconds", 10, "number of seconds between stop send heartbeats and stop volume server")
serverOptions.v.pprof = cmdServer.Flag.Bool("volume.pprof", false, "enable pprof http handlers. precludes --memprofile and --cpuprofile")
serverOptions.v.pprof = cmdServer.Flag.Bool("volume.pprof", false, "enable pprof http handlers. precludes -memprofile and -cpuprofile")
serverOptions.v.idxFolder = cmdServer.Flag.String("volume.dir.idx", "", "directory to store .idx files") serverOptions.v.idxFolder = cmdServer.Flag.String("volume.dir.idx", "", "directory to store .idx files")
serverOptions.v.inflightUploadDataTimeout = cmdServer.Flag.Duration("volume.inflightUploadDataTimeout", 60*time.Second, "inflight upload data wait timeout of volume servers") serverOptions.v.inflightUploadDataTimeout = cmdServer.Flag.Duration("volume.inflightUploadDataTimeout", 60*time.Second, "inflight upload data wait timeout of volume servers")
serverOptions.v.inflightDownloadDataTimeout = cmdServer.Flag.Duration("volume.inflightDownloadDataTimeout", 60*time.Second, "inflight download data wait timeout of volume servers") serverOptions.v.inflightDownloadDataTimeout = cmdServer.Flag.Duration("volume.inflightDownloadDataTimeout", 60*time.Second, "inflight download data wait timeout of volume servers")

2
weed/command/volume.go

@ -98,7 +98,7 @@ func init() {
v.ldbTimeout = cmdVolume.Flag.Int64("index.leveldbTimeout", 0, "alive time for leveldb (default to 0). If leveldb of volume is not accessed in ldbTimeout hours, it will be off loaded to reduce opened files and memory consumption.") v.ldbTimeout = cmdVolume.Flag.Int64("index.leveldbTimeout", 0, "alive time for leveldb (default to 0). If leveldb of volume is not accessed in ldbTimeout hours, it will be off loaded to reduce opened files and memory consumption.")
v.concurrentUploadLimitMB = cmdVolume.Flag.Int("concurrentUploadLimitMB", 256, "limit total concurrent upload size") v.concurrentUploadLimitMB = cmdVolume.Flag.Int("concurrentUploadLimitMB", 256, "limit total concurrent upload size")
v.concurrentDownloadLimitMB = cmdVolume.Flag.Int("concurrentDownloadLimitMB", 256, "limit total concurrent download size") v.concurrentDownloadLimitMB = cmdVolume.Flag.Int("concurrentDownloadLimitMB", 256, "limit total concurrent download size")
v.pprof = cmdVolume.Flag.Bool("pprof", false, "enable pprof http handlers. precludes --memprofile and --cpuprofile")
v.pprof = cmdVolume.Flag.Bool("pprof", false, "enable pprof http handlers. precludes -memprofile and -cpuprofile")
v.metricsHttpPort = cmdVolume.Flag.Int("metricsPort", 0, "Prometheus metrics listen port") v.metricsHttpPort = cmdVolume.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
v.metricsHttpIp = cmdVolume.Flag.String("metricsIp", "", "metrics listen ip. If empty, default to same as -ip.bind option.") v.metricsHttpIp = cmdVolume.Flag.String("metricsIp", "", "metrics listen ip. If empty, default to same as -ip.bind option.")
v.idxFolder = cmdVolume.Flag.String("dir.idx", "", "directory to store .idx files") v.idxFolder = cmdVolume.Flag.String("dir.idx", "", "directory to store .idx files")

20
weed/query/engine/function_helpers.go

@ -102,22 +102,18 @@ func (e *SQLEngine) valueToTime(value *schema_pb.Value) (time.Time, error) {
case *schema_pb.Value_StringValue: case *schema_pb.Value_StringValue:
// Try to parse various date/time string formats // Try to parse various date/time string formats
dateFormats := []struct { dateFormats := []struct {
format string
useLocal bool
format string
tz *time.Location
}{ }{
{"2006-01-02 15:04:05", true}, // Local time assumed for non-timezone formats
{"2006-01-02T15:04:05Z", false}, // UTC format
{"2006-01-02T15:04:05", true}, // Local time assumed
{"2006-01-02", true}, // Local time assumed for date only
{"15:04:05", true}, // Local time assumed for time only
{"2006-01-02 15:04:05", time.Local}, // Local time assumed for non-timezone formats
{"2006-01-02T15:04:05Z", time.UTC}, // UTC format
{"2006-01-02T15:04:05", time.Local}, // Local time assumed
{"2006-01-02", time.Local}, // Local time assumed for date only
{"15:04:05", time.Local}, // Local time assumed for time only
} }
for _, formatSpec := range dateFormats { for _, formatSpec := range dateFormats {
if t, err := time.Parse(formatSpec.format, v.StringValue); err == nil {
if formatSpec.useLocal {
// Convert to UTC for consistency if no timezone was specified
return time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), t.Nanosecond(), time.UTC), nil
}
if t, err := time.ParseInLocation(formatSpec.format, v.StringValue, formatSpec.tz); err == nil {
return t, nil return t, nil
} }
} }
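The time-parsing change above swaps a manual rebuild into UTC for time.ParseInLocation with an explicit *time.Location per format. A minimal standalone sketch of the behavioral difference (illustrative only, not SeaweedFS code): the old path effectively treated zone-less strings as UTC, while ParseInLocation interprets them in the given location.

package main

import (
    "fmt"
    "time"
)

func main() {
    const layout = "2006-01-02 15:04:05"
    s := "2024-01-02 15:04:05"

    // Old approach: time.Parse treats a zone-less layout as UTC, and the
    // time.Date rebuild keeps it in UTC regardless of the "local" intent.
    t, _ := time.Parse(layout, s)
    oldStyle := time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), t.Nanosecond(), time.UTC)

    // New approach: interpret the wall-clock string directly in the desired location.
    newStyle, _ := time.ParseInLocation(layout, s, time.Local)

    fmt.Println(oldStyle.Unix()) // epoch seconds, string read as UTC
    fmt.Println(newStyle.Unix()) // epoch seconds, string read as local time
}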

8
weed/s3api/custom_types.go

@ -1,11 +1,15 @@
package s3api package s3api
import "github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
import (
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
)
const s3TimeFormat = "2006-01-02T15:04:05.999Z07:00" const s3TimeFormat = "2006-01-02T15:04:05.999Z07:00"
// ConditionalHeaderResult holds the result of conditional header checking // ConditionalHeaderResult holds the result of conditional header checking
type ConditionalHeaderResult struct { type ConditionalHeaderResult struct {
ErrorCode s3err.ErrorCode ErrorCode s3err.ErrorCode
ETag string // ETag of the object (for 304 responses)
ETag string // ETag of the object (for 304 responses)
Entry *filer_pb.Entry // Entry fetched during conditional check (nil if not fetched or object doesn't exist)
} }
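Adding the Entry field to ConditionalHeaderResult lets handlers reuse whatever the conditional-header check already fetched instead of hitting the filer again. A minimal sketch of that reuse pattern, using hypothetical stand-in types rather than the real s3err/filer_pb ones:

package main

import "fmt"

// Hypothetical stand-ins for s3err.ErrorCode and filer_pb.Entry.
type errorCode int

const errNone errorCode = 0

type entry struct{ Name string }

// Result of a conditional-header check that also carries the entry it fetched.
type conditionalHeaderResult struct {
    ErrorCode errorCode
    Entry     *entry // nil if the check never had to load the object
}

func checkConditionalHeaders() conditionalHeaderResult {
    // Pretend the check had to load the object to compare ETags.
    return conditionalHeaderResult{ErrorCode: errNone, Entry: &entry{Name: "object.txt"}}
}

func fetchEntry() *entry {
    fmt.Println("extra lookup") // the call the optimization avoids
    return &entry{Name: "object.txt"}
}

func main() {
    result := checkConditionalHeaders()
    objectEntry := result.Entry
    if objectEntry == nil {
        objectEntry = fetchEntry() // only hit the store when the check did not
    }
    fmt.Println("serving", objectEntry.Name)
}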

17
weed/s3api/s3api_bucket_handlers.go

@ -7,7 +7,6 @@ import (
"encoding/xml" "encoding/xml"
"errors" "errors"
"fmt" "fmt"
"github.com/seaweedfs/seaweedfs/weed/util"
"math" "math"
"net/http" "net/http"
"path" "path"
@ -16,6 +15,8 @@ import (
"strings" "strings"
"time" "time"
"github.com/seaweedfs/seaweedfs/weed/util"
"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3bucket" "github.com/seaweedfs/seaweedfs/weed/s3api/s3bucket"
@ -210,6 +211,11 @@ func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request)
return return
} }
// Remove bucket from negative cache after successful creation
if s3a.bucketConfigCache != nil {
s3a.bucketConfigCache.RemoveNegativeCache(bucket)
}
// Check for x-amz-bucket-object-lock-enabled header (S3 standard compliance) // Check for x-amz-bucket-object-lock-enabled header (S3 standard compliance)
if objectLockHeaderValue := r.Header.Get(s3_constants.AmzBucketObjectLockEnabled); strings.EqualFold(objectLockHeaderValue, "true") { if objectLockHeaderValue := r.Header.Get(s3_constants.AmzBucketObjectLockEnabled); strings.EqualFold(objectLockHeaderValue, "true") {
glog.V(3).Infof("PutBucketHandler: enabling Object Lock and Versioning for bucket %s due to x-amz-bucket-object-lock-enabled header", bucket) glog.V(3).Infof("PutBucketHandler: enabling Object Lock and Versioning for bucket %s due to x-amz-bucket-object-lock-enabled header", bucket)
@ -493,16 +499,17 @@ func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request
} }
func (s3a *S3ApiServer) checkBucket(r *http.Request, bucket string) s3err.ErrorCode { func (s3a *S3ApiServer) checkBucket(r *http.Request, bucket string) s3err.ErrorCode {
entry, err := s3a.getEntry(s3a.option.BucketsPath, bucket)
if entry == nil || errors.Is(err, filer_pb.ErrNotFound) {
return s3err.ErrNoSuchBucket
// Use cached bucket config instead of direct getEntry call (optimization)
config, errCode := s3a.getBucketConfig(bucket)
if errCode != s3err.ErrNone {
return errCode
} }
//if iam is enabled, the access was already checked before //if iam is enabled, the access was already checked before
if s3a.iam.isEnabled() { if s3a.iam.isEnabled() {
return s3err.ErrNone return s3err.ErrNone
} }
if !s3a.hasAccess(r, entry) {
if !s3a.hasAccess(r, config.Entry) {
return s3err.ErrAccessDenied return s3err.ErrAccessDenied
} }
return s3err.ErrNone return s3err.ErrNone
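The new RemoveNegativeCache call invalidates a cached "bucket does not exist" answer once the bucket is actually created; a tiny, hypothetical negative cache (not the real bucketConfigCache API) shows why that invalidation is needed:

package main

import (
    "fmt"
    "sync"
)

// negativeCache remembers lookups that came back "not found" so repeated
// misses can be answered without hitting the filer.
type negativeCache struct {
    mu      sync.RWMutex
    missing map[string]struct{}
}

func newNegativeCache() *negativeCache {
    return &negativeCache{missing: make(map[string]struct{})}
}

func (c *negativeCache) MarkNotFound(bucket string) {
    c.mu.Lock()
    defer c.mu.Unlock()
    c.missing[bucket] = struct{}{}
}

func (c *negativeCache) IsKnownMissing(bucket string) bool {
    c.mu.RLock()
    defer c.mu.RUnlock()
    _, ok := c.missing[bucket]
    return ok
}

// Remove must be called when the bucket is created, otherwise the stale
// "missing" answer would keep shadowing the new bucket.
func (c *negativeCache) Remove(bucket string) {
    c.mu.Lock()
    defer c.mu.Unlock()
    delete(c.missing, bucket)
}

func main() {
    cache := newNegativeCache()
    cache.MarkNotFound("photos")
    fmt.Println(cache.IsKnownMissing("photos")) // true: earlier lookup failed
    cache.Remove("photos")                      // bucket was just created
    fmt.Println(cache.IsKnownMissing("photos")) // false: next lookup goes to the filer
}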

144
weed/s3api/s3api_object_handlers.go

@ -264,6 +264,8 @@ func (s3a *S3ApiServer) GetObjectHandler(w http.ResponseWriter, r *http.Request)
versionId := r.URL.Query().Get("versionId") versionId := r.URL.Query().Get("versionId")
// Check if versioning is configured for the bucket (Enabled or Suspended) // Check if versioning is configured for the bucket (Enabled or Suspended)
// Note: We need to check this even if versionId is empty, because versioned buckets
// handle even "get latest version" requests differently (through .versions directory)
versioningConfigured, err := s3a.isVersioningConfigured(bucket) versioningConfigured, err := s3a.isVersioningConfigured(bucket)
if err != nil { if err != nil {
if err == filer_pb.ErrNotFound { if err == filer_pb.ErrNotFound {
@ -344,31 +346,47 @@ func (s3a *S3ApiServer) GetObjectHandler(w http.ResponseWriter, r *http.Request)
destUrl = s3a.toFilerUrl(bucket, object) destUrl = s3a.toFilerUrl(bucket, object)
} }
// Check if this is a range request to an SSE object and modify the approach
// Fetch the correct entry for SSE processing (respects versionId)
// This consolidates entry lookups to avoid multiple filer calls
var objectEntryForSSE *filer_pb.Entry
originalRangeHeader := r.Header.Get("Range") originalRangeHeader := r.Header.Get("Range")
var sseObject = false var sseObject = false
// Pre-check if this object is SSE encrypted to avoid filer range conflicts
if originalRangeHeader != "" {
bucket, object := s3_constants.GetBucketAndObject(r)
objectPath := fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object)
if objectEntry, err := s3a.getEntry("", objectPath); err == nil {
primarySSEType := s3a.detectPrimarySSEType(objectEntry)
if primarySSEType == s3_constants.SSETypeC || primarySSEType == s3_constants.SSETypeKMS {
sseObject = true
// Temporarily remove Range header to get full encrypted data from filer
r.Header.Del("Range")
if versioningConfigured {
// For versioned objects, reuse the already-fetched entry
objectEntryForSSE = entry
} else {
// For non-versioned objects, try to reuse entry from conditional header check
if result.Entry != nil {
// Reuse entry fetched during conditional header check (optimization)
objectEntryForSSE = result.Entry
glog.V(3).Infof("GetObjectHandler: Reusing entry from conditional header check for %s/%s", bucket, object)
} else {
// No conditional headers were checked, fetch entry for SSE processing
var fetchErr error
objectEntryForSSE, fetchErr = s3a.fetchObjectEntry(bucket, object)
if fetchErr != nil {
glog.Errorf("GetObjectHandler: failed to get entry for SSE check: %v", fetchErr)
s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
return
}
if objectEntryForSSE == nil {
// Not found, return error early to avoid another lookup in proxyToFiler
s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey)
return
} }
} }
} }
// Fetch the correct entry for SSE processing (respects versionId)
objectEntryForSSE, err := s3a.getObjectEntryForSSE(r, versioningConfigured, entry)
if err != nil {
glog.Errorf("GetObjectHandler: %v", err)
s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
return
// Check if this is an SSE object for Range request handling
// This applies to both versioned and non-versioned objects
if originalRangeHeader != "" && objectEntryForSSE != nil {
primarySSEType := s3a.detectPrimarySSEType(objectEntryForSSE)
if primarySSEType == s3_constants.SSETypeC || primarySSEType == s3_constants.SSETypeKMS {
sseObject = true
// Temporarily remove Range header to get full encrypted data from filer
r.Header.Del("Range")
}
} }
s3a.proxyToFiler(w, r, destUrl, false, func(proxyResponse *http.Response, w http.ResponseWriter) (statusCode int, bytesTransferred int64) { s3a.proxyToFiler(w, r, destUrl, false, func(proxyResponse *http.Response, w http.ResponseWriter) (statusCode int, bytesTransferred int64) {
@ -415,6 +433,8 @@ func (s3a *S3ApiServer) HeadObjectHandler(w http.ResponseWriter, r *http.Request
versionId := r.URL.Query().Get("versionId") versionId := r.URL.Query().Get("versionId")
// Check if versioning is configured for the bucket (Enabled or Suspended) // Check if versioning is configured for the bucket (Enabled or Suspended)
// Note: We need to check this even if versionId is empty, because versioned buckets
// handle even "get latest version" requests differently (through .versions directory)
versioningConfigured, err := s3a.isVersioningConfigured(bucket) versioningConfigured, err := s3a.isVersioningConfigured(bucket)
if err != nil { if err != nil {
if err == filer_pb.ErrNotFound { if err == filer_pb.ErrNotFound {
@ -494,11 +514,31 @@ func (s3a *S3ApiServer) HeadObjectHandler(w http.ResponseWriter, r *http.Request
} }
// Fetch the correct entry for SSE processing (respects versionId) // Fetch the correct entry for SSE processing (respects versionId)
objectEntryForSSE, err := s3a.getObjectEntryForSSE(r, versioningConfigured, entry)
if err != nil {
glog.Errorf("HeadObjectHandler: %v", err)
s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
return
// For versioned objects, reuse already-fetched entry; for non-versioned, try to reuse from conditional check
var objectEntryForSSE *filer_pb.Entry
if versioningConfigured {
objectEntryForSSE = entry
} else {
// For non-versioned objects, try to reuse entry from conditional header check
if result.Entry != nil {
// Reuse entry fetched during conditional header check (optimization)
objectEntryForSSE = result.Entry
glog.V(3).Infof("HeadObjectHandler: Reusing entry from conditional header check for %s/%s", bucket, object)
} else {
// No conditional headers were checked, fetch entry for SSE processing
var fetchErr error
objectEntryForSSE, fetchErr = s3a.fetchObjectEntry(bucket, object)
if fetchErr != nil {
glog.Errorf("HeadObjectHandler: failed to get entry for SSE check: %v", fetchErr)
s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
return
}
if objectEntryForSSE == nil {
// Not found, return error early to avoid another lookup in proxyToFiler
s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey)
return
}
}
} }
s3a.proxyToFiler(w, r, destUrl, false, func(proxyResponse *http.Response, w http.ResponseWriter) (statusCode int, bytesTransferred int64) { s3a.proxyToFiler(w, r, destUrl, false, func(proxyResponse *http.Response, w http.ResponseWriter) (statusCode int, bytesTransferred int64) {
@ -658,21 +698,27 @@ func writeFinalResponse(w http.ResponseWriter, proxyResponse *http.Response, bod
return statusCode, bytesTransferred return statusCode, bytesTransferred
} }
// getObjectEntryForSSE fetches the correct filer entry for SSE processing
// For versioned objects, it reuses the already-fetched entry
// For non-versioned objects, it fetches the entry from the filer
func (s3a *S3ApiServer) getObjectEntryForSSE(r *http.Request, versioningConfigured bool, versionedEntry *filer_pb.Entry) (*filer_pb.Entry, error) {
if versioningConfigured {
// For versioned objects, we already have the correct entry
return versionedEntry, nil
// fetchObjectEntry fetches the filer entry for an object
// Returns nil if not found (not an error), or propagates other errors
func (s3a *S3ApiServer) fetchObjectEntry(bucket, object string) (*filer_pb.Entry, error) {
objectPath := fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object)
fetchedEntry, fetchErr := s3a.getEntry("", objectPath)
if fetchErr != nil {
if errors.Is(fetchErr, filer_pb.ErrNotFound) {
return nil, nil // Not found is not an error for SSE check
}
return nil, fetchErr // Propagate other errors
} }
return fetchedEntry, nil
}
// For non-versioned objects, fetch the entry
bucket, object := s3_constants.GetBucketAndObject(r)
// fetchObjectEntryRequired fetches the filer entry for an object
// Returns an error if the object is not found, or if any other lookup error occurs
func (s3a *S3ApiServer) fetchObjectEntryRequired(bucket, object string) (*filer_pb.Entry, error) {
objectPath := fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object) objectPath := fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object)
fetchedEntry, err := s3a.getEntry("", objectPath)
if err != nil && !errors.Is(err, filer_pb.ErrNotFound) {
return nil, fmt.Errorf("failed to get entry for SSE check %s: %w", objectPath, err)
fetchedEntry, fetchErr := s3a.getEntry("", objectPath)
if fetchErr != nil {
return nil, fetchErr // Return error for both not-found and other errors
} }
return fetchedEntry, nil return fetchedEntry, nil
} }
@ -750,7 +796,7 @@ func (s3a *S3ApiServer) handleSSECResponse(r *http.Request, proxyResponse *http.
if sseCChunks >= 1 { if sseCChunks >= 1 {
// Handle chunked SSE-C objects - each chunk needs independent decryption // Handle chunked SSE-C objects - each chunk needs independent decryption
multipartReader, decErr := s3a.createMultipartSSECDecryptedReader(r, proxyResponse)
multipartReader, decErr := s3a.createMultipartSSECDecryptedReader(r, proxyResponse, entry)
if decErr != nil { if decErr != nil {
glog.Errorf("Failed to create multipart SSE-C decrypted reader: %v", decErr) glog.Errorf("Failed to create multipart SSE-C decrypted reader: %v", decErr)
s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
@ -966,7 +1012,7 @@ func (s3a *S3ApiServer) handleSSEKMSResponse(r *http.Request, proxyResponse *htt
var decryptedReader io.Reader var decryptedReader io.Reader
if isMultipartSSEKMS { if isMultipartSSEKMS {
// Handle multipart SSE-KMS objects - each chunk needs independent decryption // Handle multipart SSE-KMS objects - each chunk needs independent decryption
multipartReader, decErr := s3a.createMultipartSSEKMSDecryptedReader(r, proxyResponse)
multipartReader, decErr := s3a.createMultipartSSEKMSDecryptedReader(r, proxyResponse, entry)
if decErr != nil { if decErr != nil {
glog.Errorf("Failed to create multipart SSE-KMS decrypted reader: %v", decErr) glog.Errorf("Failed to create multipart SSE-KMS decrypted reader: %v", decErr)
s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
@ -1271,16 +1317,8 @@ func (s3a *S3ApiServer) detectPrimarySSEType(entry *filer_pb.Entry) string {
} }
// createMultipartSSEKMSDecryptedReader creates a reader that decrypts each chunk independently for multipart SSE-KMS objects // createMultipartSSEKMSDecryptedReader creates a reader that decrypts each chunk independently for multipart SSE-KMS objects
func (s3a *S3ApiServer) createMultipartSSEKMSDecryptedReader(r *http.Request, proxyResponse *http.Response) (io.Reader, error) {
// Get the object path from the request
bucket, object := s3_constants.GetBucketAndObject(r)
objectPath := fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object)
// Get the object entry from filer to access chunk information
entry, err := s3a.getEntry("", objectPath)
if err != nil {
return nil, fmt.Errorf("failed to get object entry for multipart SSE-KMS decryption: %v", err)
}
func (s3a *S3ApiServer) createMultipartSSEKMSDecryptedReader(r *http.Request, proxyResponse *http.Response, entry *filer_pb.Entry) (io.Reader, error) {
// Entry is passed from caller to avoid redundant filer lookup
// Sort chunks by offset to ensure correct order // Sort chunks by offset to ensure correct order
chunks := entry.GetChunks() chunks := entry.GetChunks()
@ -1531,22 +1569,14 @@ func (r *SSERangeReader) Read(p []byte) (n int, err error) {
// createMultipartSSECDecryptedReader creates a decrypted reader for multipart SSE-C objects // createMultipartSSECDecryptedReader creates a decrypted reader for multipart SSE-C objects
// Each chunk has its own IV and encryption key from the original multipart parts // Each chunk has its own IV and encryption key from the original multipart parts
func (s3a *S3ApiServer) createMultipartSSECDecryptedReader(r *http.Request, proxyResponse *http.Response) (io.Reader, error) {
func (s3a *S3ApiServer) createMultipartSSECDecryptedReader(r *http.Request, proxyResponse *http.Response, entry *filer_pb.Entry) (io.Reader, error) {
// Parse SSE-C headers from the request for decryption key // Parse SSE-C headers from the request for decryption key
customerKey, err := ParseSSECHeaders(r) customerKey, err := ParseSSECHeaders(r)
if err != nil { if err != nil {
return nil, fmt.Errorf("invalid SSE-C headers for multipart decryption: %v", err) return nil, fmt.Errorf("invalid SSE-C headers for multipart decryption: %v", err)
} }
// Get the object path from the request
bucket, object := s3_constants.GetBucketAndObject(r)
objectPath := fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object)
// Get the object entry from filer to access chunk information
entry, err := s3a.getEntry("", objectPath)
if err != nil {
return nil, fmt.Errorf("failed to get object entry for multipart SSE-C decryption: %v", err)
}
// Entry is passed from caller to avoid redundant filer lookup
// Sort chunks by offset to ensure correct order // Sort chunks by offset to ensure correct order
chunks := entry.GetChunks() chunks := entry.GetChunks()
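The two new helpers differ only in how a missing object is reported: fetchObjectEntry returns (nil, nil) so SSE probing can continue, while fetchObjectEntryRequired surfaces the not-found error for callers such as the ACL and retention handlers. A toy sketch of that split, with a hypothetical lookup function standing in for the filer client:

package main

import (
    "errors"
    "fmt"
)

var errNotFound = errors.New("not found")

type entry struct{ Name string }

// lookup stands in for s3a.getEntry against the filer.
func lookup(path string) (*entry, error) {
    return nil, errNotFound
}

// fetchOptional mirrors fetchObjectEntry: missing objects are not an error.
func fetchOptional(path string) (*entry, error) {
    e, err := lookup(path)
    if err != nil {
        if errors.Is(err, errNotFound) {
            return nil, nil
        }
        return nil, err
    }
    return e, nil
}

// fetchRequired mirrors fetchObjectEntryRequired: missing objects are an error.
func fetchRequired(path string) (*entry, error) {
    return lookup(path)
}

func main() {
    if e, err := fetchOptional("/buckets/b/key"); err == nil && e == nil {
        fmt.Println("optional: object absent, continue without SSE handling")
    }
    if _, err := fetchRequired("/buckets/b/key"); errors.Is(err, errNotFound) {
        fmt.Println("required: map to ErrNoSuchKey and fail the request")
    }
}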

6
weed/s3api/s3api_object_handlers_acl.go

@ -68,8 +68,7 @@ func (s3a *S3ApiServer) GetObjectAclHandler(w http.ResponseWriter, r *http.Reque
} }
} else { } else {
// Handle regular (non-versioned) object ACL retrieval // Handle regular (non-versioned) object ACL retrieval
bucketDir := s3a.option.BucketsPath + "/" + bucket
entry, err = s3a.getEntry(bucketDir, object)
entry, err = s3a.fetchObjectEntryRequired(bucket, object)
if err != nil { if err != nil {
if errors.Is(err, filer_pb.ErrNotFound) { if errors.Is(err, filer_pb.ErrNotFound) {
s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey) s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey)
@ -212,8 +211,7 @@ func (s3a *S3ApiServer) PutObjectAclHandler(w http.ResponseWriter, r *http.Reque
} }
} else { } else {
// Handle regular (non-versioned) object ACL modification // Handle regular (non-versioned) object ACL modification
bucketDir := s3a.option.BucketsPath + "/" + bucket
entry, err = s3a.getEntry(bucketDir, object)
entry, err = s3a.fetchObjectEntryRequired(bucket, object)
if err != nil { if err != nil {
if errors.Is(err, filer_pb.ErrNotFound) { if errors.Is(err, filer_pb.ErrNotFound) {
s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey) s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey)

20
weed/s3api/s3api_object_handlers_put.go

@ -1396,14 +1396,15 @@ func (s3a *S3ApiServer) checkConditionalHeadersForReadsWithGetter(getter EntryGe
if !objectExists { if !objectExists {
if headers.ifMatch != "" { if headers.ifMatch != "" {
glog.V(3).Infof("checkConditionalHeadersForReads: If-Match failed - object %s/%s does not exist", bucket, object) glog.V(3).Infof("checkConditionalHeadersForReads: If-Match failed - object %s/%s does not exist", bucket, object)
return ConditionalHeaderResult{ErrorCode: s3err.ErrPreconditionFailed}
return ConditionalHeaderResult{ErrorCode: s3err.ErrPreconditionFailed, Entry: nil}
} }
if !headers.ifUnmodifiedSince.IsZero() { if !headers.ifUnmodifiedSince.IsZero() {
glog.V(3).Infof("checkConditionalHeadersForReads: If-Unmodified-Since failed - object %s/%s does not exist", bucket, object) glog.V(3).Infof("checkConditionalHeadersForReads: If-Unmodified-Since failed - object %s/%s does not exist", bucket, object)
return ConditionalHeaderResult{ErrorCode: s3err.ErrPreconditionFailed}
return ConditionalHeaderResult{ErrorCode: s3err.ErrPreconditionFailed, Entry: nil}
} }
// If-None-Match and If-Modified-Since succeed when object doesn't exist // If-None-Match and If-Modified-Since succeed when object doesn't exist
return ConditionalHeaderResult{ErrorCode: s3err.ErrNone}
// No entry to return since object doesn't exist
return ConditionalHeaderResult{ErrorCode: s3err.ErrNone, Entry: nil}
} }
// Object exists - check all conditions // Object exists - check all conditions
@ -1419,7 +1420,7 @@ func (s3a *S3ApiServer) checkConditionalHeadersForReadsWithGetter(getter EntryGe
// Use production etagMatches method // Use production etagMatches method
if !s3a.etagMatches(headers.ifMatch, objectETag) { if !s3a.etagMatches(headers.ifMatch, objectETag) {
glog.V(3).Infof("checkConditionalHeadersForReads: If-Match failed for object %s/%s - expected ETag %s, got %s", bucket, object, headers.ifMatch, objectETag) glog.V(3).Infof("checkConditionalHeadersForReads: If-Match failed for object %s/%s - expected ETag %s, got %s", bucket, object, headers.ifMatch, objectETag)
return ConditionalHeaderResult{ErrorCode: s3err.ErrPreconditionFailed}
return ConditionalHeaderResult{ErrorCode: s3err.ErrPreconditionFailed, Entry: entry}
} }
} }
glog.V(3).Infof("checkConditionalHeadersForReads: If-Match passed for object %s/%s", bucket, object) glog.V(3).Infof("checkConditionalHeadersForReads: If-Match passed for object %s/%s", bucket, object)
@ -1430,7 +1431,7 @@ func (s3a *S3ApiServer) checkConditionalHeadersForReadsWithGetter(getter EntryGe
objectModTime := time.Unix(entry.Attributes.Mtime, 0) objectModTime := time.Unix(entry.Attributes.Mtime, 0)
if objectModTime.After(headers.ifUnmodifiedSince) { if objectModTime.After(headers.ifUnmodifiedSince) {
glog.V(3).Infof("checkConditionalHeadersForReads: If-Unmodified-Since failed - object modified after %s", r.Header.Get(s3_constants.IfUnmodifiedSince)) glog.V(3).Infof("checkConditionalHeadersForReads: If-Unmodified-Since failed - object modified after %s", r.Header.Get(s3_constants.IfUnmodifiedSince))
return ConditionalHeaderResult{ErrorCode: s3err.ErrPreconditionFailed}
return ConditionalHeaderResult{ErrorCode: s3err.ErrPreconditionFailed, Entry: entry}
} }
glog.V(3).Infof("checkConditionalHeadersForReads: If-Unmodified-Since passed - object not modified since %s", r.Header.Get(s3_constants.IfUnmodifiedSince)) glog.V(3).Infof("checkConditionalHeadersForReads: If-Unmodified-Since passed - object not modified since %s", r.Header.Get(s3_constants.IfUnmodifiedSince))
} }
@ -1442,12 +1443,12 @@ func (s3a *S3ApiServer) checkConditionalHeadersForReadsWithGetter(getter EntryGe
if headers.ifNoneMatch == "*" { if headers.ifNoneMatch == "*" {
glog.V(3).Infof("checkConditionalHeadersForReads: If-None-Match=* failed - object %s/%s exists", bucket, object) glog.V(3).Infof("checkConditionalHeadersForReads: If-None-Match=* failed - object %s/%s exists", bucket, object)
return ConditionalHeaderResult{ErrorCode: s3err.ErrNotModified, ETag: objectETag}
return ConditionalHeaderResult{ErrorCode: s3err.ErrNotModified, ETag: objectETag, Entry: entry}
} }
// Use production etagMatches method // Use production etagMatches method
if s3a.etagMatches(headers.ifNoneMatch, objectETag) { if s3a.etagMatches(headers.ifNoneMatch, objectETag) {
glog.V(3).Infof("checkConditionalHeadersForReads: If-None-Match failed - ETag matches %s", objectETag) glog.V(3).Infof("checkConditionalHeadersForReads: If-None-Match failed - ETag matches %s", objectETag)
return ConditionalHeaderResult{ErrorCode: s3err.ErrNotModified, ETag: objectETag}
return ConditionalHeaderResult{ErrorCode: s3err.ErrNotModified, ETag: objectETag, Entry: entry}
} }
glog.V(3).Infof("checkConditionalHeadersForReads: If-None-Match passed - ETag %s doesn't match %s", objectETag, headers.ifNoneMatch) glog.V(3).Infof("checkConditionalHeadersForReads: If-None-Match passed - ETag %s doesn't match %s", objectETag, headers.ifNoneMatch)
} }
@ -1459,12 +1460,13 @@ func (s3a *S3ApiServer) checkConditionalHeadersForReadsWithGetter(getter EntryGe
// Use production getObjectETag method // Use production getObjectETag method
objectETag := s3a.getObjectETag(entry) objectETag := s3a.getObjectETag(entry)
glog.V(3).Infof("checkConditionalHeadersForReads: If-Modified-Since failed - object not modified since %s", r.Header.Get(s3_constants.IfModifiedSince)) glog.V(3).Infof("checkConditionalHeadersForReads: If-Modified-Since failed - object not modified since %s", r.Header.Get(s3_constants.IfModifiedSince))
return ConditionalHeaderResult{ErrorCode: s3err.ErrNotModified, ETag: objectETag}
return ConditionalHeaderResult{ErrorCode: s3err.ErrNotModified, ETag: objectETag, Entry: entry}
} }
glog.V(3).Infof("checkConditionalHeadersForReads: If-Modified-Since passed - object modified after %s", r.Header.Get(s3_constants.IfModifiedSince)) glog.V(3).Infof("checkConditionalHeadersForReads: If-Modified-Since passed - object modified after %s", r.Header.Get(s3_constants.IfModifiedSince))
} }
return ConditionalHeaderResult{ErrorCode: s3err.ErrNone}
// Return success with the fetched entry for reuse
return ConditionalHeaderResult{ErrorCode: s3err.ErrNone, Entry: entry}
} }
// checkConditionalHeadersForReads is the production method that uses the S3ApiServer as EntryGetter // checkConditionalHeadersForReads is the production method that uses the S3ApiServer as EntryGetter

9
weed/s3api/s3api_object_retention.go

@ -200,8 +200,7 @@ func (s3a *S3ApiServer) getObjectEntry(bucket, object, versionId string) (*filer
if versioningEnabled { if versioningEnabled {
entry, err = s3a.getLatestObjectVersion(bucket, object) entry, err = s3a.getLatestObjectVersion(bucket, object)
} else { } else {
bucketDir := s3a.option.BucketsPath + "/" + bucket
entry, err = s3a.getEntry(bucketDir, object)
entry, err = s3a.fetchObjectEntryRequired(bucket, object)
} }
} }
@ -284,8 +283,7 @@ func (s3a *S3ApiServer) setObjectRetention(bucket, object, versionId string, ret
} }
} }
} else { } else {
bucketDir := s3a.option.BucketsPath + "/" + bucket
entry, err = s3a.getEntry(bucketDir, object)
entry, err = s3a.fetchObjectEntryRequired(bucket, object)
if err != nil { if err != nil {
return fmt.Errorf("failed to get object %s/%s: %w", bucket, object, ErrObjectNotFound) return fmt.Errorf("failed to get object %s/%s: %w", bucket, object, ErrObjectNotFound)
} }
@ -426,8 +424,7 @@ func (s3a *S3ApiServer) setObjectLegalHold(bucket, object, versionId string, leg
} }
} }
} else { } else {
bucketDir := s3a.option.BucketsPath + "/" + bucket
entry, err = s3a.getEntry(bucketDir, object)
entry, err = s3a.fetchObjectEntryRequired(bucket, object)
if err != nil { if err != nil {
return fmt.Errorf("failed to get object %s/%s: %w", bucket, object, ErrObjectNotFound) return fmt.Errorf("failed to get object %s/%s: %w", bucket, object, ErrObjectNotFound)
} }

13
weed/shell/command_collection_delete.go

@ -23,7 +23,7 @@ func (c *commandCollectionDelete) Name() string {
func (c *commandCollectionDelete) Help() string { func (c *commandCollectionDelete) Help() string {
return `delete specified collection return `delete specified collection
collection.delete -collection <collection_name> -force
collection.delete -collection <collection_name> -apply
` `
} }
@ -36,11 +36,16 @@ func (c *commandCollectionDelete) Do(args []string, commandEnv *CommandEnv, writ
colDeleteCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) colDeleteCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
collectionName := colDeleteCommand.String("collection", "", "collection to delete. Use '_default_' for the empty-named collection.") collectionName := colDeleteCommand.String("collection", "", "collection to delete. Use '_default_' for the empty-named collection.")
applyBalancing := colDeleteCommand.Bool("force", false, "apply the collection")
applyBalancing := colDeleteCommand.Bool("apply", false, "apply the collection")
// TODO: remove this alias
applyBalancingAlias := colDeleteCommand.Bool("force", false, "apply the collection (alias for -apply)")
if err = colDeleteCommand.Parse(args); err != nil { if err = colDeleteCommand.Parse(args); err != nil {
return nil return nil
} }
infoAboutSimulationMode(writer, *applyBalancing, "-force")
handleDeprecatedForceFlag(writer, colDeleteCommand, applyBalancingAlias, applyBalancing)
infoAboutSimulationMode(writer, *applyBalancing, "-apply")
if err = commandEnv.confirmIsLocked(args); err != nil { if err = commandEnv.confirmIsLocked(args); err != nil {
return return
@ -55,7 +60,7 @@ func (c *commandCollectionDelete) Do(args []string, commandEnv *CommandEnv, writ
} }
if !*applyBalancing { if !*applyBalancing {
fmt.Fprintf(writer, "collection '%s' will be deleted. Use -force to apply the change.\n", *collectionName)
fmt.Fprintf(writer, "collection '%s' will be deleted. Use -apply to apply the change.\n", *collectionName)
return nil return nil
} }

11
weed/shell/command_ec_balance.go

@ -20,7 +20,7 @@ func (c *commandEcBalance) Name() string {
func (c *commandEcBalance) Help() string { func (c *commandEcBalance) Help() string {
return `balance all ec shards among all racks and volume servers return `balance all ec shards among all racks and volume servers
ec.balance [-c EACH_COLLECTION|<collection_name>] [-force] [-dataCenter <data_center>] [-shardReplicaPlacement <replica_placement>]
ec.balance [-c EACH_COLLECTION|<collection_name>] [-apply] [-dataCenter <data_center>] [-shardReplicaPlacement <replica_placement>]
Algorithm: Algorithm:
` + ecBalanceAlgorithmDescription ` + ecBalanceAlgorithmDescription
@ -36,11 +36,16 @@ func (c *commandEcBalance) Do(args []string, commandEnv *CommandEnv, writer io.W
dc := balanceCommand.String("dataCenter", "", "only apply the balancing for this dataCenter") dc := balanceCommand.String("dataCenter", "", "only apply the balancing for this dataCenter")
shardReplicaPlacement := balanceCommand.String("shardReplicaPlacement", "", "replica placement for EC shards, or master default if empty") shardReplicaPlacement := balanceCommand.String("shardReplicaPlacement", "", "replica placement for EC shards, or master default if empty")
maxParallelization := balanceCommand.Int("maxParallelization", DefaultMaxParallelization, "run up to X tasks in parallel, whenever possible") maxParallelization := balanceCommand.Int("maxParallelization", DefaultMaxParallelization, "run up to X tasks in parallel, whenever possible")
applyBalancing := balanceCommand.Bool("force", false, "apply the balancing plan")
applyBalancing := balanceCommand.Bool("apply", false, "apply the balancing plan")
// TODO: remove this alias
applyBalancingAlias := balanceCommand.Bool("force", false, "apply the balancing plan (alias for -apply)")
if err = balanceCommand.Parse(args); err != nil { if err = balanceCommand.Parse(args); err != nil {
return nil return nil
} }
infoAboutSimulationMode(writer, *applyBalancing, "-force")
handleDeprecatedForceFlag(writer, balanceCommand, applyBalancingAlias, applyBalancing)
infoAboutSimulationMode(writer, *applyBalancing, "-apply")
if err = commandEnv.confirmIsLocked(args); err != nil { if err = commandEnv.confirmIsLocked(args); err != nil {
return return

10
weed/shell/command_ec_rebuild.go

@ -36,7 +36,7 @@ func (c *commandEcRebuild) Name() string {
func (c *commandEcRebuild) Help() string { func (c *commandEcRebuild) Help() string {
return `find and rebuild missing ec shards among volume servers return `find and rebuild missing ec shards among volume servers
ec.rebuild [-c EACH_COLLECTION|<collection_name>] [-force]
ec.rebuild [-c EACH_COLLECTION|<collection_name>] [-apply]
Algorithm: Algorithm:
@ -71,11 +71,15 @@ func (c *commandEcRebuild) Do(args []string, commandEnv *CommandEnv, writer io.W
fixCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) fixCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
collection := fixCommand.String("collection", "EACH_COLLECTION", "collection name, or \"EACH_COLLECTION\" for each collection") collection := fixCommand.String("collection", "EACH_COLLECTION", "collection name, or \"EACH_COLLECTION\" for each collection")
applyChanges := fixCommand.Bool("force", false, "apply the changes")
applyChanges := fixCommand.Bool("apply", false, "apply the changes")
// TODO: remove this alias
applyChangesAlias := fixCommand.Bool("force", false, "apply the changes (alias for -apply)")
if err = fixCommand.Parse(args); err != nil { if err = fixCommand.Parse(args); err != nil {
return nil return nil
} }
infoAboutSimulationMode(writer, *applyChanges, "-force")
handleDeprecatedForceFlag(writer, fixCommand, applyChangesAlias, applyChanges)
infoAboutSimulationMode(writer, *applyChanges, "-apply")
if err = commandEnv.confirmIsLocked(args); err != nil { if err = commandEnv.confirmIsLocked(args); err != nil {
return return

25
weed/shell/command_fs_configure.go

@ -159,3 +159,28 @@ func infoAboutSimulationMode(writer io.Writer, forceMode bool, forceModeOption s
} }
fmt.Fprintf(writer, "Running in simulation mode. Use \"%s\" option to apply the changes.\n", forceModeOption) fmt.Fprintf(writer, "Running in simulation mode. Use \"%s\" option to apply the changes.\n", forceModeOption)
} }
// handleDeprecatedForceFlag handles the deprecated -force flag by checking if it was
// explicitly provided, printing a deprecation warning, and copying its
// value to the new -apply flag. This ensures that an explicitly passed -force (even -force=false) takes precedence over -apply.
func handleDeprecatedForceFlag(writer io.Writer, fs *flag.FlagSet, forceAlias *bool, applyFlag *bool) {
forceIsSet := false
applyIsSet := false
fs.Visit(func(f *flag.Flag) {
switch f.Name {
case "force":
forceIsSet = true
case "apply":
applyIsSet = true
}
})
if forceIsSet {
if applyIsSet {
fmt.Fprintf(writer, "WARNING: both -force and -apply are set. -force is deprecated and takes precedence. Please use only -apply.\n")
} else {
fmt.Fprintf(writer, "WARNING: -force is deprecated, please use -apply instead.\n")
}
*applyFlag = *forceAlias
}
}
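For reference, a minimal sketch of how a command is expected to wire the new flag pair, following the pattern used throughout this change (the enclosing Do method and the "exampleCommand" flag set are illustrative; the helper calls match the diff):

// Illustrative wiring inside a shell command's Do method; exampleCommand is a
// hypothetical flag set, the helpers are the ones added or updated in this change.
applyChanges := exampleCommand.Bool("apply", false, "apply the changes")
// TODO: remove this alias
applyChangesAlias := exampleCommand.Bool("force", false, "apply the changes (alias for -apply)")
if err = exampleCommand.Parse(args); err != nil {
	return nil
}
// an explicitly set -force wins over -apply and prints a deprecation warning
handleDeprecatedForceFlag(writer, exampleCommand, applyChangesAlias, applyChanges)
infoAboutSimulationMode(writer, *applyChanges, "-apply")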

20
weed/shell/command_fs_meta_change_volume_id.go

@ -4,13 +4,14 @@ import (
"context" "context"
"flag" "flag"
"fmt" "fmt"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/storage/needle"
"github.com/seaweedfs/seaweedfs/weed/util"
"io" "io"
"os" "os"
"strconv" "strconv"
"strings" "strings"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/storage/needle"
"github.com/seaweedfs/seaweedfs/weed/util"
) )
func init() { func init() {
@ -27,8 +28,8 @@ func (c *commandFsMetaChangeVolumeId) Name() string {
func (c *commandFsMetaChangeVolumeId) Help() string { func (c *commandFsMetaChangeVolumeId) Help() string {
return `change volume id in existing metadata. return `change volume id in existing metadata.
fs.meta.changeVolumeId -dir=/path/to/a/dir -fromVolumeId=x -toVolumeId=y -force
fs.meta.changeVolumeId -dir=/path/to/a/dir -mapping=/path/to/mapping/file -force
fs.meta.changeVolumeId -dir=/path/to/a/dir -fromVolumeId=x -toVolumeId=y -apply
fs.meta.changeVolumeId -dir=/path/to/a/dir -mapping=/path/to/mapping/file -apply
The mapping file should have these lines, each line is: [fromVolumeId]=>[toVolumeId] The mapping file should have these lines, each line is: [fromVolumeId]=>[toVolumeId]
e.g. e.g.
@ -49,11 +50,16 @@ func (c *commandFsMetaChangeVolumeId) Do(args []string, commandEnv *CommandEnv,
mappingFileName := fsMetaChangeVolumeIdCommand.String("mapping", "", "a file with multiple volume id changes, with each line as x=>y") mappingFileName := fsMetaChangeVolumeIdCommand.String("mapping", "", "a file with multiple volume id changes, with each line as x=>y")
fromVolumeId := fsMetaChangeVolumeIdCommand.Uint("fromVolumeId", 0, "change metadata with this volume id") fromVolumeId := fsMetaChangeVolumeIdCommand.Uint("fromVolumeId", 0, "change metadata with this volume id")
toVolumeId := fsMetaChangeVolumeIdCommand.Uint("toVolumeId", 0, "change metadata to this volume id") toVolumeId := fsMetaChangeVolumeIdCommand.Uint("toVolumeId", 0, "change metadata to this volume id")
isForce := fsMetaChangeVolumeIdCommand.Bool("force", false, "applying the metadata changes")
applyChanges := fsMetaChangeVolumeIdCommand.Bool("apply", false, "apply the metadata changes")
// TODO: remove this alias
applyChangesAlias := fsMetaChangeVolumeIdCommand.Bool("force", false, "apply the metadata changes (alias for -apply)")
if err = fsMetaChangeVolumeIdCommand.Parse(args); err != nil { if err = fsMetaChangeVolumeIdCommand.Parse(args); err != nil {
return err return err
} }
handleDeprecatedForceFlag(writer, fsMetaChangeVolumeIdCommand, applyChangesAlias, applyChanges)
infoAboutSimulationMode(writer, *applyChanges, "-apply")
// load the mapping // load the mapping
mapping := make(map[needle.VolumeId]needle.VolumeId) mapping := make(map[needle.VolumeId]needle.VolumeId)
if *mappingFileName != "" { if *mappingFileName != "" {
@ -86,7 +92,7 @@ func (c *commandFsMetaChangeVolumeId) Do(args []string, commandEnv *CommandEnv,
} }
if hasChanges { if hasChanges {
println("Updating", parentPath, entry.Name) println("Updating", parentPath, entry.Name)
if *isForce {
if *applyChanges {
if updateErr := filer_pb.UpdateEntry(context.Background(), client, &filer_pb.UpdateEntryRequest{ if updateErr := filer_pb.UpdateEntry(context.Background(), client, &filer_pb.UpdateEntryRequest{
Directory: string(parentPath), Directory: string(parentPath),
Entry: entry, Entry: entry,
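As a side note on the mapping file format described in the help text above ("[fromVolumeId]=>[toVolumeId]" per line), a hedged sketch of parsing one such line; parseMappingLine is a hypothetical helper for illustration only and is not part of this change:

// Hypothetical helper: parses one "x=>y" mapping line into volume ids.
func parseMappingLine(line string) (from, to needle.VolumeId, err error) {
	parts := strings.Split(strings.TrimSpace(line), "=>")
	if len(parts) != 2 {
		return 0, 0, fmt.Errorf("invalid mapping line %q, expect fromVolumeId=>toVolumeId", line)
	}
	fromId, err := strconv.ParseUint(strings.TrimSpace(parts[0]), 10, 32)
	if err != nil {
		return 0, 0, err
	}
	toId, err := strconv.ParseUint(strings.TrimSpace(parts[1]), 10, 32)
	if err != nil {
		return 0, 0, err
	}
	return needle.VolumeId(fromId), needle.VolumeId(toId), nil
}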

9
weed/shell/command_volume_balance.go

@ -39,7 +39,7 @@ func (c *commandVolumeBalance) Name() string {
func (c *commandVolumeBalance) Help() string {
return `balance all volumes among volume servers
volume.balance [-collection ALL_COLLECTIONS|EACH_COLLECTION|<collection_name>] [-force] [-dataCenter=<data_center_name>] [-racks=rack_name_one,rack_name_two] [-nodes=192.168.0.1:8080,192.168.0.2:8080]
volume.balance [-collection ALL_COLLECTIONS|EACH_COLLECTION|<collection_name>] [-apply] [-dataCenter=<data_center_name>] [-racks=rack_name_one,rack_name_two] [-nodes=192.168.0.1:8080,192.168.0.2:8080]
The -collection parameter supports:
- ALL_COLLECTIONS: balance across all collections
@ -92,14 +92,17 @@ func (c *commandVolumeBalance) Do(args []string, commandEnv *CommandEnv, writer
nodes := balanceCommand.String("nodes", "", "only apply the balancing for this nodes")
writable := balanceCommand.Bool("writable", false, "only apply the balancing for writable volumes")
noLock := balanceCommand.Bool("noLock", false, "do not lock the admin shell at one's own risk")
applyBalancing := balanceCommand.Bool("force", false, "apply the balancing plan.")
applyBalancing := balanceCommand.Bool("apply", false, "apply the balancing plan.")
// TODO: remove this alias
applyBalancingAlias := balanceCommand.Bool("force", false, "apply the balancing plan (alias for -apply)")
if err = balanceCommand.Parse(args); err != nil {
return nil
}
handleDeprecatedForceFlag(writer, balanceCommand, applyBalancingAlias, applyBalancing)
c.writable = *writable
c.applyBalancing = *applyBalancing
infoAboutSimulationMode(writer, c.applyBalancing, "-force")
infoAboutSimulationMode(writer, c.applyBalancing, "-apply")
if *noLock {
commandEnv.noLock = true
303
weed/shell/command_volume_check_disk.go

@ -8,7 +8,6 @@ import (
"io" "io"
"math" "math"
"net/http" "net/http"
"sync"
"time" "time"
"slices" "slices"
@ -26,9 +25,18 @@ func init() {
Commands = append(Commands, &commandVolumeCheckDisk{}) Commands = append(Commands, &commandVolumeCheckDisk{})
} }
type commandVolumeCheckDisk struct {
env *CommandEnv
writer io.Writer
type commandVolumeCheckDisk struct{}
type volumeCheckDisk struct {
commandEnv *CommandEnv
writer io.Writer
now time.Time
slowMode bool
verbose bool
applyChanges bool
syncDeletions bool
nonRepairThreshold float64
}
func (c *commandVolumeCheckDisk) Name() string {
@ -53,90 +61,41 @@ func (c *commandVolumeCheckDisk) HasTag(tag CommandTag) bool {
return tag == ResourceHeavy
}
func (c *commandVolumeCheckDisk) getVolumeStatusFileCount(vid uint32, dn *master_pb.DataNodeInfo) (totalFileCount, deletedFileCount uint64) {
err := operation.WithVolumeServerClient(false, pb.NewServerAddressWithGrpcPort(dn.Id, int(dn.GrpcPort)), c.env.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
resp, reqErr := volumeServerClient.VolumeStatus(context.Background(), &volume_server_pb.VolumeStatusRequest{
VolumeId: uint32(vid),
})
if resp != nil {
totalFileCount = resp.FileCount
deletedFileCount = resp.FileDeletedCount
}
return reqErr
})
if err != nil {
fmt.Fprintf(c.writer, "getting number of files for volume id %d from volumes status: %+v\n", vid, err)
}
return totalFileCount, deletedFileCount
}
func (c *commandVolumeCheckDisk) eqVolumeFileCount(a, b *VolumeReplica) (bool, bool) {
var waitGroup sync.WaitGroup
var fileCountA, fileCountB, fileDeletedCountA, fileDeletedCountB uint64
waitGroup.Add(1)
go func() {
defer waitGroup.Done()
fileCountA, fileDeletedCountA = c.getVolumeStatusFileCount(a.info.Id, a.location.dataNode)
}()
waitGroup.Add(1)
go func() {
defer waitGroup.Done()
fileCountB, fileDeletedCountB = c.getVolumeStatusFileCount(b.info.Id, b.location.dataNode)
}()
// Trying to synchronize a remote call to two nodes
waitGroup.Wait()
return fileCountA == fileCountB, fileDeletedCountA == fileDeletedCountB
}
func (c *commandVolumeCheckDisk) shouldSkipVolume(a, b *VolumeReplica, pulseTime time.Time, syncDeletions, verbose bool) bool {
pulseTimeAtSecond := pulseTime.Unix()
doSyncDeletedCount := false
if syncDeletions && a.info.DeleteCount != b.info.DeleteCount {
doSyncDeletedCount = true
}
if (a.info.FileCount != b.info.FileCount) || doSyncDeletedCount {
// Do synchronization of volumes, if the modification time was before the last pulsation time
if a.info.ModifiedAtSecond < pulseTimeAtSecond || b.info.ModifiedAtSecond < pulseTimeAtSecond {
return false
}
if eqFileCount, eqDeletedFileCount := c.eqVolumeFileCount(a, b); eqFileCount {
if doSyncDeletedCount && !eqDeletedFileCount {
return false
}
if verbose {
fmt.Fprintf(c.writer, "skipping active volumes %d with the same file counts on %s and %s\n",
a.info.Id, a.location.dataNode.Id, b.location.dataNode.Id)
}
} else {
return false
}
}
return true
}
func (c *commandVolumeCheckDisk) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
fsckCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
slowMode := fsckCommand.Bool("slow", false, "slow mode checks all replicas even file counts are the same")
verbose := fsckCommand.Bool("v", false, "verbose mode")
volumeId := fsckCommand.Uint("volumeId", 0, "the volume id")
applyChanges := fsckCommand.Bool("force", false, "apply the fix")
applyChanges := fsckCommand.Bool("apply", false, "apply the fix")
// TODO: remove this alias
applyChangesAlias := fsckCommand.Bool("force", false, "apply the fix (alias for -apply)")
syncDeletions := fsckCommand.Bool("syncDeleted", false, "sync of deletions the fix")
nonRepairThreshold := fsckCommand.Float64("nonRepairThreshold", 0.3, "repair when missing keys is not more than this limit")
if err = fsckCommand.Parse(args); err != nil {
return nil
}
infoAboutSimulationMode(writer, *applyChanges, "-force")
handleDeprecatedForceFlag(writer, fsckCommand, applyChangesAlias, applyChanges)
infoAboutSimulationMode(writer, *applyChanges, "-apply")
if err = commandEnv.confirmIsLocked(args); err != nil {
return
}
c.env = commandEnv
c.writer = writer
vcd := &volumeCheckDisk{
commandEnv: commandEnv,
writer: writer,
now: time.Now(),
slowMode: *slowMode,
verbose: *verbose,
applyChanges: *applyChanges,
syncDeletions: *syncDeletions,
nonRepairThreshold: *nonRepairThreshold,
}
// collect topology information
pulseTime := time.Now().Add(-constants.VolumePulsePeriod * 2)
topologyInfo, _, err := collectTopologyInfo(commandEnv, 0)
if err != nil {
return err
@ -152,7 +111,7 @@ func (c *commandVolumeCheckDisk) Do(args []string, commandEnv *CommandEnv, write
var writableReplicas []*VolumeReplica
for _, replica := range replicas {
if replica.info.ReadOnly {
fmt.Fprintf(writer, "skipping readonly volume %d on %s\n", replica.info.Id, replica.location.dataNode.Id)
vcd.write("skipping readonly volume %d on %s\n", replica.info.Id, replica.location.dataNode.Id)
} else {
writableReplicas = append(writableReplicas, replica)
}
@ -163,13 +122,19 @@ func (c *commandVolumeCheckDisk) Do(args []string, commandEnv *CommandEnv, write
})
for len(writableReplicas) >= 2 {
a, b := writableReplicas[0], writableReplicas[1]
if !*slowMode && c.shouldSkipVolume(a, b, pulseTime, *syncDeletions, *verbose) {
// always choose the larger volume to be the source
writableReplicas = append(replicas[:1], writableReplicas[2:]...)
continue
if !vcd.slowMode {
shouldSkip, err := vcd.shouldSkipVolume(a, b)
if err != nil {
vcd.write("error checking if volume %d should be skipped: %v\n", a.info.Id, err)
// Continue with sync despite error to be safe
} else if shouldSkip {
// always choose the larger volume to be the source
writableReplicas = append(writableReplicas[:1], writableReplicas[2:]...)
continue
}
}
if err := c.syncTwoReplicas(a, b, *applyChanges, *syncDeletions, *nonRepairThreshold, *verbose); err != nil {
fmt.Fprintf(writer, "sync volume %d on %s and %s: %v\n", a.info.Id, a.location.dataNode.Id, b.location.dataNode.Id, err)
if err := vcd.syncTwoReplicas(a, b); err != nil {
vcd.write("sync volume %d on %s and %s: %v\n", a.info.Id, a.location.dataNode.Id, b.location.dataNode.Id, err)
}
// always choose the larger volume to be the source
if a.info.FileCount > b.info.FileCount {
@ -183,32 +148,134 @@ func (c *commandVolumeCheckDisk) Do(args []string, commandEnv *CommandEnv, write
return nil
}
func (c *commandVolumeCheckDisk) syncTwoReplicas(a *VolumeReplica, b *VolumeReplica, applyChanges bool, doSyncDeletions bool, nonRepairThreshold float64, verbose bool) (err error) {
func (vcd *volumeCheckDisk) isLocked() bool {
return vcd.commandEnv.isLocked()
}
func (vcd *volumeCheckDisk) grpcDialOption() grpc.DialOption {
return vcd.commandEnv.option.GrpcDialOption
}
func (vcd *volumeCheckDisk) write(format string, a ...any) {
fmt.Fprintf(vcd.writer, format, a...)
}
func (vcd *volumeCheckDisk) writeVerbose(format string, a ...any) {
if vcd.verbose {
fmt.Fprintf(vcd.writer, format, a...)
}
}
// getVolumeStatusFileCount retrieves the current file count and deleted file count
// from a volume server via gRPC.
func (vcd *volumeCheckDisk) getVolumeStatusFileCount(vid uint32, dn *master_pb.DataNodeInfo) (totalFileCount, deletedFileCount uint64, err error) {
err = operation.WithVolumeServerClient(false, pb.NewServerAddressWithGrpcPort(dn.Id, int(dn.GrpcPort)), vcd.grpcDialOption(), func(volumeServerClient volume_server_pb.VolumeServerClient) error {
resp, reqErr := volumeServerClient.VolumeStatus(context.Background(), &volume_server_pb.VolumeStatusRequest{
VolumeId: uint32(vid),
})
if resp != nil {
totalFileCount = resp.FileCount
deletedFileCount = resp.FileDeletedCount
}
return reqErr
})
return totalFileCount, deletedFileCount, err
}
// eqVolumeFileCount compares the real-time file counts of two volume replicas
// by making sequential gRPC calls to their volume servers.
//
// Returns:
// - bool: true if file counts match
// - bool: true if deleted file counts match
// - error: any error from volume server communication
//
// Error Handling: Errors from getVolumeStatusFileCount are wrapped with context
// (volume ID and server) and propagated up. Uses fmt.Errorf with %w to maintain
// error chain for errors.Is() and errors.As().
func (vcd *volumeCheckDisk) eqVolumeFileCount(a, b *VolumeReplica) (bool, bool, error) {
fileCountA, fileDeletedCountA, errA := vcd.getVolumeStatusFileCount(a.info.Id, a.location.dataNode)
if errA != nil {
return false, false, fmt.Errorf("getting volume %d status from %s: %w", a.info.Id, a.location.dataNode.Id, errA)
}
fileCountB, fileDeletedCountB, errB := vcd.getVolumeStatusFileCount(b.info.Id, b.location.dataNode)
if errB != nil {
return false, false, fmt.Errorf("getting volume %d status from %s: %w", b.info.Id, b.location.dataNode.Id, errB)
}
return fileCountA == fileCountB, fileDeletedCountA == fileDeletedCountB, nil
}
// shouldSkipVolume determines whether two volume replicas should skip synchronization.
//
// Logic:
// 1. If file counts and delete counts match (when syncDeletions enabled), skip sync
// 2. If counts differ AND both volumes were modified recently (>= pulseTimeAtSecond),
// they may still be actively receiving writes, so we return true to skip sync and
// avoid false positives
// 3. If counts differ AND at least one volume was modified before the pulse cutoff,
// call eqVolumeFileCount to get real-time counts from volume servers
//
// Returns:
// - bool: true if sync should be skipped
// - error: any error from volume server communication (when eqVolumeFileCount is called)
//
// Error Handling: Errors from eqVolumeFileCount are wrapped with context and propagated.
// The Do method logs these errors and continues processing to ensure other volumes are checked.
func (vcd *volumeCheckDisk) shouldSkipVolume(a, b *VolumeReplica) (bool, error) {
pulseTimeAtSecond := vcd.now.Add(-constants.VolumePulsePeriod * 2).Unix()
doSyncDeletedCount := false
if vcd.syncDeletions && a.info.DeleteCount != b.info.DeleteCount {
doSyncDeletedCount = true
}
if (a.info.FileCount != b.info.FileCount) || doSyncDeletedCount {
// Do synchronization of volumes, if the modification time was before the last pulsation time
if a.info.ModifiedAtSecond < pulseTimeAtSecond || b.info.ModifiedAtSecond < pulseTimeAtSecond {
return false, nil
}
eqFileCount, eqDeletedFileCount, err := vcd.eqVolumeFileCount(a, b)
if err != nil {
return false, fmt.Errorf("comparing volume %d file counts on %s and %s: %w",
a.info.Id, a.location.dataNode.Id, b.location.dataNode.Id, err)
}
if eqFileCount {
if doSyncDeletedCount && !eqDeletedFileCount {
return false, nil
}
vcd.writeVerbose("skipping active volumes %d with the same file counts on %s and %s\n",
a.info.Id, a.location.dataNode.Id, b.location.dataNode.Id)
} else {
return false, nil
}
}
return true, nil
}
func (vcd *volumeCheckDisk) syncTwoReplicas(a *VolumeReplica, b *VolumeReplica) (err error) {
aHasChanges, bHasChanges := true, true
const maxIterations = 5
iteration := 0
for (aHasChanges || bHasChanges) && iteration < maxIterations {
iteration++
if verbose {
fmt.Fprintf(c.writer, "sync iteration %d for volume %d\n", iteration, a.info.Id)
}
vcd.writeVerbose("sync iteration %d for volume %d\n", iteration, a.info.Id)
prevAHasChanges, prevBHasChanges := aHasChanges, bHasChanges
if aHasChanges, bHasChanges, err = c.checkBoth(a, b, applyChanges, doSyncDeletions, nonRepairThreshold, verbose); err != nil {
if aHasChanges, bHasChanges, err = vcd.checkBoth(a, b); err != nil {
return err
}
// Detect if we're stuck in a loop with no progress
if iteration > 1 && prevAHasChanges == aHasChanges && prevBHasChanges == bHasChanges && (aHasChanges || bHasChanges) {
fmt.Fprintf(c.writer, "volume %d sync is not making progress between %s and %s after iteration %d, stopping to prevent infinite loop\n",
vcd.write("volume %d sync is not making progress between %s and %s after iteration %d, stopping to prevent infinite loop\n",
a.info.Id, a.location.dataNode.Id, b.location.dataNode.Id, iteration)
return fmt.Errorf("sync not making progress after %d iterations", iteration)
}
}
if iteration >= maxIterations && (aHasChanges || bHasChanges) {
fmt.Fprintf(c.writer, "volume %d sync reached maximum iterations (%d) between %s and %s, may need manual intervention\n",
vcd.write("volume %d sync reached maximum iterations (%d) between %s and %s, may need manual intervention\n",
a.info.Id, maxIterations, a.location.dataNode.Id, b.location.dataNode.Id)
return fmt.Errorf("reached maximum sync iterations (%d)", maxIterations)
}
@ -216,7 +283,7 @@ func (c *commandVolumeCheckDisk) syncTwoReplicas(a *VolumeReplica, b *VolumeRepl
return nil
}
func (c *commandVolumeCheckDisk) checkBoth(a *VolumeReplica, b *VolumeReplica, applyChanges bool, doSyncDeletions bool, nonRepairThreshold float64, verbose bool) (aHasChanges bool, bHasChanges bool, err error) {
func (vcd *volumeCheckDisk) checkBoth(a *VolumeReplica, b *VolumeReplica) (aHasChanges bool, bHasChanges bool, err error) {
aDB, bDB := needle_map.NewMemDb(), needle_map.NewMemDb()
defer func() {
aDB.Close()
@ -224,17 +291,16 @@ func (c *commandVolumeCheckDisk) checkBoth(a *VolumeReplica, b *VolumeReplica, a
}()
// read index db
readIndexDbCutoffFrom := uint64(time.Now().UnixNano())
if err = readIndexDatabase(aDB, a.info.Collection, a.info.Id, pb.NewServerAddressFromDataNode(a.location.dataNode), verbose, c.writer, c.env.option.GrpcDialOption); err != nil {
if err = vcd.readIndexDatabase(aDB, a.info.Collection, a.info.Id, pb.NewServerAddressFromDataNode(a.location.dataNode)); err != nil {
return true, true, fmt.Errorf("readIndexDatabase %s volume %d: %v", a.location.dataNode, a.info.Id, err)
}
if err := readIndexDatabase(bDB, b.info.Collection, b.info.Id, pb.NewServerAddressFromDataNode(b.location.dataNode), verbose, c.writer, c.env.option.GrpcDialOption); err != nil {
if err := vcd.readIndexDatabase(bDB, b.info.Collection, b.info.Id, pb.NewServerAddressFromDataNode(b.location.dataNode)); err != nil {
return true, true, fmt.Errorf("readIndexDatabase %s volume %d: %v", b.location.dataNode, b.info.Id, err)
}
// find and make up the differences
aHasChanges, err1 := doVolumeCheckDisk(bDB, aDB, b, a, verbose, c.writer, applyChanges, doSyncDeletions, nonRepairThreshold, readIndexDbCutoffFrom, c.env.option.GrpcDialOption)
bHasChanges, err2 := doVolumeCheckDisk(aDB, bDB, a, b, verbose, c.writer, applyChanges, doSyncDeletions, nonRepairThreshold, readIndexDbCutoffFrom, c.env.option.GrpcDialOption)
aHasChanges, err1 := vcd.doVolumeCheckDisk(bDB, aDB, b, a)
bHasChanges, err2 := vcd.doVolumeCheckDisk(aDB, bDB, a, b)
if err1 != nil {
return aHasChanges, bHasChanges, fmt.Errorf("doVolumeCheckDisk source:%s target:%s volume %d: %v", b.location.dataNode.Id, a.location.dataNode.Id, b.info.Id, err1)
}
@ -244,7 +310,7 @@ func (c *commandVolumeCheckDisk) checkBoth(a *VolumeReplica, b *VolumeReplica, a
return aHasChanges, bHasChanges, nil
}
func doVolumeCheckDisk(minuend, subtrahend *needle_map.MemDb, source, target *VolumeReplica, verbose bool, writer io.Writer, applyChanges bool, doSyncDeletions bool, nonRepairThreshold float64, cutoffFromAtNs uint64, grpcDialOption grpc.DialOption) (hasChanges bool, err error) {
func (vcd *volumeCheckDisk) doVolumeCheckDisk(minuend, subtrahend *needle_map.MemDb, source, target *VolumeReplica) (hasChanges bool, err error) {
// find missing keys
// hash join, can be more efficient
@ -252,6 +318,8 @@ func doVolumeCheckDisk(minuend, subtrahend *needle_map.MemDb, source, target *Vo
var partiallyDeletedNeedles []needle_map.NeedleValue
var counter int
doCutoffOfLastNeedle := true
cutoffFromAtNs := uint64(vcd.now.UnixNano())
minuend.DescendingVisit(func(minuendValue needle_map.NeedleValue) error {
counter++
if subtrahendValue, found := subtrahend.Get(minuendValue.Key); !found {
@ -259,7 +327,7 @@ func doVolumeCheckDisk(minuend, subtrahend *needle_map.MemDb, source, target *Vo
return nil
}
if doCutoffOfLastNeedle {
if needleMeta, err := readNeedleMeta(grpcDialOption, pb.NewServerAddressFromDataNode(source.location.dataNode), source.info.Id, minuendValue); err == nil {
if needleMeta, err := readNeedleMeta(vcd.grpcDialOption(), pb.NewServerAddressFromDataNode(source.location.dataNode), source.info.Id, minuendValue); err == nil {
// needles older than the cutoff time are not missing yet
if needleMeta.AppendAtNs > cutoffFromAtNs {
return nil
@ -279,7 +347,7 @@ func doVolumeCheckDisk(minuend, subtrahend *needle_map.MemDb, source, target *Vo
return nil
})
fmt.Fprintf(writer, "volume %d %s has %d entries, %s missed %d and partially deleted %d entries\n",
vcd.write("volume %d %s has %d entries, %s missed %d and partially deleted %d entries\n",
source.info.Id, source.location.dataNode.Id, counter, target.location.dataNode.Id, len(missingNeedles), len(partiallyDeletedNeedles))
if counter == 0 || (len(missingNeedles) == 0 && len(partiallyDeletedNeedles) == 0) {
@ -287,45 +355,40 @@ func doVolumeCheckDisk(minuend, subtrahend *needle_map.MemDb, source, target *Vo
}
missingNeedlesFraction := float64(len(missingNeedles)) / float64(counter)
if missingNeedlesFraction > nonRepairThreshold {
if missingNeedlesFraction > vcd.nonRepairThreshold {
return false, fmt.Errorf(
"failed to start repair volume %d, percentage of missing keys is greater than the threshold: %.2f > %.2f",
source.info.Id, missingNeedlesFraction, nonRepairThreshold)
source.info.Id, missingNeedlesFraction, vcd.nonRepairThreshold)
}
for _, needleValue := range missingNeedles {
needleBlob, err := readSourceNeedleBlob(grpcDialOption, pb.NewServerAddressFromDataNode(source.location.dataNode), source.info.Id, needleValue)
needleBlob, err := vcd.readSourceNeedleBlob(pb.NewServerAddressFromDataNode(source.location.dataNode), source.info.Id, needleValue)
if err != nil {
return hasChanges, err
}
if !applyChanges {
if !vcd.applyChanges {
continue
}
if verbose {
fmt.Fprintf(writer, "read %s %s => %s\n", needleValue.Key.FileId(source.info.Id), source.location.dataNode.Id, target.location.dataNode.Id)
}
vcd.writeVerbose("read %s %s => %s\n", needleValue.Key.FileId(source.info.Id), source.location.dataNode.Id, target.location.dataNode.Id)
hasChanges = true
if err = writeNeedleBlobToTarget(grpcDialOption, pb.NewServerAddressFromDataNode(target.location.dataNode), source.info.Id, needleValue, needleBlob); err != nil {
if err = vcd.writeNeedleBlobToTarget(pb.NewServerAddressFromDataNode(target.location.dataNode), source.info.Id, needleValue, needleBlob); err != nil {
return hasChanges, err
}
}
if doSyncDeletions && applyChanges && len(partiallyDeletedNeedles) > 0 {
if vcd.syncDeletions && vcd.applyChanges && len(partiallyDeletedNeedles) > 0 {
var fidList []string
for _, needleValue := range partiallyDeletedNeedles {
fidList = append(fidList, needleValue.Key.FileId(source.info.Id))
if verbose {
fmt.Fprintf(writer, "delete %s %s => %s\n", needleValue.Key.FileId(source.info.Id), source.location.dataNode.Id, target.location.dataNode.Id)
}
vcd.writeVerbose("delete %s %s => %s\n", needleValue.Key.FileId(source.info.Id), source.location.dataNode.Id, target.location.dataNode.Id)
}
deleteResults := operation.DeleteFileIdsAtOneVolumeServer(
pb.NewServerAddressFromDataNode(target.location.dataNode),
grpcDialOption, fidList, false)
vcd.grpcDialOption(), fidList, false)
// Check for errors in results
for _, deleteResult := range deleteResults {
@ -340,9 +403,9 @@ func doVolumeCheckDisk(minuend, subtrahend *needle_map.MemDb, source, target *Vo
return hasChanges, nil
}
func readSourceNeedleBlob(grpcDialOption grpc.DialOption, sourceVolumeServer pb.ServerAddress, volumeId uint32, needleValue needle_map.NeedleValue) (needleBlob []byte, err error) {
func (vcd *volumeCheckDisk) readSourceNeedleBlob(sourceVolumeServer pb.ServerAddress, volumeId uint32, needleValue needle_map.NeedleValue) (needleBlob []byte, err error) {
err = operation.WithVolumeServerClient(false, sourceVolumeServer, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
err = operation.WithVolumeServerClient(false, sourceVolumeServer, vcd.grpcDialOption(), func(client volume_server_pb.VolumeServerClient) error {
resp, err := client.ReadNeedleBlob(context.Background(), &volume_server_pb.ReadNeedleBlobRequest{
VolumeId: volumeId,
Offset: needleValue.Offset.ToActualOffset(),
@ -357,9 +420,9 @@ func readSourceNeedleBlob(grpcDialOption grpc.DialOption, sourceVolumeServer pb.
return
}
func writeNeedleBlobToTarget(grpcDialOption grpc.DialOption, targetVolumeServer pb.ServerAddress, volumeId uint32, needleValue needle_map.NeedleValue, needleBlob []byte) error {
func (vcd *volumeCheckDisk) writeNeedleBlobToTarget(targetVolumeServer pb.ServerAddress, volumeId uint32, needleValue needle_map.NeedleValue, needleBlob []byte) error {
return operation.WithVolumeServerClient(false, targetVolumeServer, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
return operation.WithVolumeServerClient(false, targetVolumeServer, vcd.grpcDialOption(), func(client volume_server_pb.VolumeServerClient) error {
_, err := client.WriteNeedleBlob(context.Background(), &volume_server_pb.WriteNeedleBlobRequest{
VolumeId: volumeId,
NeedleId: uint64(needleValue.Key),
@ -368,25 +431,21 @@ func writeNeedleBlobToTarget(grpcDialOption grpc.DialOption, targetVolumeServer
})
return err
})
}
func readIndexDatabase(db *needle_map.MemDb, collection string, volumeId uint32, volumeServer pb.ServerAddress, verbose bool, writer io.Writer, grpcDialOption grpc.DialOption) error {
func (vcd *volumeCheckDisk) readIndexDatabase(db *needle_map.MemDb, collection string, volumeId uint32, volumeServer pb.ServerAddress) error {
var buf bytes.Buffer
if err := copyVolumeIndexFile(collection, volumeId, volumeServer, &buf, verbose, writer, grpcDialOption); err != nil {
if err := vcd.copyVolumeIndexFile(collection, volumeId, volumeServer, &buf); err != nil {
return err
}
if verbose {
fmt.Fprintf(writer, "load collection %s volume %d index size %d from %s ...\n", collection, volumeId, buf.Len(), volumeServer)
}
vcd.writeVerbose("load collection %s volume %d index size %d from %s ...\n", collection, volumeId, buf.Len(), volumeServer)
return db.LoadFilterFromReaderAt(bytes.NewReader(buf.Bytes()), true, false)
}
func copyVolumeIndexFile(collection string, volumeId uint32, volumeServer pb.ServerAddress, buf *bytes.Buffer, verbose bool, writer io.Writer, grpcDialOption grpc.DialOption) error {
func (vcd *volumeCheckDisk) copyVolumeIndexFile(collection string, volumeId uint32, volumeServer pb.ServerAddress, buf *bytes.Buffer) error {
return operation.WithVolumeServerClient(true, volumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
return operation.WithVolumeServerClient(true, volumeServer, vcd.grpcDialOption(), func(volumeServerClient volume_server_pb.VolumeServerClient) error {
ext := ".idx"
@ -403,7 +462,7 @@ func copyVolumeIndexFile(collection string, volumeId uint32, volumeServer pb.Ser
return fmt.Errorf("failed to start copying volume %d%s: %v", volumeId, ext, err)
}
err = writeToBuffer(copyFileClient, buf)
err = vcd.writeToBuffer(copyFileClient, buf)
if err != nil {
return fmt.Errorf("failed to copy %d%s from %s: %v", volumeId, ext, volumeServer, err)
}
@ -413,7 +472,7 @@ func copyVolumeIndexFile(collection string, volumeId uint32, volumeServer pb.Ser
})
}
func writeToBuffer(client volume_server_pb.VolumeServer_CopyFileClient, buf *bytes.Buffer) error {
func (vcd *volumeCheckDisk) writeToBuffer(client volume_server_pb.VolumeServer_CopyFileClient, buf *bytes.Buffer) error {
for {
resp, receiveErr := client.Recv()
if receiveErr == io.EOF {
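To summarize the refactor in this file: the long per-call parameter lists are replaced by a single volumeCheckDisk value that carries the options and now-timestamp. A condensed sketch of the intended call pattern, mirroring the Do method above (the concrete values are illustrative):

// Options travel on one struct, so helpers like shouldSkipVolume and
// syncTwoReplicas no longer need long parameter lists.
vcd := &volumeCheckDisk{
	commandEnv:         commandEnv,
	writer:             writer,
	now:                time.Now(),
	slowMode:           false,
	verbose:            true,
	applyChanges:       false, // dry-run unless -apply (or the deprecated -force) is set
	syncDeletions:      false,
	nonRepairThreshold: 0.3,
}
if skip, err := vcd.shouldSkipVolume(a, b); err != nil {
	vcd.write("error checking if volume %d should be skipped: %v\n", a.info.Id, err)
} else if !skip {
	if err := vcd.syncTwoReplicas(a, b); err != nil {
		vcd.write("sync volume %d on %s and %s: %v\n", a.info.Id, a.location.dataNode.Id, b.location.dataNode.Id, err)
	}
}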

265
weed/shell/command_volume_check_disk_test.go

@ -1,7 +1,7 @@
package shell
import (
"os"
"bytes"
"testing"
"time"
@ -13,63 +13,288 @@ type testCommandVolumeCheckDisk struct {
}
type shouldSkipVolume struct {
name string
a VolumeReplica
b VolumeReplica
pulseTimeAtSecond int64
syncDeletions bool
shouldSkipVolume bool
}
func TestShouldSkipVolume(t *testing.T) {
cmdVolumeCheckDisk := testCommandVolumeCheckDisk{}
cmdVolumeCheckDisk.writer = os.Stdout
var tests = []shouldSkipVolume{
{
VolumeReplica{nil, &master_pb.VolumeInformationMessage{
name: "identical volumes should be skipped",
a: VolumeReplica{nil, &master_pb.VolumeInformationMessage{
FileCount: 1000,
DeleteCount: 100,
ModifiedAtSecond: 1696583300},
},
VolumeReplica{nil, &master_pb.VolumeInformationMessage{
b: VolumeReplica{nil, &master_pb.VolumeInformationMessage{
FileCount: 1000,
DeleteCount: 100,
ModifiedAtSecond: 1696583300},
},
1696583400,
true,
pulseTimeAtSecond: 1696583400,
syncDeletions: true,
shouldSkipVolume: true,
},
{
VolumeReplica{nil, &master_pb.VolumeInformationMessage{
name: "different file counts should not be skipped",
a: VolumeReplica{nil, &master_pb.VolumeInformationMessage{
FileCount: 1001,
DeleteCount: 100,
ModifiedAtSecond: 1696583300},
},
VolumeReplica{nil, &master_pb.VolumeInformationMessage{
b: VolumeReplica{nil, &master_pb.VolumeInformationMessage{
FileCount: 1000,
DeleteCount: 100,
ModifiedAtSecond: 1696583300},
},
1696583400,
false,
pulseTimeAtSecond: 1696583400,
syncDeletions: true,
shouldSkipVolume: false,
},
{
VolumeReplica{nil, &master_pb.VolumeInformationMessage{
name: "different delete counts with syncDeletions enabled should not be skipped",
a: VolumeReplica{nil, &master_pb.VolumeInformationMessage{
FileCount: 1000,
DeleteCount: 100,
ModifiedAtSecond: 1696583300},
},
VolumeReplica{nil, &master_pb.VolumeInformationMessage{
b: VolumeReplica{nil, &master_pb.VolumeInformationMessage{
FileCount: 1000,
DeleteCount: 101,
ModifiedAtSecond: 1696583300},
},
1696583400,
false,
pulseTimeAtSecond: 1696583400,
syncDeletions: true,
shouldSkipVolume: false,
},
{
name: "different delete counts with syncDeletions disabled should be skipped if file counts match",
a: VolumeReplica{nil, &master_pb.VolumeInformationMessage{
FileCount: 1000,
DeleteCount: 100,
ModifiedAtSecond: 1696583300},
},
b: VolumeReplica{nil, &master_pb.VolumeInformationMessage{
FileCount: 1000,
DeleteCount: 101,
ModifiedAtSecond: 1696583300},
},
pulseTimeAtSecond: 1696583400,
syncDeletions: false,
shouldSkipVolume: true,
},
// Edge case: Zero file and delete counts
{
name: "volumes with zero file counts should be skipped",
a: VolumeReplica{nil, &master_pb.VolumeInformationMessage{
FileCount: 0,
DeleteCount: 0,
ModifiedAtSecond: 1696583300},
},
b: VolumeReplica{nil, &master_pb.VolumeInformationMessage{
FileCount: 0,
DeleteCount: 0,
ModifiedAtSecond: 1696583300},
},
pulseTimeAtSecond: 1696583400,
syncDeletions: true,
shouldSkipVolume: true,
},
{
name: "volumes with zero and non-zero file counts should not be skipped",
a: VolumeReplica{nil, &master_pb.VolumeInformationMessage{
FileCount: 1,
DeleteCount: 0,
ModifiedAtSecond: 1696583300},
},
b: VolumeReplica{nil, &master_pb.VolumeInformationMessage{
FileCount: 0,
DeleteCount: 0,
ModifiedAtSecond: 1696583300},
},
pulseTimeAtSecond: 1696583400,
syncDeletions: true,
shouldSkipVolume: false,
},
// Edge case: Recently modified volumes (after pulse time)
// Note: VolumePulsePeriod is 10 seconds, so pulse cutoff is now - 20 seconds
// When both volumes are recently modified, skip check to avoid false positives
{
name: "recently modified volumes with same file counts should be skipped",
a: VolumeReplica{nil, &master_pb.VolumeInformationMessage{
FileCount: 1000,
DeleteCount: 100,
ModifiedAtSecond: 1696583395}, // Modified 5 seconds ago
},
b: VolumeReplica{nil, &master_pb.VolumeInformationMessage{
FileCount: 1000,
DeleteCount: 100,
ModifiedAtSecond: 1696583390}, // Modified 10 seconds ago
},
pulseTimeAtSecond: 1696583400,
syncDeletions: true,
shouldSkipVolume: true, // Same counts = skip
},
{
name: "one volume modified before pulse cutoff with different file counts should not be skipped",
a: VolumeReplica{nil, &master_pb.VolumeInformationMessage{
FileCount: 1000,
DeleteCount: 100,
ModifiedAtSecond: 1696583370}, // Modified 30 seconds ago (before cutoff at -20s)
},
b: VolumeReplica{nil, &master_pb.VolumeInformationMessage{
FileCount: 999,
DeleteCount: 100,
ModifiedAtSecond: 1696583370}, // Same modification time
},
pulseTimeAtSecond: 1696583400,
syncDeletions: true,
shouldSkipVolume: false, // Different counts + old enough = needs sync
},
// Edge case: Different ModifiedAtSecond values, same file counts
{
name: "different modification times with same file counts should be skipped",
a: VolumeReplica{nil, &master_pb.VolumeInformationMessage{
FileCount: 1000,
DeleteCount: 100,
ModifiedAtSecond: 1696583300}, // 100 seconds before pulse time
},
b: VolumeReplica{nil, &master_pb.VolumeInformationMessage{
FileCount: 1000,
DeleteCount: 100,
ModifiedAtSecond: 1696583350}, // 50 seconds before pulse time
},
pulseTimeAtSecond: 1696583400,
syncDeletions: true,
shouldSkipVolume: true, // Same counts, both before cutoff
},
// Edge case: Very close to pulse time boundary
{
name: "volumes modified exactly at pulse cutoff boundary with different counts should not be skipped",
a: VolumeReplica{nil, &master_pb.VolumeInformationMessage{
FileCount: 1001,
DeleteCount: 100,
ModifiedAtSecond: 1696583379}, // Just before cutoff (pulseTime - 21s)
},
b: VolumeReplica{nil, &master_pb.VolumeInformationMessage{
FileCount: 1000,
DeleteCount: 100,
ModifiedAtSecond: 1696583379}, // Just before cutoff
},
pulseTimeAtSecond: 1696583400,
syncDeletions: true,
shouldSkipVolume: false, // At boundary with different counts - needs sync
},
{
name: "volumes modified just after pulse cutoff boundary with same counts should be skipped",
a: VolumeReplica{nil, &master_pb.VolumeInformationMessage{
FileCount: 1000,
DeleteCount: 100,
ModifiedAtSecond: 1696583381}, // Just after cutoff (pulseTime - 19s)
},
b: VolumeReplica{nil, &master_pb.VolumeInformationMessage{
FileCount: 1000,
DeleteCount: 100,
ModifiedAtSecond: 1696583381}, // Just after cutoff
},
pulseTimeAtSecond: 1696583400,
syncDeletions: true,
shouldSkipVolume: true, // Same counts + recent = skip to avoid false positive
},
// Edge case: Large file count differences
{
name: "large file count difference with old modification time should not be skipped",
a: VolumeReplica{nil, &master_pb.VolumeInformationMessage{
FileCount: 10000,
DeleteCount: 100,
ModifiedAtSecond: 1696583300},
},
b: VolumeReplica{nil, &master_pb.VolumeInformationMessage{
FileCount: 5000,
DeleteCount: 100,
ModifiedAtSecond: 1696583300},
},
pulseTimeAtSecond: 1696583400,
syncDeletions: true,
shouldSkipVolume: false, // Large difference requires sync
},
// Edge case: Both volumes modified AFTER pulse cutoff time
// When ModifiedAtSecond >= pulseTimeAtSecond for both volumes with same counts,
// the condition (a.info.FileCount != b.info.FileCount) is false, so we skip
// without calling eqVolumeFileCount
{
name: "both volumes modified after pulse cutoff with same file counts should be skipped",
a: VolumeReplica{nil, &master_pb.VolumeInformationMessage{
FileCount: 1000,
DeleteCount: 100,
ModifiedAtSecond: 1696583405}, // After pulse cutoff (1696583380)
},
b: VolumeReplica{nil, &master_pb.VolumeInformationMessage{
FileCount: 1000,
DeleteCount: 100,
ModifiedAtSecond: 1696583410}, // After pulse cutoff
},
pulseTimeAtSecond: 1696583400,
syncDeletions: true,
shouldSkipVolume: true, // Same counts = skip without calling eqVolumeFileCount
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var buf bytes.Buffer
vcd := &volumeCheckDisk{
writer: &buf,
now: time.Unix(tt.pulseTimeAtSecond, 0),
verbose: false, // reduce noise in tests
syncDeletions: tt.syncDeletions,
}
result, err := vcd.shouldSkipVolume(&tt.a, &tt.b)
if err != nil {
// In unit tests, we expect no errors from shouldSkipVolume
// since we're using test data without actual network calls
t.Errorf("shouldSkipVolume() returned unexpected error: %v", err)
return
}
if result != tt.shouldSkipVolume {
t.Errorf("shouldSkipVolume() = %v, want %v\nFileCount A=%d B=%d, DeleteCount A=%d B=%d",
result, tt.shouldSkipVolume,
tt.a.info.FileCount, tt.b.info.FileCount,
tt.a.info.DeleteCount, tt.b.info.DeleteCount)
}
})
}
}
// TestVolumeCheckDiskHelperMethods tests the helper methods on volumeCheckDisk
func TestVolumeCheckDiskHelperMethods(t *testing.T) {
var buf bytes.Buffer
vcd := &volumeCheckDisk{
writer: &buf,
verbose: true,
}
for num, tt := range tests {
pulseTime := time.Unix(tt.pulseTimeAtSecond, 0)
if isShould := cmdVolumeCheckDisk.shouldSkipVolume(&tt.a, &tt.b, pulseTime, true, true); isShould != tt.shouldSkipVolume {
t.Fatalf("result of should skip volume is unexpected for %d test", num)
}
// Test write method
vcd.write("test %s\n", "message")
if buf.String() != "test message\n" {
t.Errorf("write() output = %q, want %q", buf.String(), "test message\n")
}
// Test writeVerbose with verbose=true
buf.Reset()
vcd.writeVerbose("verbose %d\n", 123)
if buf.String() != "verbose 123\n" {
t.Errorf("writeVerbose() with verbose=true output = %q, want %q", buf.String(), "verbose 123\n")
}
// Test writeVerbose with verbose=false
buf.Reset()
vcd.verbose = false
vcd.writeVerbose("should not appear\n")
if buf.String() != "" {
t.Errorf("writeVerbose() with verbose=false output = %q, want empty", buf.String())
}
}
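The boundary cases above hinge on one piece of arithmetic: with constants.VolumePulsePeriod at 10 seconds (per the comments in the tests), the cutoff is the pulse time minus 20 seconds. Worked out for pulseTimeAtSecond 1696583400:

cutoff := time.Unix(1696583400, 0).Add(-constants.VolumePulsePeriod * 2).Unix() // 1696583380
// ModifiedAtSecond 1696583379 < cutoff  -> old enough; differing counts trigger a sync check
// ModifiedAtSecond 1696583381 >= cutoff -> recent; matching counts are trusted and the pair is skipped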

10
weed/shell/command_volume_delete_empty.go

@ -26,7 +26,7 @@ func (c *commandVolumeDeleteEmpty) Name() string {
func (c *commandVolumeDeleteEmpty) Help() string {
return `delete empty volumes from all volume servers
volume.deleteEmpty -quietFor=24h -force
volume.deleteEmpty -quietFor=24h -apply
This command deletes all empty volumes from one volume server.
@ -41,11 +41,15 @@ func (c *commandVolumeDeleteEmpty) Do(args []string, commandEnv *CommandEnv, wri
volDeleteCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
quietPeriod := volDeleteCommand.Duration("quietFor", 24*time.Hour, "select empty volumes with no recent writes, avoid newly created ones")
applyBalancing := volDeleteCommand.Bool("force", false, "apply to delete empty volumes")
applyBalancing := volDeleteCommand.Bool("apply", false, "apply to delete empty volumes")
// TODO: remove this alias
applyBalancingAlias := volDeleteCommand.Bool("force", false, "apply to delete empty volumes (alias for -apply)")
if err = volDeleteCommand.Parse(args); err != nil {
return nil
}
infoAboutSimulationMode(writer, *applyBalancing, "-force")
handleDeprecatedForceFlag(writer, volDeleteCommand, applyBalancingAlias, applyBalancing)
infoAboutSimulationMode(writer, *applyBalancing, "-apply")
if err = commandEnv.confirmIsLocked(args); err != nil {
return

32
weed/shell/command_volume_fix_replication.go

@ -16,7 +16,6 @@ import (
"github.com/seaweedfs/seaweedfs/weed/storage/needle_map" "github.com/seaweedfs/seaweedfs/weed/storage/needle_map"
"github.com/seaweedfs/seaweedfs/weed/storage/types" "github.com/seaweedfs/seaweedfs/weed/storage/types"
"github.com/seaweedfs/seaweedfs/weed/util" "github.com/seaweedfs/seaweedfs/weed/util"
"google.golang.org/grpc"
"github.com/seaweedfs/seaweedfs/weed/operation" "github.com/seaweedfs/seaweedfs/weed/operation"
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb" "github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
@ -46,7 +45,7 @@ func (c *commandVolumeFixReplication) Help() string {
If the free slots satisfy the replication requirement, the volume content is copied over and mounted. If the free slots satisfy the replication requirement, the volume content is copied over and mounted.
volume.fix.replication # do not take action volume.fix.replication # do not take action
volume.fix.replication -force # actually deleting or copying the volume files and mount the volume
volume.fix.replication -apply # actually deleting or copying the volume files and mount the volume
volume.fix.replication -collectionPattern=important* # fix any collections with prefix "important" volume.fix.replication -collectionPattern=important* # fix any collections with prefix "important"
Note: Note:
@ -66,7 +65,9 @@ func (c *commandVolumeFixReplication) Do(args []string, commandEnv *CommandEnv,
volFixReplicationCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) volFixReplicationCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
c.collectionPattern = volFixReplicationCommand.String("collectionPattern", "", "match with wildcard characters '*' and '?'") c.collectionPattern = volFixReplicationCommand.String("collectionPattern", "", "match with wildcard characters '*' and '?'")
applyChanges := volFixReplicationCommand.Bool("force", false, "apply the fix")
applyChanges := volFixReplicationCommand.Bool("apply", false, "apply the fix")
// TODO: remove this alias
applyChangesAlias := volFixReplicationCommand.Bool("force", false, "apply the fix (alias for -apply)")
doDelete := volFixReplicationCommand.Bool("doDelete", true, "Also delete over-replicated volumes besides fixing under-replication") doDelete := volFixReplicationCommand.Bool("doDelete", true, "Also delete over-replicated volumes besides fixing under-replication")
doCheck := volFixReplicationCommand.Bool("doCheck", true, "Also check synchronization before deleting") doCheck := volFixReplicationCommand.Bool("doCheck", true, "Also check synchronization before deleting")
maxParallelization := volFixReplicationCommand.Int("maxParallelization", DefaultMaxParallelization, "run up to X tasks in parallel, whenever possible") maxParallelization := volFixReplicationCommand.Int("maxParallelization", DefaultMaxParallelization, "run up to X tasks in parallel, whenever possible")
@ -76,8 +77,9 @@ func (c *commandVolumeFixReplication) Do(args []string, commandEnv *CommandEnv,
if err = volFixReplicationCommand.Parse(args); err != nil { if err = volFixReplicationCommand.Parse(args); err != nil {
return nil return nil
} }
infoAboutSimulationMode(writer, *applyChanges, "-force")
handleDeprecatedForceFlag(writer, volFixReplicationCommand, applyChangesAlias, applyChanges)
infoAboutSimulationMode(writer, *applyChanges, "-apply")
commandEnv.noLock = !*applyChanges commandEnv.noLock = !*applyChanges
if err = commandEnv.confirmIsLocked(args); *applyChanges && err != nil { if err = commandEnv.confirmIsLocked(args); *applyChanges && err != nil {
@ -201,22 +203,32 @@ func collectVolumeReplicaLocations(topologyInfo *master_pb.TopologyInfo) (map[ui
type SelectOneVolumeFunc func(replicas []*VolumeReplica, replicaPlacement *super_block.ReplicaPlacement) *VolumeReplica type SelectOneVolumeFunc func(replicas []*VolumeReplica, replicaPlacement *super_block.ReplicaPlacement) *VolumeReplica
func checkOneVolume(a *VolumeReplica, b *VolumeReplica, writer io.Writer, grpcDialOption grpc.DialOption) (err error) {
func checkOneVolume(a *VolumeReplica, b *VolumeReplica, writer io.Writer, commandEnv *CommandEnv) (err error) {
aDB, bDB := needle_map.NewMemDb(), needle_map.NewMemDb() aDB, bDB := needle_map.NewMemDb(), needle_map.NewMemDb()
defer func() { defer func() {
aDB.Close() aDB.Close()
bDB.Close() bDB.Close()
}() }()
vcd := &volumeCheckDisk{
writer: writer,
commandEnv: commandEnv,
now: time.Now(),
verbose: false,
applyChanges: true,
syncDeletions: false,
nonRepairThreshold: float64(1),
}
// read index db
readIndexDbCutoffFrom := uint64(time.Now().UnixNano())
if err = readIndexDatabase(aDB, a.info.Collection, a.info.Id, pb.NewServerAddressFromDataNode(a.location.dataNode), false, writer, grpcDialOption); err != nil {
if err = vcd.readIndexDatabase(aDB, a.info.Collection, a.info.Id, pb.NewServerAddressFromDataNode(a.location.dataNode)); err != nil {
return fmt.Errorf("readIndexDatabase %s volume %d: %v", a.location.dataNode, a.info.Id, err)
}
if err := readIndexDatabase(bDB, b.info.Collection, b.info.Id, pb.NewServerAddressFromDataNode(b.location.dataNode), false, writer, grpcDialOption); err != nil {
if err := vcd.readIndexDatabase(bDB, b.info.Collection, b.info.Id, pb.NewServerAddressFromDataNode(b.location.dataNode)); err != nil {
return fmt.Errorf("readIndexDatabase %s volume %d: %v", b.location.dataNode, b.info.Id, err)
}
if _, err = doVolumeCheckDisk(aDB, bDB, a, b, false, writer, true, false, float64(1), readIndexDbCutoffFrom, grpcDialOption); err != nil {
if _, err = vcd.doVolumeCheckDisk(aDB, bDB, a, b); err != nil {
return fmt.Errorf("doVolumeCheckDisk source:%s target:%s volume %d: %v", a.location.dataNode.Id, b.location.dataNode.Id, a.info.Id, err)
}
return
@ -268,7 +280,7 @@ func (c *commandVolumeFixReplication) deleteOneVolume(commandEnv *CommandEnv, wr
if replicaB.location.dataNode == replica.location.dataNode {
continue
}
if checkErr = checkOneVolume(replica, replicaB, writer, commandEnv.option.GrpcDialOption); checkErr != nil {
if checkErr = checkOneVolume(replica, replicaB, writer, commandEnv); checkErr != nil {
fmt.Fprintf(writer, "sync volume %d on %s and %s: %v\n", replica.info.Id, replica.location.dataNode.Id, replicaB.location.dataNode.Id, checkErr)
break
}

12
weed/shell/command_volume_server_evacuate.go

@ -6,12 +6,13 @@ import (
"io" "io"
"os" "os"
"slices"
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb" "github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding" "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
"github.com/seaweedfs/seaweedfs/weed/storage/needle" "github.com/seaweedfs/seaweedfs/weed/storage/needle"
"github.com/seaweedfs/seaweedfs/weed/storage/super_block" "github.com/seaweedfs/seaweedfs/weed/storage/super_block"
"github.com/seaweedfs/seaweedfs/weed/storage/types" "github.com/seaweedfs/seaweedfs/weed/storage/types"
"slices"
) )
func init() { func init() {
@ -57,17 +58,20 @@ func (c *commandVolumeServerEvacuate) Do(args []string, commandEnv *CommandEnv,
c.volumeRack = vsEvacuateCommand.String("rack", "", "source rack for the volume servers") c.volumeRack = vsEvacuateCommand.String("rack", "", "source rack for the volume servers")
c.targetServer = vsEvacuateCommand.String("target", "", "<host>:<port> of target volume") c.targetServer = vsEvacuateCommand.String("target", "", "<host>:<port> of target volume")
skipNonMoveable := vsEvacuateCommand.Bool("skipNonMoveable", false, "skip volumes that can not be moved") skipNonMoveable := vsEvacuateCommand.Bool("skipNonMoveable", false, "skip volumes that can not be moved")
applyChange := vsEvacuateCommand.Bool("force", false, "actually apply the changes")
applyChange := vsEvacuateCommand.Bool("apply", false, "actually apply the changes")
// TODO: remove this alias
applyChangeAlias := vsEvacuateCommand.Bool("force", false, "actually apply the changes (alias for -apply)")
retryCount := vsEvacuateCommand.Int("retry", 0, "how many times to retry") retryCount := vsEvacuateCommand.Int("retry", 0, "how many times to retry")
if err = vsEvacuateCommand.Parse(args); err != nil { if err = vsEvacuateCommand.Parse(args); err != nil {
return nil return nil
} }
infoAboutSimulationMode(writer, *applyChange, "-force")
handleDeprecatedForceFlag(writer, vsEvacuateCommand, applyChangeAlias, applyChange)
infoAboutSimulationMode(writer, *applyChange, "-apply")
if err = commandEnv.confirmIsLocked(args); err != nil && *applyChange { if err = commandEnv.confirmIsLocked(args); err != nil && *applyChange {
return return
} }
if *volumeServer == "" && *c.volumeRack == "" { if *volumeServer == "" && *c.volumeRack == "" {
return fmt.Errorf("need to specify volume server by -node=<host>:<port> or source rack") return fmt.Errorf("need to specify volume server by -node=<host>:<port> or source rack")
} }

21
weed/shell/command_volume_server_leave.go

@ -4,11 +4,12 @@ import (
"context" "context"
"flag" "flag"
"fmt" "fmt"
"io"
"github.com/seaweedfs/seaweedfs/weed/operation" "github.com/seaweedfs/seaweedfs/weed/operation"
"github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
"google.golang.org/grpc" "google.golang.org/grpc"
"io"
) )
func init() { func init() {
@ -25,7 +26,7 @@ func (c *commandVolumeServerLeave) Name() string {
func (c *commandVolumeServerLeave) Help() string { func (c *commandVolumeServerLeave) Help() string {
return `stop a volume server from sending heartbeats to the master return `stop a volume server from sending heartbeats to the master
volumeServer.leave -node <volume server host:port> -force
volumeServer.leave -node <volume server host:port> [-apply]
This command enables gracefully shutting down the volume server.
The volume server will stop sending heartbeats to the master.
@@ -43,11 +44,17 @@ func (c *commandVolumeServerLeave) Do(args []string, commandEnv *CommandEnv, wri
vsLeaveCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
volumeServer := vsLeaveCommand.String("node", "", "<host>:<port> of the volume server")
applyChanges := vsLeaveCommand.Bool("apply", false, "apply the changes")
// TODO: remove this alias
applyChangesAlias := vsLeaveCommand.Bool("force", false, "apply the changes (alias for -apply)")
if err = vsLeaveCommand.Parse(args); err != nil {
return nil
}
if err = commandEnv.confirmIsLocked(args); err != nil {
handleDeprecatedForceFlag(writer, vsLeaveCommand, applyChangesAlias, applyChanges)
infoAboutSimulationMode(writer, *applyChanges, "-apply")
if err = commandEnv.confirmIsLocked(args); err != nil && *applyChanges {
return
}
@@ -55,11 +62,15 @@ func (c *commandVolumeServerLeave) Do(args []string, commandEnv *CommandEnv, wri
return fmt.Errorf("need to specify volume server by -node=<host>:<port>")
}
return volumeServerLeave(commandEnv.option.GrpcDialOption, pb.ServerAddress(*volumeServer), writer)
return volumeServerLeave(commandEnv.option.GrpcDialOption, pb.ServerAddress(*volumeServer), writer, *applyChanges)
}
func volumeServerLeave(grpcDialOption grpc.DialOption, volumeServer pb.ServerAddress, writer io.Writer) (err error) {
func volumeServerLeave(grpcDialOption grpc.DialOption, volumeServer pb.ServerAddress, writer io.Writer, applyChanges bool) (err error) {
if !applyChanges {
fmt.Fprintf(writer, "Would ask volume server %s to leave (dry-run)\n", volumeServer)
return nil
}
return operation.WithVolumeServerClient(false, volumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
_, leaveErr := volumeServerClient.VolumeServerLeave(context.Background(), &volume_server_pb.VolumeServerLeaveRequest{})
if leaveErr != nil {
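Both this hunk and the evacuate hunk route their dry-run notice through infoAboutSimulationMode(writer, *applyChanges, "-apply"), which is also defined outside this diff. The following is only a sketch of the behavior implied by its call sites, assuming it simply prints a reminder when the apply flag is not set.

package shell

import (
	"fmt"
	"io"
)

// infoAboutSimulationMode (sketch): remind the user that nothing will be changed
// until the named apply flag is passed.
func infoAboutSimulationMode(writer io.Writer, applyChanges bool, applyFlagName string) {
	if applyChanges {
		return
	}
	fmt.Fprintf(writer, "Running in simulation mode. Use %s to apply the changes.\n", applyFlagName)
}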

9
weed/shell/command_volume_tier_move.go

@@ -66,19 +66,22 @@ func (c *commandVolumeTierMove) Do(args []string, commandEnv *CommandEnv, writer
source := tierCommand.String("fromDiskType", "", "the source disk type")
target := tierCommand.String("toDiskType", "", "the target disk type")
parallelLimit := tierCommand.Int("parallelLimit", 0, "limit the number of parallel copying jobs")
applyChange := tierCommand.Bool("force", false, "actually apply the changes")
applyChange := tierCommand.Bool("apply", false, "actually apply the changes")
// TODO: remove this alias
applyChangeAlias := tierCommand.Bool("force", false, "actually apply the changes (alias for -apply)")
ioBytePerSecond := tierCommand.Int64("ioBytePerSecond", 0, "limit the speed of move")
replicationString := tierCommand.String("toReplication", "", "the new target replication setting")
if err = tierCommand.Parse(args); err != nil {
return nil
}
infoAboutSimulationMode(writer, *applyChange, "-force")
handleDeprecatedForceFlag(writer, tierCommand, applyChangeAlias, applyChange)
infoAboutSimulationMode(writer, *applyChange, "-apply")
if err = commandEnv.confirmIsLocked(args); err != nil {
return
}
fromDiskType := types.ToDiskType(*source)
toDiskType := types.ToDiskType(*target)
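Taken together, these command changes standardize the dry-run convention: without -apply a command only reports what it would do, and the deprecated -force flag keeps working as an alias for now. For example, running the tier-move command with -fromDiskType hdd -toDiskType ssd would simulate the moves, while adding -apply (or, temporarily, -force) performs them.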

12
weed/storage/volume_read.go

@@ -165,16 +165,8 @@ func (v *Volume) readNeedleDataInto(n *needle.Needle, readOption *ReadOption, wr
toWrite := min(count, int(offset+size-x))
if toWrite > 0 {
crc = crc.Update(buf[0:toWrite])
// the crc.Value() function is to be deprecated. this double checking is for backward compatibility
// with seaweed version using crc.Value() instead of uint32(crc), which appears in commit 056c480eb
// and switch appeared in version 3.09.
if offset == 0 && size == int64(n.DataSize) && int64(count) == size && (n.Checksum != crc && uint32(n.Checksum) != crc.Value()) {
// This check works only if the buffer is big enough to hold the whole needle data
// and we ask for all needle data.
// Otherwise we cannot check the validity of partially acquired data.
stats.VolumeServerHandlerCounter.WithLabelValues(stats.ErrorCRC).Inc()
return fmt.Errorf("ReadNeedleData checksum %v expected %v for Needle: %v,%v", crc, n.Checksum, v.Id, n)
}
// Note: CRC validation happens after the loop completes (see below)
// to avoid performance overhead in the hot read path
if _, err = writer.Write(buf[0:toWrite]); err != nil {
return fmt.Errorf("ReadNeedleData write: %w", err)
}
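The added comment states that CRC validation now happens once after the read loop, but the post-loop code itself lies outside this hunk. A minimal sketch of what that check might look like, reusing the names and comparisons from the removed in-loop check (crc, n, offset, size, v, stats) rather than the actual committed code:

// After the read loop: compare the accumulated CRC against the stored checksum.
// The comparison is only meaningful when the whole needle data was requested,
// i.e. the loop has fed every byte of n.DataSize through crc.Update.
if offset == 0 && size == int64(n.DataSize) && n.Checksum != crc && uint32(n.Checksum) != crc.Value() {
	stats.VolumeServerHandlerCounter.WithLabelValues(stats.ErrorCRC).Inc()
	return fmt.Errorf("ReadNeedleData checksum %v expected %v for Needle: %v,%v", crc, n.Checksum, v.Id, n)
}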
