
Merge branch 'master' into allow_delete_objects_by_TTL

Branch: pull/7426/head
Author: Konstantin Lebedev, 4 weeks ago (committed by GitHub)
Commit: d1b5d95d84
Signature: GPG key ID B5690EEEBB952194 (no known key found for this signature in database)
1. .github/workflows/helm_ci.yml (2 changes)
2. go.mod (32 changes)
3. go.sum (64 changes)
4. k8s/charts/seaweedfs/Chart.yaml (2 changes)
5. test/s3/retention/s3_bucket_delete_with_lock_test.go (249 changes)
6. weed/command/benchmark.go (2 changes)
7. weed/command/download.go (6 changes)
8. weed/s3api/auth_credentials_subscribe.go (2 changes)
9. weed/s3api/auto_signature_v4_test.go (2 changes)
10. weed/s3api/filer_multipart.go (2 changes)
11. weed/s3api/s3_constants/s3_actions.go (1 change)
12. weed/s3api/s3api_bucket_config.go (4 changes)
13. weed/s3api/s3api_bucket_handlers.go (181 changes)
14. weed/s3api/s3api_object_handlers_acl.go (4 changes)
15. weed/s3api/s3api_object_handlers_list.go (4 changes)
16. weed/s3api/s3api_object_handlers_put.go (10 changes)
17. weed/s3api/s3api_object_versioning.go (18 changes)
18. weed/util/net_timeout.go (10 changes)
19. weed/util/version/constants.go (4 changes)

.github/workflows/helm_ci.yml (2 changes)

@@ -45,7 +45,7 @@ jobs:
run: ct lint --target-branch ${{ github.event.repository.default_branch }} --all --validate-maintainers=false --chart-dirs k8s/charts
- name: Create kind cluster
uses: helm/kind-action@v1.12.0
uses: helm/kind-action@v1.13.0
- name: Run chart-testing (install)
run: ct install --target-branch ${{ github.event.repository.default_branch }} --all --chart-dirs k8s/charts

go.mod (32 changes)

@@ -7,7 +7,7 @@ toolchain go1.24.1
require (
cloud.google.com/go v0.121.6 // indirect
cloud.google.com/go/pubsub v1.50.1
cloud.google.com/go/storage v1.57.0
cloud.google.com/go/storage v1.57.1
github.com/Shopify/sarama v1.38.1
github.com/aws/aws-sdk-go v1.55.8
github.com/beorn7/perks v1.0.1 // indirect
@@ -91,7 +91,7 @@ require (
github.com/xdg-go/stringprep v1.0.4 // indirect
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect
go.etcd.io/etcd/client/v3 v3.6.5
go.mongodb.org/mongo-driver v1.17.4
go.mongodb.org/mongo-driver v1.17.6
go.opencensus.io v0.24.0 // indirect
gocloud.dev v0.43.0
gocloud.dev/pubsub/natspubsub v0.43.0
@@ -125,10 +125,10 @@
github.com/a-h/templ v0.3.943
github.com/arangodb/go-driver v1.6.7
github.com/armon/go-metrics v0.4.1
github.com/aws/aws-sdk-go-v2 v1.39.4
github.com/aws/aws-sdk-go-v2 v1.39.5
github.com/aws/aws-sdk-go-v2/config v1.31.3
github.com/aws/aws-sdk-go-v2/credentials v1.18.19
github.com/aws/aws-sdk-go-v2/service/s3 v1.88.3
github.com/aws/aws-sdk-go-v2/credentials v1.18.20
github.com/aws/aws-sdk-go-v2/service/s3 v1.89.1
github.com/cognusion/imaging v1.0.2
github.com/fluent/fluent-logger-golang v1.10.1
github.com/getsentry/sentry-go v0.36.1
@@ -259,22 +259,22 @@
github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc // indirect
github.com/arangodb/go-velocypack v0.0.0-20200318135517-5af53c29c67e // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.11 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.2 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.12 // indirect
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.18.4 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.11 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.11 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.12 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.12 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.9 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.12 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.2 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.9 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.11 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.9 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.3 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.12 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.12 // indirect
github.com/aws/aws-sdk-go-v2/service/sns v1.34.7 // indirect
github.com/aws/aws-sdk-go-v2/service/sqs v1.38.8 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.29.8 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.3 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.38.9 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.30.0 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.4 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.39.0 // indirect
github.com/aws/smithy-go v1.23.1 // indirect
github.com/boltdb/bolt v1.3.1 // indirect
github.com/bradenaw/juniper v0.15.3 // indirect

go.sum (64 changes)

@@ -477,8 +477,8 @@ cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq
cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc=
cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s=
cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y=
cloud.google.com/go/storage v1.57.0 h1:4g7NB7Ta7KetVbOMpCqy89C+Vg5VE8scqlSHUPm7Rds=
cloud.google.com/go/storage v1.57.0/go.mod h1:329cwlpzALLgJuu8beyJ/uvQznDHpa2U5lGjWednkzg=
cloud.google.com/go/storage v1.57.1 h1:gzao6odNJ7dR3XXYvAgPK+Iw4fVPPznEPPyNjbaVkq8=
cloud.google.com/go/storage v1.57.1/go.mod h1:329cwlpzALLgJuu8beyJ/uvQznDHpa2U5lGjWednkzg=
cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w=
cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I=
cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4=
@@ -664,46 +664,46 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3d
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/aws/aws-sdk-go v1.55.8 h1:JRmEUbU52aJQZ2AjX4q4Wu7t4uZjOu71uyNmaWlUkJQ=
github.com/aws/aws-sdk-go v1.55.8/go.mod h1:ZkViS9AqA6otK+JBBNH2++sx1sgxrPKcSzPPvQkUtXk=
github.com/aws/aws-sdk-go-v2 v1.39.4 h1:qTsQKcdQPHnfGYBBs+Btl8QwxJeoWcOcPcixK90mRhg=
github.com/aws/aws-sdk-go-v2 v1.39.4/go.mod h1:yWSxrnioGUZ4WVv9TgMrNUeLV3PFESn/v+6T/Su8gnM=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1 h1:i8p8P4diljCr60PpJp6qZXNlgX4m2yQFpYk+9ZT+J4E=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1/go.mod h1:ddqbooRZYNoJ2dsTwOty16rM+/Aqmk/GOXrK8cg7V00=
github.com/aws/aws-sdk-go-v2 v1.39.5 h1:e/SXuia3rkFtapghJROrydtQpfQaaUgd1cUvyO1mp2w=
github.com/aws/aws-sdk-go-v2 v1.39.5/go.mod h1:yWSxrnioGUZ4WVv9TgMrNUeLV3PFESn/v+6T/Su8gnM=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.2 h1:t9yYsydLYNBk9cJ73rgPhPWqOh/52fcWDQB5b1JsKSY=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.2/go.mod h1:IusfVNTmiSN3t4rhxWFaBAqn+mcNdwKtPcV16eYdgko=
github.com/aws/aws-sdk-go-v2/config v1.31.3 h1:RIb3yr/+PZ18YYNe6MDiG/3jVoJrPmdoCARwNkMGvco=
github.com/aws/aws-sdk-go-v2/config v1.31.3/go.mod h1:jjgx1n7x0FAKl6TnakqrpkHWWKcX3xfWtdnIJs5K9CE=
github.com/aws/aws-sdk-go-v2/credentials v1.18.19 h1:Jc1zzwkSY1QbkEcLujwqRTXOdvW8ppND3jRBb/VhBQc=
github.com/aws/aws-sdk-go-v2/credentials v1.18.19/go.mod h1:DIfQ9fAk5H0pGtnqfqkbSIzky82qYnGvh06ASQXXg6A=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.11 h1:X7X4YKb+c0rkI6d4uJ5tEMxXgCZ+jZ/D6mvkno8c8Uw=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.11/go.mod h1:EqM6vPZQsZHYvC4Cai35UDg/f5NCEU+vp0WfbVqVcZc=
github.com/aws/aws-sdk-go-v2/credentials v1.18.20 h1:KFndAnHd9NUuzikHjQ8D5CfFVO+bgELkmcGY8yAw98Q=
github.com/aws/aws-sdk-go-v2/credentials v1.18.20/go.mod h1:9mCi28a+fmBHSQ0UM79omkz6JtN+PEsvLrnG36uoUv0=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.12 h1:VO3FIM2TDbm0kqp6sFNR0PbioXJb/HzCDW6NtIZpIWE=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.12/go.mod h1:6C39gB8kg82tx3r72muZSrNhHia9rjGkX7ORaS2GKNE=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.18.4 h1:0SzCLoPRSK3qSydsaFQWugP+lOBCTPwfcBOm6222+UA=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.18.4/go.mod h1:JAet9FsBHjfdI+TnMBX4ModNNaQHAd3dc/Bk+cNsxeM=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.11 h1:7AANQZkF3ihM8fbdftpjhken0TP9sBzFbV/Ze/Y4HXA=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.11/go.mod h1:NTF4QCGkm6fzVwncpkFQqoquQyOolcyXfbpC98urj+c=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.11 h1:ShdtWUZT37LCAA4Mw2kJAJtzaszfSHFb5n25sdcv4YE=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.11/go.mod h1:7bUb2sSr2MZ3M/N+VyETLTQtInemHXb/Fl3s8CLzm0Y=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.12 h1:p/9flfXdoAnwJnuW9xHEAFY22R3A6skYkW19JFF9F+8=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.12/go.mod h1:ZTLHakoVCTtW8AaLGSwJ3LXqHD9uQKnOcv1TrpO6u2k=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.12 h1:2lTWFvRcnWFFLzHWmtddu5MTchc5Oj2OOey++99tPZ0=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.12/go.mod h1:hI92pK+ho8HVcWMHKHrK3Uml4pfG7wvL86FzO0LVtQQ=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.9 h1:w9LnHqTq8MEdlnyhV4Bwfizd65lfNCNgdlNC6mM5paE=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.9/go.mod h1:LGEP6EK4nj+bwWNdrvX/FnDTFowdBNwcSPuZu/ouFys=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.12 h1:itu4KHu8JK/N6NcLIISlf3LL1LccMqruLUXZ9y7yBZw=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.12/go.mod h1:i+6vTU3xziikTY3vcox23X8pPGW5X3wVgd1VZ7ha+x8=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.2 h1:xtuxji5CS0JknaXoACOunXOYOQzgfTvGAc9s2QdCJA4=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.2/go.mod h1:zxwi0DIR0rcRcgdbl7E2MSOvxDyyXGBlScvBkARFaLQ=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.9 h1:by3nYZLR9l8bUH7kgaMU4dJgYFjyRdFEfORlDpPILB4=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.9/go.mod h1:IWjQYlqw4EX9jw2g3qnEPPWvCE6bS8fKzhMed1OK7c8=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.11 h1:GpMf3z2KJa4RnJ0ew3Hac+hRFYLZ9DDjfgXjuW+pB54=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.11/go.mod h1:6MZP3ZI4QQsgUCFTwMZA2V0sEriNQ8k2hmoHF3qjimQ=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.9 h1:wuZ5uW2uhJR63zwNlqWH2W4aL4ZjeJP3o92/W+odDY4=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.9/go.mod h1:/G58M2fGszCrOzvJUkDdY8O9kycodunH4VdT5oBAqls=
github.com/aws/aws-sdk-go-v2/service/s3 v1.88.3 h1:P18I4ipbk+b/3dZNq5YYh+Hq6XC0vp5RWkLp1tJldDA=
github.com/aws/aws-sdk-go-v2/service/s3 v1.88.3/go.mod h1:Rm3gw2Jov6e6kDuamDvyIlZJDMYk97VeCZ82wz/mVZ0=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.3 h1:NEe7FaViguRQEm8zl8Ay/kC/QRsMtWUiCGZajQIsLdc=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.3/go.mod h1:JLuCKu5VfiLBBBl/5IzZILU7rxS0koQpHzMOCzycOJU=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.12 h1:MM8imH7NZ0ovIVX7D2RxfMDv7Jt9OiUXkcQ+GqywA7M=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.12/go.mod h1:gf4OGwdNkbEsb7elw2Sy76odfhwNktWII3WgvQgQQ6w=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.12 h1:R3uW0iKl8rgNEXNjVGliW/oMEh9fO/LlUEV8RvIFr1I=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.12/go.mod h1:XEttbEr5yqsw8ebi7vlDoGJJjMXRez4/s9pibpJyL5s=
github.com/aws/aws-sdk-go-v2/service/s3 v1.89.1 h1:Dq82AV+Qxpno/fG162eAhnD8d48t9S+GZCfz7yv1VeA=
github.com/aws/aws-sdk-go-v2/service/s3 v1.89.1/go.mod h1:MbKLznDKpf7PnSonNRUVYZzfP0CeLkRIUexeblgKcU4=
github.com/aws/aws-sdk-go-v2/service/sns v1.34.7 h1:OBuZE9Wt8h2imuRktu+WfjiTGrnYdCIJg8IX92aalHE=
github.com/aws/aws-sdk-go-v2/service/sns v1.34.7/go.mod h1:4WYoZAhHt+dWYpoOQUgkUKfuQbE6Gg/hW4oXE0pKS9U=
github.com/aws/aws-sdk-go-v2/service/sqs v1.38.8 h1:80dpSqWMwx2dAm30Ib7J6ucz1ZHfiv5OCRwN/EnCOXQ=
github.com/aws/aws-sdk-go-v2/service/sqs v1.38.8/go.mod h1:IzNt/udsXlETCdvBOL0nmyMe2t9cGmXmZgsdoZGYYhI=
github.com/aws/aws-sdk-go-v2/service/sso v1.29.8 h1:M5nimZmugcZUO9wG7iVtROxPhiqyZX6ejS1lxlDPbTU=
github.com/aws/aws-sdk-go-v2/service/sso v1.29.8/go.mod h1:mbef/pgKhtKRwrigPPs7SSSKZgytzP8PQ6P6JAAdqyM=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.3 h1:S5GuJZpYxE0lKeMHKn+BRTz6PTFpgThyJ+5mYfux7BM=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.3/go.mod h1:X4OF+BTd7HIb3L+tc4UlWHVrpgwZZIVENU15pRDVTI0=
github.com/aws/aws-sdk-go-v2/service/sts v1.38.9 h1:Ekml5vGg6sHSZLZJQJagefnVe6PmqC2oiRkBq4F7fU0=
github.com/aws/aws-sdk-go-v2/service/sts v1.38.9/go.mod h1:/e15V+o1zFHWdH3u7lpI3rVBcxszktIKuHKCY2/py+k=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.0 h1:xHXvxst78wBpJFgDW07xllOx0IAzbryrSdM4nMVQ4Dw=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.0/go.mod h1:/e8m+AO6HNPPqMyfKRtzZ9+mBF5/x1Wk8QiDva4m07I=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.4 h1:tBw2Qhf0kj4ZwtsVpDiVRU3zKLvjvjgIjHMKirxXg8M=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.4/go.mod h1:Deq4B7sRM6Awq/xyOBlxBdgW8/Z926KYNNaGMW2lrkA=
github.com/aws/aws-sdk-go-v2/service/sts v1.39.0 h1:C+BRMnasSYFcgDw8o9H5hzehKzXyAb9GY5v/8bP9DUY=
github.com/aws/aws-sdk-go-v2/service/sts v1.39.0/go.mod h1:4EjU+4mIx6+JqKQkruye+CaigV7alL3thVPfDd9VlMs=
github.com/aws/smithy-go v1.23.1 h1:sLvcH6dfAFwGkHLZ7dGiYF7aK6mg4CgKA/iDKjLDt9M=
github.com/aws/smithy-go v1.23.1/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
github.com/bazelbuild/rules_go v0.46.0 h1:CTefzjN/D3Cdn3rkrM6qMWuQj59OBcuOjyIp3m4hZ7s=
@@ -1830,8 +1830,8 @@ go.etcd.io/etcd/client/pkg/v3 v3.6.5 h1:Duz9fAzIZFhYWgRjp/FgNq2gO1jId9Yae/rLn3Rr
go.etcd.io/etcd/client/pkg/v3 v3.6.5/go.mod h1:8Wx3eGRPiy0qOFMZT/hfvdos+DjEaPxdIDiCDUv/FQk=
go.etcd.io/etcd/client/v3 v3.6.5 h1:yRwZNFBx/35VKHTcLDeO7XVLbCBFbPi+XV4OC3QJf2U=
go.etcd.io/etcd/client/v3 v3.6.5/go.mod h1:ZqwG/7TAFZ0BJ0jXRPoJjKQJtbFo/9NIY8uoFFKcCyo=
go.mongodb.org/mongo-driver v1.17.4 h1:jUorfmVzljjr0FLzYQsGP8cgN/qzzxlY9Vh0C9KFXVw=
go.mongodb.org/mongo-driver v1.17.4/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
go.mongodb.org/mongo-driver v1.17.6 h1:87JUG1wZfWsr6rIz3ZmpH90rL5tea7O3IHuSwHUpsss=
go.mongodb.org/mongo-driver v1.17.6/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=

k8s/charts/seaweedfs/Chart.yaml (2 changes)

@@ -1,6 +1,6 @@
apiVersion: v1
description: SeaweedFS
name: seaweedfs
appVersion: "4.00"
appVersion: "3.99"
# Dev note: Trigger a helm chart release by `git tag -a helm-<version>`
version: 4.0.400

test/s3/retention/s3_bucket_delete_with_lock_test.go (249 changes)

@@ -0,0 +1,249 @@
package retention
import (
"context"
"fmt"
"strings"
"testing"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestBucketDeletionWithObjectLock tests that buckets with object lock enabled
// cannot be deleted if they contain objects with active retention or legal hold
func TestBucketDeletionWithObjectLock(t *testing.T) {
client := getS3Client(t)
bucketName := getNewBucketName()
// Create bucket with object lock enabled
createBucketWithObjectLock(t, client, bucketName)
// Table-driven test for retention modes
retentionTestCases := []struct {
name string
lockMode types.ObjectLockMode
key string
content string
}{
{
name: "ComplianceRetention",
lockMode: types.ObjectLockModeCompliance,
key: "test-compliance-retention",
content: "test content for compliance retention",
},
{
name: "GovernanceRetention",
lockMode: types.ObjectLockModeGovernance,
key: "test-governance-retention",
content: "test content for governance retention",
},
}
for _, tc := range retentionTestCases {
t.Run(fmt.Sprintf("CannotDeleteBucketWith%s", tc.name), func(t *testing.T) {
retainUntilDate := time.Now().Add(10 * time.Second) // 10 seconds in future
// Upload object with retention
_, err := client.PutObject(context.Background(), &s3.PutObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(tc.key),
Body: strings.NewReader(tc.content),
ObjectLockMode: tc.lockMode,
ObjectLockRetainUntilDate: aws.Time(retainUntilDate),
})
require.NoError(t, err, "PutObject with %s should succeed", tc.name)
// Try to delete bucket - should fail because object has active retention
_, err = client.DeleteBucket(context.Background(), &s3.DeleteBucketInput{
Bucket: aws.String(bucketName),
})
require.Error(t, err, "DeleteBucket should fail when objects have active retention")
assert.Contains(t, err.Error(), "BucketNotEmpty", "Error should be BucketNotEmpty")
t.Logf("Expected error: %v", err)
// Wait for retention to expire with dynamic sleep based on actual retention time
t.Logf("Waiting for %s to expire...", tc.name)
time.Sleep(time.Until(retainUntilDate) + time.Second)
// Delete the object
_, err = client.DeleteObject(context.Background(), &s3.DeleteObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(tc.key),
})
require.NoError(t, err, "DeleteObject should succeed after retention expires")
// Clean up versions
deleteAllObjectVersions(t, client, bucketName)
})
}
// Test 3: Bucket deletion with legal hold should fail
t.Run("CannotDeleteBucketWithLegalHold", func(t *testing.T) {
key := "test-legal-hold"
content := "test content for legal hold"
// Upload object first
_, err := client.PutObject(context.Background(), &s3.PutObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
Body: strings.NewReader(content),
})
require.NoError(t, err, "PutObject should succeed")
// Set legal hold on the object
_, err = client.PutObjectLegalHold(context.Background(), &s3.PutObjectLegalHoldInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
LegalHold: &types.ObjectLockLegalHold{Status: types.ObjectLockLegalHoldStatusOn},
})
require.NoError(t, err, "PutObjectLegalHold should succeed")
// Try to delete bucket - should fail because object has active legal hold
_, err = client.DeleteBucket(context.Background(), &s3.DeleteBucketInput{
Bucket: aws.String(bucketName),
})
require.Error(t, err, "DeleteBucket should fail when objects have active legal hold")
assert.Contains(t, err.Error(), "BucketNotEmpty", "Error should be BucketNotEmpty")
t.Logf("Expected error: %v", err)
// Remove legal hold
_, err = client.PutObjectLegalHold(context.Background(), &s3.PutObjectLegalHoldInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
LegalHold: &types.ObjectLockLegalHold{Status: types.ObjectLockLegalHoldStatusOff},
})
require.NoError(t, err, "Removing legal hold should succeed")
// Delete the object
_, err = client.DeleteObject(context.Background(), &s3.DeleteObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
})
require.NoError(t, err, "DeleteObject should succeed after legal hold is removed")
// Clean up versions
deleteAllObjectVersions(t, client, bucketName)
})
// Test 4: Bucket deletion should succeed when no objects have active locks
t.Run("CanDeleteBucketWithoutActiveLocks", func(t *testing.T) {
// Make sure all objects are deleted
deleteAllObjectVersions(t, client, bucketName)
// Use retry mechanism for eventual consistency instead of fixed sleep
require.Eventually(t, func() bool {
_, err := client.DeleteBucket(context.Background(), &s3.DeleteBucketInput{
Bucket: aws.String(bucketName),
})
if err != nil {
t.Logf("Retrying DeleteBucket due to: %v", err)
return false
}
return true
}, 5*time.Second, 500*time.Millisecond, "DeleteBucket should succeed when no objects have active locks")
t.Logf("Successfully deleted bucket without active locks")
})
}
// TestBucketDeletionWithVersionedLocks tests deletion with versioned objects under lock
func TestBucketDeletionWithVersionedLocks(t *testing.T) {
client := getS3Client(t)
bucketName := getNewBucketName()
// Create bucket with object lock enabled
createBucketWithObjectLock(t, client, bucketName)
defer deleteBucket(t, client, bucketName) // Best effort cleanup
key := "test-versioned-locks"
content1 := "version 1 content"
content2 := "version 2 content"
retainUntilDate := time.Now().Add(10 * time.Second)
// Upload first version with retention
putResp1, err := client.PutObject(context.Background(), &s3.PutObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
Body: strings.NewReader(content1),
ObjectLockMode: types.ObjectLockModeGovernance,
ObjectLockRetainUntilDate: aws.Time(retainUntilDate),
})
require.NoError(t, err)
version1 := *putResp1.VersionId
// Upload second version with retention
putResp2, err := client.PutObject(context.Background(), &s3.PutObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
Body: strings.NewReader(content2),
ObjectLockMode: types.ObjectLockModeGovernance,
ObjectLockRetainUntilDate: aws.Time(retainUntilDate),
})
require.NoError(t, err)
version2 := *putResp2.VersionId
t.Logf("Created two versions: %s, %s", version1, version2)
// Try to delete bucket - should fail because versions have active retention
_, err = client.DeleteBucket(context.Background(), &s3.DeleteBucketInput{
Bucket: aws.String(bucketName),
})
require.Error(t, err, "DeleteBucket should fail when object versions have active retention")
assert.Contains(t, err.Error(), "BucketNotEmpty", "Error should be BucketNotEmpty")
t.Logf("Expected error: %v", err)
// Wait for retention to expire with dynamic sleep based on actual retention time
t.Logf("Waiting for retention to expire on all versions...")
time.Sleep(time.Until(retainUntilDate) + time.Second)
// Clean up all versions
deleteAllObjectVersions(t, client, bucketName)
// Wait for eventual consistency and attempt to delete the bucket with retry
require.Eventually(t, func() bool {
_, err := client.DeleteBucket(context.Background(), &s3.DeleteBucketInput{
Bucket: aws.String(bucketName),
})
if err != nil {
t.Logf("Retrying DeleteBucket due to: %v", err)
return false
}
return true
}, 5*time.Second, 500*time.Millisecond, "DeleteBucket should succeed after all locks expire")
t.Logf("Successfully deleted bucket after locks expired")
}
// TestBucketDeletionWithoutObjectLock tests that buckets without object lock can be deleted normally
func TestBucketDeletionWithoutObjectLock(t *testing.T) {
client := getS3Client(t)
bucketName := getNewBucketName()
// Create regular bucket without object lock
createBucket(t, client, bucketName)
// Upload some objects
for i := 0; i < 3; i++ {
_, err := client.PutObject(context.Background(), &s3.PutObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(fmt.Sprintf("test-object-%d", i)),
Body: strings.NewReader("test content"),
})
require.NoError(t, err)
}
// Delete all objects
deleteAllObjectVersions(t, client, bucketName)
// Delete bucket should succeed
_, err := client.DeleteBucket(context.Background(), &s3.DeleteBucketInput{
Bucket: aws.String(bucketName),
})
require.NoError(t, err, "DeleteBucket should succeed for regular bucket")
t.Logf("Successfully deleted regular bucket without object lock")
}
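
A note on running these: the tests reuse helpers from the existing retention test harness in this directory (getS3Client, getNewBucketName, createBucketWithObjectLock, deleteAllObjectVersions), so they assume that harness is already configured against a running SeaweedFS S3 endpoint. Under that assumption they can be invoked with the standard Go tooling, for example `go test -v -run TestBucketDeletion ./test/s3/retention/`, which matches all three new test functions.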

weed/command/benchmark.go (2 changes)

@@ -141,7 +141,7 @@ func runBenchmark(cmd *Command, args []string) bool {
fmt.Fprintln(os.Stderr, "Error: -readOnly and -writeOnly are mutually exclusive.")
return false
}
doWrite := true
doRead := true
if *b.readOnly {

weed/command/download.go (6 changes)

@@ -23,9 +23,9 @@ var (
)
type DownloadOptions struct {
master *string
server *string // deprecated, for backward compatibility
dir *string
master *string
server *string // deprecated, for backward compatibility
dir *string
}
func init() {

weed/s3api/auth_credentials_subscribe.go (2 changes)

@@ -109,7 +109,7 @@ func (s3a *S3ApiServer) updateBucketConfigCacheFromEntry(entry *filer_pb.Entry)
bucket := entry.Name
glog.V(3).Infof("updateBucketConfigCacheFromEntry: called for bucket %s, ExtObjectLockEnabledKey=%s",
glog.V(3).Infof("updateBucketConfigCacheFromEntry: called for bucket %s, ExtObjectLockEnabledKey=%s",
bucket, string(entry.Extended[s3_constants.ExtObjectLockEnabledKey]))
// Create new bucket config from the entry

weed/s3api/auto_signature_v4_test.go (2 changes)

@@ -491,7 +491,7 @@ func TestSignatureV4WithoutProxy(t *testing.T) {
// Set forwarded headers
r.Header.Set("Host", tt.host)
// First, verify that extractHostHeader returns the expected value
extractedHost := extractHostHeader(r)
if extractedHost != tt.expectedHost {

weed/s3api/filer_multipart.go (2 changes)

@@ -313,7 +313,7 @@ func (s3a *S3ApiServer) completeMultipartUpload(r *http.Request, input *s3.Compl
// For versioned buckets, create a version and return the version ID
versionId := generateVersionId()
versionFileName := s3a.getVersionFileName(versionId)
versionDir := dirName + "/" + entryName + ".versions"
versionDir := dirName + "/" + entryName + s3_constants.VersionsFolder
// Move the completed object to the versions directory
err = s3a.mkFile(versionDir, versionFileName, finalParts, func(versionEntry *filer_pb.Entry) {

weed/s3api/s3_constants/s3_actions.go (1 change)

@@ -27,5 +27,6 @@ const (
SeaweedStorageDestinationHeader = "x-seaweedfs-destination"
MultipartUploadsFolder = ".uploads"
VersionsFolder = ".versions"
FolderMimeType = "httpd/unix-directory"
)

weed/s3api/s3api_bucket_config.go (4 changes)

@@ -350,7 +350,7 @@ func (s3a *S3ApiServer) getBucketConfig(bucket string) (*BucketConfig, s3err.Err
// Extract configuration from extended attributes
if entry.Extended != nil {
glog.V(3).Infof("getBucketConfig: checking extended attributes for bucket %s, ExtObjectLockEnabledKey value=%s",
glog.V(3).Infof("getBucketConfig: checking extended attributes for bucket %s, ExtObjectLockEnabledKey value=%s",
bucket, string(entry.Extended[s3_constants.ExtObjectLockEnabledKey]))
if versioning, exists := entry.Extended[s3_constants.ExtVersioningKey]; exists {
config.Versioning = string(versioning)
@@ -435,7 +435,7 @@ func (s3a *S3ApiServer) updateBucketConfig(bucket string, updateFn func(*BucketC
glog.Errorf("updateBucketConfig: failed to store Object Lock configuration for bucket %s: %v", bucket, err)
return s3err.ErrInternalError
}
glog.V(3).Infof("updateBucketConfig: stored Object Lock config in extended attributes for bucket %s, key=%s, value=%s",
glog.V(3).Infof("updateBucketConfig: stored Object Lock config in extended attributes for bucket %s, key=%s, value=%s",
bucket, s3_constants.ExtObjectLockEnabledKey, string(config.Entry.Extended[s3_constants.ExtObjectLockEnabledKey]))
}

weed/s3api/s3api_bucket_handlers.go (181 changes)

@@ -10,7 +10,9 @@ import (
"github.com/seaweedfs/seaweedfs/weed/util"
"math"
"net/http"
"path"
"sort"
"strconv"
"strings"
"time"
@@ -252,6 +254,28 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque
return
}
// Check if bucket has object lock enabled
bucketConfig, errCode := s3a.getBucketConfig(bucket)
if errCode != s3err.ErrNone {
s3err.WriteErrorResponse(w, r, errCode)
return
}
// If object lock is enabled, check for objects with active locks
if bucketConfig.ObjectLockConfig != nil {
hasLockedObjects, checkErr := s3a.hasObjectsWithActiveLocks(bucket)
if checkErr != nil {
glog.Errorf("DeleteBucketHandler: failed to check for locked objects in bucket %s: %v", bucket, checkErr)
s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
return
}
if hasLockedObjects {
glog.V(3).Infof("DeleteBucketHandler: bucket %s has objects with active object locks, cannot delete", bucket)
s3err.WriteErrorResponse(w, r, s3err.ErrBucketNotEmpty)
return
}
}
err := s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
if !s3a.option.AllowDeleteBucketNotEmpty {
entries, _, err := s3a.list(s3a.option.BucketsPath+"/"+bucket, "", "", false, 2)
@@ -259,7 +283,9 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque
return fmt.Errorf("failed to list bucket %s: %v", bucket, err)
}
for _, entry := range entries {
if entry.Name != s3_constants.MultipartUploadsFolder {
// Allow bucket deletion if only special directories remain
if entry.Name != s3_constants.MultipartUploadsFolder &&
!strings.HasSuffix(entry.Name, s3_constants.VersionsFolder) {
return errors.New(s3err.GetAPIError(s3err.ErrBucketNotEmpty).Code)
}
}
@@ -300,6 +326,159 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque
s3err.WriteEmptyResponse(w, r, http.StatusNoContent)
}
// hasObjectsWithActiveLocks checks if any objects in the bucket have active retention or legal hold
func (s3a *S3ApiServer) hasObjectsWithActiveLocks(bucket string) (bool, error) {
bucketPath := s3a.option.BucketsPath + "/" + bucket
// Check all objects including versions for active locks
// Establish current time once at the start for consistency across the entire scan
hasLocks := false
currentTime := time.Now()
err := s3a.recursivelyCheckLocks(bucketPath, "", &hasLocks, currentTime)
if err != nil {
return false, fmt.Errorf("error checking for locked objects: %w", err)
}
return hasLocks, nil
}
const (
// lockCheckPaginationSize is the page size for listing directories during lock checks
lockCheckPaginationSize = 10000
)
// errStopPagination is a sentinel error to signal early termination of pagination
var errStopPagination = errors.New("stop pagination")
// paginateEntries iterates through directory entries with pagination
// Calls fn for each page of entries. If fn returns errStopPagination, iteration stops successfully.
func (s3a *S3ApiServer) paginateEntries(dir string, fn func(entries []*filer_pb.Entry) error) error {
startFrom := ""
for {
entries, isLast, err := s3a.list(dir, "", startFrom, false, lockCheckPaginationSize)
if err != nil {
// Fail-safe: propagate error to prevent incorrect bucket deletion
return fmt.Errorf("failed to list directory %s: %w", dir, err)
}
if err := fn(entries); err != nil {
if errors.Is(err, errStopPagination) {
return nil
}
return err
}
if isLast || len(entries) == 0 {
break
}
// Use the last entry name as the start point for next page
startFrom = entries[len(entries)-1].Name
}
return nil
}
// recursivelyCheckLocks recursively checks all objects and versions for active locks
// Uses pagination to handle directories with more than 10,000 entries
func (s3a *S3ApiServer) recursivelyCheckLocks(dir string, relativePath string, hasLocks *bool, currentTime time.Time) error {
if *hasLocks {
// Early exit if we've already found a locked object
return nil
}
// Process entries in the current directory with pagination
err := s3a.paginateEntries(dir, func(entries []*filer_pb.Entry) error {
for _, entry := range entries {
if *hasLocks {
// Early exit if we've already found a locked object
return errStopPagination
}
// Skip special directories (multipart uploads, etc)
if entry.Name == s3_constants.MultipartUploadsFolder {
continue
}
if entry.IsDirectory {
subDir := path.Join(dir, entry.Name)
if strings.HasSuffix(entry.Name, s3_constants.VersionsFolder) {
// If it's a .versions directory, check all version files with pagination
err := s3a.paginateEntries(subDir, func(versionEntries []*filer_pb.Entry) error {
for _, versionEntry := range versionEntries {
if s3a.entryHasActiveLock(versionEntry, currentTime) {
*hasLocks = true
glog.V(2).Infof("Found object with active lock in versions: %s/%s", subDir, versionEntry.Name)
return errStopPagination
}
}
return nil
})
if err != nil {
return err
}
} else {
// Recursively check other subdirectories
subRelativePath := path.Join(relativePath, entry.Name)
if err := s3a.recursivelyCheckLocks(subDir, subRelativePath, hasLocks, currentTime); err != nil {
return err
}
// Early exit if a locked object was found in the subdirectory
if *hasLocks {
return errStopPagination
}
}
} else {
// Check regular files for locks
if s3a.entryHasActiveLock(entry, currentTime) {
*hasLocks = true
objectPath := path.Join(relativePath, entry.Name)
glog.V(2).Infof("Found object with active lock: %s", objectPath)
return errStopPagination
}
}
}
return nil
})
return err
}
// entryHasActiveLock checks if an entry has an active retention or legal hold
func (s3a *S3ApiServer) entryHasActiveLock(entry *filer_pb.Entry, currentTime time.Time) bool {
if entry.Extended == nil {
return false
}
// Check for active legal hold
if legalHoldBytes, exists := entry.Extended[s3_constants.ExtLegalHoldKey]; exists {
if string(legalHoldBytes) == s3_constants.LegalHoldOn {
return true
}
}
// Check for active retention
if modeBytes, exists := entry.Extended[s3_constants.ExtObjectLockModeKey]; exists {
mode := string(modeBytes)
if mode == s3_constants.RetentionModeCompliance || mode == s3_constants.RetentionModeGovernance {
// Check if retention is still active
if dateBytes, dateExists := entry.Extended[s3_constants.ExtRetentionUntilDateKey]; dateExists {
timestamp, err := strconv.ParseInt(string(dateBytes), 10, 64)
if err != nil {
// Fail-safe: if we can't parse the retention date, assume the object is locked
// to prevent accidental data loss
glog.Warningf("Failed to parse retention date '%s' for entry, assuming locked: %v", string(dateBytes), err)
return true
}
retainUntil := time.Unix(timestamp, 0)
if retainUntil.After(currentTime) {
return true
}
}
}
}
return false
}
func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {
bucket, _ := s3_constants.GetBucketAndObject(r)
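
For readers following the new lock check, the per-entry rule that entryHasActiveLock applies can be restated as a small self-contained sketch. This is an illustration only, not the handler code: the attribute key and value strings below are placeholders standing in for the s3_constants values, whose literal contents are not shown in this diff.

// Sketch of the rule: an entry counts as locked if it carries an "ON" legal
// hold, or a COMPLIANCE/GOVERNANCE retention mode whose retain-until
// timestamp (Unix seconds) is still in the future. An unparseable retention
// date is treated as locked, mirroring the fail-safe above.
package main

import (
	"fmt"
	"strconv"
	"time"
)

// hasActiveLock uses placeholder keys ("legal-hold", "lock-mode",
// "retain-until"); the real handler reads the s3_constants.Ext* keys.
func hasActiveLock(extended map[string][]byte, now time.Time) bool {
	if string(extended["legal-hold"]) == "ON" {
		return true
	}
	mode := string(extended["lock-mode"])
	if mode != "COMPLIANCE" && mode != "GOVERNANCE" {
		return false
	}
	dateBytes, ok := extended["retain-until"]
	if !ok {
		return false // retention mode without a date: nothing to enforce
	}
	ts, err := strconv.ParseInt(string(dateBytes), 10, 64)
	if err != nil {
		return true // fail safe: assume locked rather than risk data loss
	}
	return time.Unix(ts, 0).After(now)
}

func main() {
	attrs := map[string][]byte{
		"lock-mode":    []byte("GOVERNANCE"),
		"retain-until": []byte(strconv.FormatInt(time.Now().Add(time.Hour).Unix(), 10)),
	}
	fmt.Println(hasActiveLock(attrs, time.Now())) // true until the hour elapses
}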

weed/s3api/s3api_object_handlers_acl.go (4 changes)

@@ -308,7 +308,7 @@ func (s3a *S3ApiServer) PutObjectAclHandler(w http.ResponseWriter, r *http.Reque
if versioningConfigured {
if versionId != "" && versionId != "null" {
// Versioned object - update the specific version file in .versions directory
updateDirectory = s3a.option.BucketsPath + "/" + bucket + "/" + object + ".versions"
updateDirectory = s3a.option.BucketsPath + "/" + bucket + "/" + object + s3_constants.VersionsFolder
} else {
// Latest version in versioned bucket - could be null version or versioned object
// Extract version ID from the entry to determine where it's stored
@@ -324,7 +324,7 @@ func (s3a *S3ApiServer) PutObjectAclHandler(w http.ResponseWriter, r *http.Reque
updateDirectory = s3a.option.BucketsPath + "/" + bucket
} else {
// Versioned object - stored in .versions directory
updateDirectory = s3a.option.BucketsPath + "/" + bucket + "/" + object + ".versions"
updateDirectory = s3a.option.BucketsPath + "/" + bucket + "/" + object + s3_constants.VersionsFolder
}
}
} else {

weed/s3api/s3api_object_handlers_list.go (4 changes)

@@ -517,7 +517,7 @@ func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, d
}
// Skip .versions directories in regular list operations but track them for logical object creation
if strings.HasSuffix(entry.Name, ".versions") {
if strings.HasSuffix(entry.Name, s3_constants.VersionsFolder) {
glog.V(4).Infof("Found .versions directory: %s", entry.Name)
versionsDirs = append(versionsDirs, entry.Name)
continue
@@ -572,7 +572,7 @@
}
// Extract object name from .versions directory name (remove .versions suffix)
baseObjectName := strings.TrimSuffix(versionsDir, ".versions")
baseObjectName := strings.TrimSuffix(versionsDir, s3_constants.VersionsFolder)
// Construct full object path relative to bucket
// dir is something like "/buckets/sea-test-1/Veeam/Backup/vbr/Config"

weed/s3api/s3api_object_handlers_put.go (10 changes)

@@ -465,7 +465,7 @@ func (s3a *S3ApiServer) putSuspendedVersioningObject(r *http.Request, bucket, ob
// Check if there's an existing null version in .versions directory and delete it
// This ensures suspended versioning properly overwrites the null version as per S3 spec
// Note: We only delete null versions, NOT regular versions (those should be preserved)
versionsObjectPath := normalizedObject + ".versions"
versionsObjectPath := normalizedObject + s3_constants.VersionsFolder
versionsDir := bucketDir + "/" + versionsObjectPath
entries, _, err := s3a.list(versionsDir, "", "", false, 1000)
if err == nil {
@@ -619,7 +619,7 @@
// when a new "null" version becomes the latest during suspended versioning
func (s3a *S3ApiServer) updateIsLatestFlagsForSuspendedVersioning(bucket, object string) error {
bucketDir := s3a.option.BucketsPath + "/" + bucket
versionsObjectPath := object + ".versions"
versionsObjectPath := object + s3_constants.VersionsFolder
versionsDir := bucketDir + "/" + versionsObjectPath
glog.V(2).Infof("updateIsLatestFlagsForSuspendedVersioning: updating flags for %s%s", bucket, object)
@@ -698,12 +698,12 @@ func (s3a *S3ApiServer) putVersionedObject(r *http.Request, bucket, object strin
// Upload directly to the versions directory
// We need to construct the object path relative to the bucket
versionObjectPath := normalizedObject + ".versions/" + versionFileName
versionObjectPath := normalizedObject + s3_constants.VersionsFolder + "/" + versionFileName
versionUploadUrl := s3a.toFilerUrl(bucket, versionObjectPath)
// Ensure the .versions directory exists before uploading
bucketDir := s3a.option.BucketsPath + "/" + bucket
versionsDir := normalizedObject + ".versions"
versionsDir := normalizedObject + s3_constants.VersionsFolder
err := s3a.mkdir(bucketDir, versionsDir, func(entry *filer_pb.Entry) {
entry.Attributes.Mime = s3_constants.FolderMimeType
})
@@ -793,7 +793,7 @@ func (s3a *S3ApiServer) putVersionedObject(r *http.Request, bucket, object strin
// updateLatestVersionInDirectory updates the .versions directory metadata to indicate the latest version
func (s3a *S3ApiServer) updateLatestVersionInDirectory(bucket, object, versionId, versionFileName string) error {
bucketDir := s3a.option.BucketsPath + "/" + bucket
versionsObjectPath := object + ".versions"
versionsObjectPath := object + s3_constants.VersionsFolder
// Get the current .versions directory entry with retry logic for filer consistency
var versionsEntry *filer_pb.Entry

weed/s3api/s3api_object_versioning.go (18 changes)

@@ -95,7 +95,7 @@ func generateVersionId() string {
// getVersionedObjectDir returns the directory path for storing object versions
func (s3a *S3ApiServer) getVersionedObjectDir(bucket, object string) string {
return path.Join(s3a.option.BucketsPath, bucket, object+".versions")
return path.Join(s3a.option.BucketsPath, bucket, object+s3_constants.VersionsFolder)
}
// getVersionFileName returns the filename for a specific version
@@ -116,7 +116,7 @@ func (s3a *S3ApiServer) createDeleteMarker(bucket, object string) (string, error
// Make sure to clean up the object path to remove leading slashes
cleanObject := strings.TrimPrefix(object, "/")
bucketDir := s3a.option.BucketsPath + "/" + bucket
versionsDir := bucketDir + "/" + cleanObject + ".versions"
versionsDir := bucketDir + "/" + cleanObject + s3_constants.VersionsFolder
// Create the delete marker entry in the .versions directory
err := s3a.mkFile(versionsDir, versionFileName, nil, func(entry *filer_pb.Entry) {
@@ -301,9 +301,9 @@ func (s3a *S3ApiServer) findVersionsRecursively(currentPath, relativePath string
}
// Check if this is a .versions directory
if strings.HasSuffix(entry.Name, ".versions") {
if strings.HasSuffix(entry.Name, s3_constants.VersionsFolder) {
// Extract object name from .versions directory name
objectKey := strings.TrimSuffix(entryPath, ".versions")
objectKey := strings.TrimSuffix(entryPath, s3_constants.VersionsFolder)
normalizedObjectKey := removeDuplicateSlashes(objectKey)
// Mark both keys as processed for backward compatibility
processedObjects[objectKey] = true
@@ -419,7 +419,7 @@ func (s3a *S3ApiServer) findVersionsRecursively(currentPath, relativePath string
}
// Check if a .versions directory exists for this object
versionsObjectPath := normalizedObjectKey + ".versions"
versionsObjectPath := normalizedObjectKey + s3_constants.VersionsFolder
_, versionsErr := s3a.getEntry(currentPath, versionsObjectPath)
if versionsErr == nil {
// .versions directory exists
@@ -497,7 +497,7 @@ func (s3a *S3ApiServer) getObjectVersionList(bucket, object string) ([]*ObjectVe
// All versions are now stored in the .versions directory only
bucketDir := s3a.option.BucketsPath + "/" + bucket
versionsObjectPath := object + ".versions"
versionsObjectPath := object + s3_constants.VersionsFolder
glog.V(2).Infof("getObjectVersionList: checking versions directory %s", versionsObjectPath)
// Get the .versions directory entry to read latest version metadata
@@ -676,7 +676,7 @@ func (s3a *S3ApiServer) deleteSpecificObjectVersion(bucket, object, versionId st
versionFile := s3a.getVersionFileName(versionId)
// Check if this is the latest version before attempting deletion (for potential metadata update)
versionsEntry, dirErr := s3a.getEntry(path.Join(s3a.option.BucketsPath, bucket), normalizedObject+".versions")
versionsEntry, dirErr := s3a.getEntry(path.Join(s3a.option.BucketsPath, bucket), normalizedObject+s3_constants.VersionsFolder)
isLatestVersion := false
if dirErr == nil && versionsEntry.Extended != nil {
if latestVersionIdBytes, hasLatest := versionsEntry.Extended[s3_constants.ExtLatestVersionIdKey]; hasLatest {
@@ -715,7 +715,7 @@ func (s3a *S3ApiServer) deleteSpecificObjectVersion(bucket, object, versionId st
func (s3a *S3ApiServer) updateLatestVersionAfterDeletion(bucket, object string) error {
bucketDir := s3a.option.BucketsPath + "/" + bucket
cleanObject := strings.TrimPrefix(object, "/")
versionsObjectPath := cleanObject + ".versions"
versionsObjectPath := cleanObject + s3_constants.VersionsFolder
versionsDir := bucketDir + "/" + versionsObjectPath
glog.V(1).Infof("updateLatestVersionAfterDeletion: updating latest version for %s/%s, listing %s", bucket, object, versionsDir)
@@ -847,7 +847,7 @@ func (s3a *S3ApiServer) getLatestObjectVersion(bucket, object string) (*filer_pb
normalizedObject := removeDuplicateSlashes(object)
bucketDir := s3a.option.BucketsPath + "/" + bucket
versionsObjectPath := normalizedObject + ".versions"
versionsObjectPath := normalizedObject + s3_constants.VersionsFolder
glog.V(1).Infof("getLatestObjectVersion: looking for latest version of %s/%s (normalized: %s)", bucket, object, normalizedObject)

weed/util/net_timeout.go (10 changes)

@@ -13,7 +13,7 @@ const (
// minThroughputBytesPerSecond defines the minimum expected throughput (4KB/s)
// Used to calculate timeout scaling based on data transferred
minThroughputBytesPerSecond = 4000
// graceTimeCapMultiplier caps the grace period for slow clients at 3x base timeout
// This prevents indefinite connections while allowing time for server-side chunk fetches
graceTimeCapMultiplier = 3
@@ -90,17 +90,17 @@ func (c *Conn) Write(b []byte) (count int, e error) {
// Calculate timeout with two components:
// 1. Base timeout scaled by cumulative data (minimum throughput of 4KB/s)
// 2. Additional grace period if there was a gap since last write (for chunk fetch delays)
// Calculate expected bytes per timeout period based on minimum throughput (4KB/s)
// Example: with WriteTimeout=30s, bytesPerTimeout = 4000 * 30 = 120KB
// After writing 1MB: multiplier = 1,000,000/120,000 + 1 ≈ 9, baseTimeout = 30s * 9 = 270s
bytesPerTimeout := calculateBytesPerTimeout(c.WriteTimeout)
timeoutMultiplier := time.Duration(c.bytesWritten/bytesPerTimeout + 1)
baseTimeout := c.WriteTimeout * timeoutMultiplier
// If it's been a while since last write, add grace time for server-side chunk fetches
// But cap it to avoid keeping slow clients connected indefinitely
//
//
// The comparison uses unscaled WriteTimeout intentionally: triggers grace when idle time
// exceeds base timeout, independent of throughput scaling.
if !c.lastWrite.IsZero() {
@@ -120,7 +120,7 @@ func (c *Conn) Write(b []byte) (count int, e error) {
baseTimeout += graceTime
}
}
err := c.Conn.SetWriteDeadline(now.Add(baseTimeout))
if err != nil {
return 0, err
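
The deadline arithmetic spelled out in the comments above can be reproduced standalone. The sketch below is not the Conn implementation, just the same calculation in isolation; it confirms the worked example of a 30s WriteTimeout and 1 MB written yielding a 270s deadline.

// Reconstruction of the write-deadline scaling: the base timeout grows with
// cumulative bytes written, assuming a minimum throughput of 4000 bytes/s.
package main

import (
	"fmt"
	"time"
)

const minThroughputBytesPerSecond = 4000

func scaledWriteTimeout(writeTimeout time.Duration, bytesWritten int64) time.Duration {
	// Bytes a client is expected to move per timeout period at 4 KB/s.
	bytesPerTimeout := int64(minThroughputBytesPerSecond * writeTimeout.Seconds())
	multiplier := time.Duration(bytesWritten/bytesPerTimeout + 1)
	return writeTimeout * multiplier
}

func main() {
	// 4000 * 30 = 120,000 bytes per period; 1,000,000/120,000 + 1 = 9;
	// 30s * 9 = 270s.
	fmt.Println(scaledWriteTimeout(30*time.Second, 1_000_000)) // 4m30s
}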

weed/util/version/constants.go (4 changes)

@@ -7,8 +7,8 @@ import (
)
var (
MAJOR_VERSION = int32(3)
MINOR_VERSION = int32(99)
MAJOR_VERSION = int32(4)
MINOR_VERSION = int32(00)
VERSION_NUMBER = fmt.Sprintf("%d.%02d", MAJOR_VERSION, MINOR_VERSION)
VERSION = util.SizeLimit + " " + VERSION_NUMBER
COMMIT = ""
