Browse Source

Merge branch 'master' into fix-volume-fsck

pull/8015/head
Jaehoon Kim 3 days ago
parent
commit
301765905e
  1. 1
      .gitignore
  2. 79
      README.md
  3. 16
      go.mod
  4. 30
      go.sum
  5. 27
      test/s3/iam/Makefile
  6. 114
      test/s3/iam/iam_config.json
  7. 6
      test/s3/iam/iam_config.local.json
  8. 8
      test/s3/iam/s3_iam_distributed_test.go
  9. 357
      test/s3/iam/s3_sts_assume_role_test.go
  10. 291
      test/s3/iam/s3_sts_ldap_test.go
  11. 82
      test/s3/iam/setup_all_tests.sh
  12. 2
      test/s3/iam/setup_keycloak.sh
  13. 4
      weed/iam/integration/advanced_policy_test.go
  14. 6
      weed/iam/integration/iam_integration_test.go
  15. 18
      weed/iam/integration/iam_manager.go
  16. 43
      weed/iam/integration/iam_manager_trust.go
  17. 571
      weed/iam/ldap/ldap_provider.go
  18. 24
      weed/iam/sts/cross_instance_token_test.go
  19. 24
      weed/iam/sts/distributed_sts_test.go
  20. 15
      weed/iam/sts/provider_factory.go
  21. 14
      weed/iam/sts/sts_service.go
  22. 5
      weed/pb/master.proto
  23. 185
      weed/pb/master_pb/master.pb.go
  24. 10
      weed/pb/volume_server.proto
  25. 1133
      weed/pb/volume_server_pb/volume_server.pb.go
  26. 777
      weed/pb/volume_server_pb/volume_server_grpc.pb.go
  27. 24
      weed/remote_storage/gcs/gcs_storage_client.go
  28. 15
      weed/s3api/auth_credentials_trust.go
  29. 12
      weed/s3api/auth_signature_v4_sts_test.go
  30. 8
      weed/s3api/s3_end_to_end_test.go
  31. 9
      weed/s3api/s3_iam_middleware.go
  32. 10
      weed/s3api/s3_jwt_auth_test.go
  33. 4
      weed/s3api/s3_multipart_iam_test.go
  34. 6
      weed/s3api/s3_presigned_url_iam_test.go
  35. 40
      weed/s3api/s3api_server.go
  36. 4
      weed/s3api/s3api_server_routing_test.go
  37. 429
      weed/s3api/s3api_sts.go
  38. 4
      weed/security/jwt.go
  39. 143
      weed/server/filer_jwt_test.go
  40. 40
      weed/server/filer_server_handlers.go
  41. 1
      weed/server/master_grpc_server.go
  42. 13
      weed/server/volume_grpc_client_to_master.go
  43. 9
      weed/storage/disk_location.go
  44. 76
      weed/storage/store.go
  45. 71
      weed/storage/store_state.go

1
.gitignore

@ -137,3 +137,4 @@ test/s3/remote_cache/primary-server.pid
# ID and PID files
*.id
*.pid
test/s3/iam/.test_env

79
README.md

@ -122,7 +122,7 @@ SeaweedFS is a simple and highly scalable distributed file system. There are two
1. to store billions of files!
2. to serve the files fast!
SeaweedFS started as an Object Store to handle small files efficiently.
SeaweedFS started as a blob store to handle small files efficiently.
Instead of managing all file metadata in a central master,
the central master only manages volumes on volume servers,
and these volume servers manage files and their metadata.
@ -134,16 +134,12 @@ It is so simple with O(1) disk reads that you are welcome to challenge the perfo
SeaweedFS started by implementing [Facebook's Haystack design paper](http://www.usenix.org/event/osdi10/tech/full_papers/Beaver.pdf).
Also, SeaweedFS implements erasure coding with ideas from
[f4: Facebook’s Warm BLOB Storage System](https://www.usenix.org/system/files/conference/osdi14/osdi14-paper-muralidhar.pdf), and has a lot of similarities with [Facebook’s Tectonic Filesystem](https://www.usenix.org/system/files/fast21-pan.pdf)
[f4: Facebook’s Warm BLOB Storage System](https://www.usenix.org/system/files/conference/osdi14/osdi14-paper-muralidhar.pdf), and has a lot of similarities with [Facebook’s Tectonic Filesystem](https://www.usenix.org/system/files/fast21-pan.pdf) and [Google's Colossus File System](https://cloud.google.com/blog/products/storage-data-transfer/a-peek-behind-colossus-googles-file-system)
On top of the object store, optional [Filer] can support directories and POSIX attributes.
On top of the blob store, optional [Filer] can support directories and POSIX attributes.
Filer is a separate linearly-scalable stateless server with customizable metadata stores,
e.g., MySql, Postgres, Redis, Cassandra, HBase, Mongodb, Elastic Search, LevelDB, RocksDB, Sqlite, MemSql, TiDB, Etcd, CockroachDB, YDB, etc.
For any distributed key value stores, the large values can be offloaded to SeaweedFS.
With the fast access speed and linearly scalable capacity,
SeaweedFS can work as a distributed [Key-Large-Value store][KeyLargeValueStore].
SeaweedFS can transparently integrate with the cloud.
With hot data on local cluster, and warm data on the cloud with O(1) access time,
SeaweedFS can achieve both fast local access time and elastic cloud storage capacity.
@ -153,13 +149,13 @@ Faster and cheaper than direct cloud storage!
[Back to TOC](#table-of-contents)
# Features #
## Additional Features ##
* Can choose no replication or different replication levels, rack and data center aware.
## Additional Blob Store Features ##
* Support different replication levels, with rack and data center aware.
* Automatic master servers failover - no single point of failure (SPOF).
* Automatic Gzip compression depending on file MIME type.
* Automatic compression depending on file MIME type.
* Automatic compaction to reclaim disk space after deletion or update.
* [Automatic entry TTL expiration][VolumeServerTTL].
* Any server with some disk space can add to the total storage space.
* Flexible Capacity Expansion: Any server with some disk space can add to the total storage space.
* Adding/Removing servers does **not** cause any data re-balancing unless triggered by admin commands.
* Optional picture resizing.
* Support ETag, Accept-Range, Last-Modified, etc.
@ -167,7 +163,7 @@ Faster and cheaper than direct cloud storage!
* Support rebalancing the writable and readonly volumes.
* [Customizable Multiple Storage Tiers][TieredStorage]: Customizable storage disk types to balance performance and cost.
* [Transparent cloud integration][CloudTier]: unlimited capacity via tiered cloud storage for warm data.
* [Erasure Coding for warm storage][ErasureCoding] Rack-Aware 10.4 erasure coding reduces storage cost and increases availability.
* [Erasure Coding for warm storage][ErasureCoding] Rack-Aware 10.4 erasure coding reduces storage cost and increases availability. Enterprise version can customize EC ratio.
[Back to TOC](#table-of-contents)
@ -213,7 +209,7 @@ Faster and cheaper than direct cloud storage!
[Back to TOC](#table-of-contents)
## Example: Using Seaweed Object Store ##
## Example: Using Seaweed Blob Store ##
By default, the master node runs on port 9333, and the volume nodes run on port 8080.
Let's start one master node, and two volume nodes on port 8080 and 8081. Ideally, they should be started from different machines. We'll use localhost as an example.
@ -233,23 +229,25 @@ SeaweedFS uses HTTP REST operations to read, write, and delete. The responses ar
> weed volume -dir="/tmp/data2" -max=10 -master="localhost:9333" -port=8081 &
```
### Write File ###
### Write A Blob ###
A blob, also referred to as a needle, a chunk, or mistakenly as a file, is just a byte array. It can have attributes, such as name, mime type, create or update time, etc. But basically it is just a byte array of a relatively small size, such as 2 MB ~ 64 MB. The size is not fixed.
To upload a file: first, send a HTTP POST, PUT, or GET request to `/dir/assign` to get an `fid` and a volume server URL:
To upload a blob: first, send a HTTP POST, PUT, or GET request to `/dir/assign` to get an `fid` and a volume server URL:
```
> curl http://localhost:9333/dir/assign
{"count":1,"fid":"3,01637037d6","url":"127.0.0.1:8080","publicUrl":"localhost:8080"}
```
Second, to store the file content, send a HTTP multi-part POST request to `url + '/' + fid` from the response:
Second, to store the blob content, send a HTTP multi-part POST request to `url + '/' + fid` from the response:
```
> curl -F file=@/home/chris/myphoto.jpg http://127.0.0.1:8080/3,01637037d6
{"name":"myphoto.jpg","size":43234,"eTag":"1cc0118e"}
```
To update, send another POST request with updated file content.
To update, send another POST request with updated blob content.
For deletion, send an HTTP DELETE request to the same `url + '/' + fid` URL:
@ -257,7 +255,7 @@ For deletion, send an HTTP DELETE request to the same `url + '/' + fid` URL:
> curl -X DELETE http://127.0.0.1:8080/3,01637037d6
```
### Save File Id ###
### Save Blob Id ###
Now, you can save the `fid`, 3,01637037d6 in this case, to a database field.
@ -269,9 +267,9 @@ The file key and file cookie are both coded in hex. You can store the <volume id
If stored as a string, in theory, you would need 8+1+16+8=33 bytes. A char(33) would be enough, if not more than enough, since most uses will not need 2^32 volumes.
If space is really a concern, you can store the file id in your own format. You would need one 4-byte integer for volume id, 8-byte long number for file key, and a 4-byte integer for the file cookie. So 16 bytes are more than enough.
If space is really a concern, you can store the file id in the binary format. You would need one 4-byte integer for volume id, 8-byte long number for file key, and a 4-byte integer for the file cookie. So 16 bytes are more than enough.
### Read File ###
### Read a Blob ###
Here is an example of how to render the URL.
@ -312,7 +310,7 @@ http://localhost:8080/3/01637037d6.jpg?height=200&width=200&mode=fill
### Rack-Aware and Data Center-Aware Replication ###
SeaweedFS applies the replication strategy at a volume level. So, when you are getting a file id, you can specify the replication strategy. For example:
SeaweedFS applies the replication strategy at a volume level. So, when you are getting a blob id, you can specify the replication strategy. For example:
```
curl http://localhost:9333/dir/assign?replication=001
@ -335,7 +333,7 @@ More details about replication can be found [on the wiki][Replication].
You can also set the default replication strategy when starting the master server.
### Allocate File Key on Specific Data Center ###
### Allocate Blob Key on Specific Data Center ###
Volume servers can be started with a specific data center name:
@ -344,7 +342,7 @@ Volume servers can be started with a specific data center name:
weed volume -dir=/tmp/2 -port=8081 -dataCenter=dc2
```
When requesting a file key, an optional "dataCenter" parameter can limit the assigned volume to the specific data center. For example, this specifies that the assigned volume should be limited to 'dc1':
When requesting a blob key, an optional "dataCenter" parameter can limit the assigned volume to the specific data center. For example, this specifies that the assigned volume should be limited to 'dc1':
```
http://localhost:9333/dir/assign?dataCenter=dc1
@ -363,15 +361,15 @@ When requesting a file key, an optional "dataCenter" parameter can limit the ass
[Back to TOC](#table-of-contents)
## Object Store Architecture ##
## Blob Store Architecture ##
Usually distributed file systems split each file into chunks, a central master keeps a mapping of filenames, chunk indices to chunk handles, and also which chunks each chunk server has.
Usually distributed file systems split each file into chunks. A central server keeps a mapping of filenames to chunks, and also which chunks each chunk server has.
The main drawback is that the central master can't handle many small files efficiently, and since all read requests need to go through the chunk master, so it might not scale well for many concurrent users.
The main drawback is that the central server can't handle many small files efficiently, and since all read requests need to go through the central master, it might not scale well for many concurrent users.
Instead of managing chunks, SeaweedFS manages data volumes in the master server. Each data volume is 32GB in size, and can hold a lot of files. And each storage node can have many data volumes. So the master node only needs to store the metadata about the volumes, which is a fairly small amount of data and is generally stable.
Instead of managing chunks, SeaweedFS manages data volumes in the master server. Each data volume is 32GB in size, and can hold a lot of blobs. And each storage node can have many data volumes. So the master node only needs to store the metadata about the volumes, which is a fairly small amount of data and is generally stable.
The actual file metadata is stored in each volume on volume servers. Since each volume server only manages metadata of files on its own disk, with only 16 bytes for each file, all file access can read file metadata just from memory and only needs one disk operation to actually read file data.
The actual blob metadata, which are the blob volume, offset, and size, is stored in each volume on volume servers. Since each volume server only manages metadata of blobs on its own disk, with only 16 bytes for each blob, all access can read the metadata just from memory and only needs one disk operation to actually read file data.
For comparison, consider that an xfs inode structure in Linux is 536 bytes.
@ -385,23 +383,13 @@ On each write request, the master server also generates a file key, which is a g
### Write and Read files ###
When a client sends a write request, the master server returns (volume id, file key, file cookie, volume node URL) for the file. The client then contacts the volume node and POSTs the file content.
When a client needs to read a file based on (volume id, file key, file cookie), it asks the master server by the volume id for the (volume node URL, volume node public URL), or retrieves this from a cache. Then the client can GET the content, or just render the URL on web pages and let browsers fetch the content.
Please see the example for details on the write-read process.
### Storage Size ###
In the current implementation, each volume can hold 32 gibibytes (32GiB or 8x2^32 bytes). This is because we align content to 8 bytes. We can easily increase this to 64GiB, or 128GiB, or more, by changing 2 lines of code, at the cost of some wasted padding space due to alignment.
When a client sends a write request, the master server returns (volume id, file key, file cookie, volume node URL) for the blob. The client then contacts the volume node and POSTs the blob content.
There can be 4 gibibytes (4GiB or 2^32 bytes) of volumes. So the total system size is 8 x 4GiB x 4GiB which is 128 exbibytes (128EiB or 2^67 bytes).
Each individual file size is limited to the volume size.
When a client needs to read a blob based on (volume id, file key, file cookie), it asks the master server by the volume id for the (volume node URL, volume node public URL), or retrieves this from a cache. Then the client can GET the content, or just render the URL on web pages and let browsers fetch the content.
### Saving memory ###
All file meta information stored on a volume server is readable from memory without disk access. Each file takes just a 16-byte map entry of <64bit key, 32bit offset, 32bit size>. Of course, each map entry has its own space cost for the map. But usually the disk space runs out before the memory does.
All blob metadata stored on a volume server is readable from memory without disk access. Each file takes just a 16-byte map entry of <64bit key, 32bit offset, 32bit size>. Of course, each map entry has its own space cost for the map. But usually the disk space runs out before the memory does.
### Tiered Storage to the cloud ###
@ -415,6 +403,12 @@ If the hot/warm data is split as 20/80, with 20 servers, you can achieve storage
[Back to TOC](#table-of-contents)
## SeaweedFS Filer ##
Built on top of the blob store, SeaweedFS Filer adds directory structure to create a file system. The directory structure is an interface that is implemented in many key-value stores or databases.
The content of a file is mapped to one or many blobs, distributed to multiple volumes on multiple volume servers.
## Compared to Other File Systems ##
Most other distributed file systems seem more complicated than necessary.
@ -661,5 +655,4 @@ The text of this page is available for modification and reuse under the terms of
[Back to TOC](#table-of-contents)
## Stargazers over time
[![Stargazers over time](https://starchart.cc/chrislusf/seaweedfs.svg)](https://starchart.cc/chrislusf/seaweedfs)
[![Stargazers over time](https://starchart.cc/seaweedfs/seaweedfs.svg?variant=adaptive)](https://starchart.cc/seaweedfs/seaweedfs)

16
go.mod

@ -49,7 +49,7 @@ require (
github.com/json-iterator/go v1.1.12
github.com/karlseguin/ccache/v2 v2.0.8
github.com/klauspost/compress v1.18.2
github.com/klauspost/reedsolomon v1.12.6
github.com/klauspost/reedsolomon v1.13.0
github.com/kurin/blazer v0.5.3
github.com/linxGnu/grocksdb v1.10.3
github.com/mailru/easyjson v0.7.7 // indirect
@ -105,7 +105,7 @@ require (
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
google.golang.org/api v0.258.0
google.golang.org/genproto v0.0.0-20250715232539-7130f93afb79 // indirect
google.golang.org/grpc v1.77.0
google.golang.org/grpc v1.78.0
google.golang.org/protobuf v1.36.11
gopkg.in/inf.v0 v0.9.1 // indirect
modernc.org/b v1.0.0 // indirect
@ -133,6 +133,7 @@ require (
github.com/getsentry/sentry-go v0.40.0
github.com/gin-contrib/sessions v1.0.4
github.com/gin-gonic/gin v1.11.0
github.com/go-ldap/ldap/v3 v3.4.12
github.com/golang-jwt/jwt/v5 v5.3.0
github.com/google/flatbuffers/go v0.0.0-20230108230133-3b8644d32c50
github.com/hashicorp/raft v1.7.3
@ -141,10 +142,10 @@ require (
github.com/jhump/protoreflect v1.17.0
github.com/lib/pq v1.10.9
github.com/linkedin/goavro/v2 v2.14.1
github.com/mattn/go-sqlite3 v1.14.32
github.com/mattn/go-sqlite3 v1.14.33
github.com/minio/crc64nvme v1.1.1
github.com/orcaman/concurrent-map/v2 v2.0.1
github.com/parquet-go/parquet-go v0.26.3
github.com/parquet-go/parquet-go v0.26.4
github.com/pkg/sftp v1.13.10
github.com/rabbitmq/amqp091-go v1.10.0
github.com/rclone/rclone v1.71.2
@ -152,7 +153,7 @@ require (
github.com/redis/go-redis/v9 v9.17.2
github.com/schollz/progressbar/v3 v3.19.0
github.com/seaweedfs/go-fuse/v2 v2.9.1
github.com/shirou/gopsutil/v4 v4.25.11
github.com/shirou/gopsutil/v4 v4.25.12
github.com/tarantool/go-tarantool/v2 v2.4.1
github.com/tikv/client-go/v2 v2.0.7
github.com/xeipuuv/gojsonschema v1.2.0
@ -183,6 +184,7 @@ require (
github.com/cockroachdb/redact v1.1.5 // indirect
github.com/cockroachdb/version v0.0.0-20250314144055-3860cd14adf2 // indirect
github.com/dave/dst v0.27.2 // indirect
github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 // indirect
github.com/goccy/go-yaml v1.18.0 // indirect
github.com/golang/geo v0.0.0-20210211234256-740aa86cb551 // indirect
github.com/google/go-cmp v0.7.0 // indirect
@ -367,7 +369,7 @@ require (
github.com/mattn/go-runewidth v0.0.16 // indirect
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 // indirect
github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4
github.com/montanaflynn/stats v0.7.1 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/nats-io/nats.go v1.43.0 // indirect
@ -448,7 +450,7 @@ require (
golang.org/x/arch v0.20.0 // indirect
golang.org/x/term v0.38.0 // indirect
golang.org/x/time v0.14.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20251213004720-97cd9d5aeac2 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/validator.v2 v2.0.1 // indirect

30
go.sum

@ -643,6 +643,8 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/alexbrainman/sspi v0.0.0-20250919150558-7d374ff0d59e h1:4dAU9FXIyQktpoUAgOJK3OTFc/xug0PCXYCqU0FgDKI=
github.com/alexbrainman/sspi v0.0.0-20250919150558-7d374ff0d59e/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/andybalholm/brotli v1.2.0 h1:ukwgCxwYrmACq68yiUqwIWnGY0cTPox/M94sVwToPjQ=
github.com/andybalholm/brotli v1.2.0/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUSvPlQ1pLaKY=
@ -936,6 +938,8 @@ github.com/gin-contrib/sse v1.1.0 h1:n0w2GMuUpWDVp7qSpvze6fAu9iRxJY4Hmj6AmBOU05w
github.com/gin-contrib/sse v1.1.0/go.mod h1:hxRZ5gVpWMT7Z0B0gSNYqqsSCNIJMjzvm6fqCz9vjwM=
github.com/gin-gonic/gin v1.11.0 h1:OW/6PLjyusp2PPXtyxKHU0RbX6I/l28FTdDlae5ueWk=
github.com/gin-gonic/gin v1.11.0/go.mod h1:+iq/FyxlGzII0KHiBGjuNn4UNENUlKbGlNmc+W50Dls=
github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 h1:BP4M0CvQ4S3TGls2FvczZtj5Re/2ZzkV9VwqPHH/3Bo=
github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
github.com/go-chi/chi/v5 v5.2.2 h1:CMwsvRVTbXVytCk1Wd72Zy1LAsAh9GxMmSNWLHCG618=
github.com/go-chi/chi/v5 v5.2.2/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops=
github.com/go-darwin/apfs v0.0.0-20211011131704-f84b94dbf348 h1:JnrjqG5iR07/8k7NqrLNilRsl3s1EPRQEGvbPyOce68=
@ -957,6 +961,8 @@ github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U=
github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk=
github.com/go-ldap/ldap/v3 v3.4.12 h1:1b81mv7MagXZ7+1r7cLTWmyuTqVqdwbtJSjC0DAp9s4=
github.com/go-ldap/ldap/v3 v3.4.12/go.mod h1:+SPAGcTtOfmGsCb3h1RFiq4xpp4N636G75OEace8lNo=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
@ -1328,8 +1334,8 @@ github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxh
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/klauspost/reedsolomon v1.12.6 h1:8pqE9aECQG/ZFitiUD1xK/E83zwosBAZtE3UbuZM8TQ=
github.com/klauspost/reedsolomon v1.12.6/go.mod h1:ggJT9lc71Vu+cSOPBlxGvBN6TfAS77qB4fp8vJ05NSA=
github.com/klauspost/reedsolomon v1.13.0 h1:E0Cmgf2kMuhZTj6eefnvpKC4/Q4jhCi9YIjcZjK4arc=
github.com/klauspost/reedsolomon v1.13.0/go.mod h1:ggJT9lc71Vu+cSOPBlxGvBN6TfAS77qB4fp8vJ05NSA=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@ -1390,8 +1396,8 @@ github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuErjs=
github.com/mattn/go-sqlite3 v1.14.32/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/mattn/go-sqlite3 v1.14.33 h1:A5blZ5ulQo2AtayQ9/limgHEkFreKj1Dv226a1K73s0=
github.com/mattn/go-sqlite3 v1.14.33/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY=
github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE=
@ -1472,8 +1478,8 @@ github.com/parquet-go/bitpack v1.0.0 h1:AUqzlKzPPXf2bCdjfj4sTeacrUwsT7NlcYDMUQxP
github.com/parquet-go/bitpack v1.0.0/go.mod h1:XnVk9TH+O40eOOmvpAVZ7K2ocQFrQwysLMnc6M/8lgs=
github.com/parquet-go/jsonlite v1.0.0 h1:87QNdi56wOfsE5bdgas0vRzHPxfJgzrXGml1zZdd7VU=
github.com/parquet-go/jsonlite v1.0.0/go.mod h1:nDjpkpL4EOtqs6NQugUsi0Rleq9sW/OtC1NnZEnxzF0=
github.com/parquet-go/parquet-go v0.26.3 h1:kJY+xmjcR7BH77tyHqasJpIl3kch/6EIO3TW4tFj69M=
github.com/parquet-go/parquet-go v0.26.3/go.mod h1:h9GcSt41Knf5qXI1tp1TfR8bDBUtvdUMzSKe26aZcHk=
github.com/parquet-go/parquet-go v0.26.4 h1:zJ3l8ef5WJZE2m63pKwyEJ2BhyDlgS0PfOEhuCQQU2A=
github.com/parquet-go/parquet-go v0.26.4/go.mod h1:h9GcSt41Knf5qXI1tp1TfR8bDBUtvdUMzSKe26aZcHk=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
@ -1629,8 +1635,8 @@ github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAm
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/shirou/gopsutil/v4 v4.25.11 h1:X53gB7muL9Gnwwo2evPSE+SfOrltMoR6V3xJAXZILTY=
github.com/shirou/gopsutil/v4 v4.25.11/go.mod h1:EivAfP5x2EhLp2ovdpKSozecVXn1TmuG7SMzs/Wh4PU=
github.com/shirou/gopsutil/v4 v4.25.12 h1:e7PvW/0RmJ8p8vPGJH4jvNkOyLmbkXgXW4m6ZPic6CY=
github.com/shirou/gopsutil/v4 v4.25.12/go.mod h1:EivAfP5x2EhLp2ovdpKSozecVXn1TmuG7SMzs/Wh4PU=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
@ -2556,8 +2562,8 @@ google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q
google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s=
google.golang.org/genproto v0.0.0-20250715232539-7130f93afb79 h1:Nt6z9UHqSlIdIGJdz6KhTIs2VRx/iOsA5iE8bmQNcxs=
google.golang.org/genproto v0.0.0-20250715232539-7130f93afb79/go.mod h1:kTmlBHMPqR5uCZPBvwa2B18mvubkjyY3CRLI0c6fj0s=
google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 h1:mepRgnBZa07I4TRuomDE4sTIYieg/osKmzIf4USdWS4=
google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo=
google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda h1:+2XxjfsAu6vqFxwGBRcHiMaDCuZiqXGDUDVWVtrFAnE=
google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo=
google.golang.org/genproto/googleapis/rpc v0.0.0-20251213004720-97cd9d5aeac2 h1:2I6GHUeJ/4shcDpoUlLs/2WPnhg7yJwvXtqcMJt9liA=
google.golang.org/genproto/googleapis/rpc v0.0.0-20251213004720-97cd9d5aeac2/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
@ -2600,8 +2606,8 @@ google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsA
google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY=
google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8=
google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM=
google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig=
google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc=
google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/grpc/examples v0.0.0-20250407062114-b368379ef8f6 h1:ExN12ndbJ608cboPYflpTny6mXSzPrDLh0iTaVrRrds=
google.golang.org/grpc/examples v0.0.0-20250407062114-b368379ef8f6/go.mod h1:6ytKWczdvnpnO+m+JiG9NjEDzR1FJfsnmJdG7B8QVZ8=

27
test/s3/iam/Makefile

@ -57,6 +57,10 @@ setup: ## Setup test environment
@echo "Setting up test environment..."
@mkdir -p test-volume-data/filerldb2
@mkdir -p test-volume-data/m9333
@if [ ! -f iam_config.json ]; then \
echo "Creating iam_config.json from iam_config.local.json..."; \
cp iam_config.local.json iam_config.json; \
fi
start-services: ## Start SeaweedFS services for testing
@echo "Starting SeaweedFS services using weed mini..."
@ -125,6 +129,10 @@ clean: stop-services ## Clean up test environment
@rm -rf test-volume-data
@rm -f weed-*.log
@rm -f *.test
@rm -f iam_config.json
@rm -f .test_env
@docker rm -f keycloak-iam-test >/dev/null 2>&1 || true
@docker rm -f openldap-iam-test >/dev/null 2>&1 || true
@echo "Cleanup complete"
logs: ## Show service logs
@ -176,6 +184,20 @@ test-context: ## Test only contextual policy enforcement
test-presigned: ## Test only presigned URL integration
go test -v -run TestS3IAMPresignedURLIntegration ./...
test-sts: ## Run all STS tests
go test -v -run "TestSTS" ./...
test-sts-assume-role: ## Run AssumeRole STS tests
go test -v -run "TestSTSAssumeRole" ./...
test-sts-ldap: ## Run LDAP STS tests
go test -v -run "TestSTSLDAP" ./...
test-sts-suite: start-services ## Run all STS tests with full environment setup/teardown
@echo "Running STS test suite..."
-go test -v -run "TestSTS" ./...
@$(MAKE) stop-services
# Performance testing
benchmark: setup start-services wait-for-services ## Run performance benchmarks
@echo "🏁 Running IAM performance benchmarks..."
@ -240,7 +262,7 @@ docker-build: ## Build custom SeaweedFS image for Docker tests
# All PHONY targets
.PHONY: test test-quick run-tests setup start-services stop-services wait-for-services clean logs status debug
.PHONY: test-auth test-policy test-expiration test-multipart test-bucket-policy test-context test-presigned
.PHONY: test-auth test-policy test-expiration test-multipart test-bucket-policy test-context test-presigned test-sts test-sts-assume-role test-sts-ldap
.PHONY: benchmark ci watch install-deps docker-test docker-up docker-down docker-logs docker-build
.PHONY: test-distributed test-performance test-stress test-versioning-stress test-keycloak-full test-all-previously-skipped setup-all-tests help-advanced
@ -275,6 +297,9 @@ test-all-previously-skipped: ## Run all previously skipped tests
@echo "🎯 Running all previously skipped tests..."
@./run_all_tests.sh
.PHONY: cleanup
cleanup: clean
setup-all-tests: ## Setup environment for all tests (including Keycloak)
@echo "🚀 Setting up complete test environment..."
@./setup_all_tests.sh

114
test/s3/iam/iam_config.json

@ -1,7 +1,7 @@
{
"sts": {
"tokenDuration": "1h",
"maxSessionLength": "12h",
"maxSessionLength": "12h",
"issuer": "seaweedfs-sts",
"signingKey": "dGVzdC1zaWduaW5nLWtleS0zMi1jaGFyYWN0ZXJzLWxvbmc="
},
@ -24,7 +24,11 @@
"clientSecret": "seaweedfs-s3-secret",
"jwksUri": "http://localhost:8080/realms/seaweedfs-test/protocol/openid-connect/certs",
"userInfoUri": "http://localhost:8080/realms/seaweedfs-test/protocol/openid-connect/userinfo",
"scopes": ["openid", "profile", "email"],
"scopes": [
"openid",
"profile",
"email"
],
"claimsMapping": {
"username": "preferred_username",
"email": "email",
@ -38,13 +42,13 @@
"role": "arn:aws:iam::role/KeycloakAdminRole"
},
{
"claim": "roles",
"claim": "roles",
"value": "s3-read-only",
"role": "arn:aws:iam::role/KeycloakReadOnlyRole"
},
{
"claim": "roles",
"value": "s3-write-only",
"value": "s3-write-only",
"role": "arn:aws:iam::role/KeycloakWriteOnlyRole"
},
{
@ -73,15 +77,19 @@
"Principal": {
"Federated": "test-oidc"
},
"Action": ["sts:AssumeRoleWithWebIdentity"]
"Action": [
"sts:AssumeRoleWithWebIdentity"
]
}
]
},
"attachedPolicies": ["S3AdminPolicy"],
"attachedPolicies": [
"S3AdminPolicy"
],
"description": "Admin role for testing"
},
{
"roleName": "TestReadOnlyRole",
"roleName": "TestReadOnlyRole",
"roleArn": "arn:aws:iam::role/TestReadOnlyRole",
"trustPolicy": {
"Version": "2012-10-17",
@ -91,15 +99,19 @@
"Principal": {
"Federated": "test-oidc"
},
"Action": ["sts:AssumeRoleWithWebIdentity"]
"Action": [
"sts:AssumeRoleWithWebIdentity"
]
}
]
},
"attachedPolicies": ["S3ReadOnlyPolicy"],
"attachedPolicies": [
"S3ReadOnlyPolicy"
],
"description": "Read-only role for testing"
},
{
"roleName": "TestWriteOnlyRole",
"roleName": "TestWriteOnlyRole",
"roleArn": "arn:aws:iam::role/TestWriteOnlyRole",
"trustPolicy": {
"Version": "2012-10-17",
@ -109,11 +121,15 @@
"Principal": {
"Federated": "test-oidc"
},
"Action": ["sts:AssumeRoleWithWebIdentity"]
"Action": [
"sts:AssumeRoleWithWebIdentity"
]
}
]
},
"attachedPolicies": ["S3WriteOnlyPolicy"],
"attachedPolicies": [
"S3WriteOnlyPolicy"
],
"description": "Write-only role for testing"
},
{
@ -127,11 +143,15 @@
"Principal": {
"Federated": "keycloak"
},
"Action": ["sts:AssumeRoleWithWebIdentity"]
"Action": [
"sts:AssumeRoleWithWebIdentity"
]
}
]
},
"attachedPolicies": ["S3AdminPolicy"],
"attachedPolicies": [
"S3AdminPolicy"
],
"description": "Admin role for Keycloak users"
},
{
@ -145,11 +165,15 @@
"Principal": {
"Federated": "keycloak"
},
"Action": ["sts:AssumeRoleWithWebIdentity"]
"Action": [
"sts:AssumeRoleWithWebIdentity"
]
}
]
},
"attachedPolicies": ["S3ReadOnlyPolicy"],
"attachedPolicies": [
"S3ReadOnlyPolicy"
],
"description": "Read-only role for Keycloak users"
},
{
@ -163,11 +187,15 @@
"Principal": {
"Federated": "keycloak"
},
"Action": ["sts:AssumeRoleWithWebIdentity"]
"Action": [
"sts:AssumeRoleWithWebIdentity"
]
}
]
},
"attachedPolicies": ["S3WriteOnlyPolicy"],
"attachedPolicies": [
"S3WriteOnlyPolicy"
],
"description": "Write-only role for Keycloak users"
},
{
@ -181,11 +209,15 @@
"Principal": {
"Federated": "keycloak"
},
"Action": ["sts:AssumeRoleWithWebIdentity"]
"Action": [
"sts:AssumeRoleWithWebIdentity"
]
}
]
},
"attachedPolicies": ["S3ReadWritePolicy"],
"attachedPolicies": [
"S3ReadWritePolicy"
],
"description": "Read-write role for Keycloak users"
}
],
@ -197,13 +229,21 @@
"Statement": [
{
"Effect": "Allow",
"Action": ["s3:*"],
"Resource": ["*"]
"Action": [
"s3:*"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": ["sts:ValidateSession"],
"Resource": ["*"]
"Action": [
"sts:ValidateSession"
],
"Resource": [
"*"
]
}
]
}
@ -211,7 +251,7 @@
{
"name": "S3ReadOnlyPolicy",
"document": {
"Version": "2012-10-17",
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
@ -226,8 +266,12 @@
},
{
"Effect": "Allow",
"Action": ["sts:ValidateSession"],
"Resource": ["*"]
"Action": [
"sts:ValidateSession"
],
"Resource": [
"*"
]
}
]
}
@ -260,8 +304,12 @@
},
{
"Effect": "Allow",
"Action": ["sts:ValidateSession"],
"Resource": ["*"]
"Action": [
"sts:ValidateSession"
],
"Resource": [
"*"
]
}
]
}
@ -283,8 +331,12 @@
},
{
"Effect": "Allow",
"Action": ["sts:ValidateSession"],
"Resource": ["*"]
"Action": [
"sts:ValidateSession"
],
"Resource": [
"*"
]
}
]
}

6
test/s3/iam/iam_config.local.json

@ -19,11 +19,11 @@
"type": "oidc",
"enabled": true,
"config": {
"issuer": "http://localhost:8090/realms/seaweedfs-test",
"issuer": "http://localhost:8080/realms/seaweedfs-test",
"clientId": "seaweedfs-s3",
"clientSecret": "seaweedfs-s3-secret",
"jwksUri": "http://localhost:8090/realms/seaweedfs-test/protocol/openid-connect/certs",
"userInfoUri": "http://localhost:8090/realms/seaweedfs-test/protocol/openid-connect/userinfo",
"jwksUri": "http://localhost:8080/realms/seaweedfs-test/protocol/openid-connect/certs",
"userInfoUri": "http://localhost:8080/realms/seaweedfs-test/protocol/openid-connect/userinfo",
"scopes": [
"openid",
"profile",

8
test/s3/iam/s3_iam_distributed_test.go

@ -30,10 +30,10 @@ func TestS3IAMDistributedTests(t *testing.T) {
// Create S3 clients that would connect to different gateway instances
// In a real distributed setup, these would point to different S3 gateway ports
client1, err := framework.CreateS3ClientWithJWT("test-user", "TestAdminRole")
client1, err := framework.CreateS3ClientWithJWT("admin-user", "TestAdminRole")
require.NoError(t, err)
client2, err := framework.CreateS3ClientWithJWT("test-user", "TestAdminRole")
client2, err := framework.CreateS3ClientWithJWT("admin-user", "TestAdminRole")
require.NoError(t, err)
// Both clients should be able to perform operations
@ -70,7 +70,7 @@ func TestS3IAMDistributedTests(t *testing.T) {
adminClient, err := framework.CreateS3ClientWithJWT("admin-user", "TestAdminRole")
require.NoError(t, err)
readOnlyClient, err := framework.CreateS3ClientWithJWT("readonly-user", "TestReadOnlyRole")
readOnlyClient, err := framework.CreateS3ClientWithJWT("read-user", "TestReadOnlyRole")
require.NoError(t, err)
bucketName := "test-distributed-roles"
@ -160,7 +160,7 @@ func TestS3IAMDistributedTests(t *testing.T) {
go func(goroutineID int) {
defer wg.Done()
client, err := framework.CreateS3ClientWithJWT(fmt.Sprintf("user-%d", goroutineID), "TestAdminRole")
client, err := framework.CreateS3ClientWithJWT("admin-user", "TestAdminRole")
if err != nil {
errors <- fmt.Errorf("failed to create S3 client for goroutine %d: %w", goroutineID, err)
return

357
test/s3/iam/s3_sts_assume_role_test.go

@ -0,0 +1,357 @@
package iam
import (
"encoding/xml"
"fmt"
"io"
"net/http"
"net/url"
"os"
"strings"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws/credentials"
v4 "github.com/aws/aws-sdk-go/aws/signer/v4"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// AssumeRoleTestResponse mirrors the XML document returned by the STS
// AssumeRole action: the temporary credentials plus the assumed-role user
// identity. Only the fields asserted by the tests in this file are mapped;
// other response elements are ignored by encoding/xml.
type AssumeRoleTestResponse struct {
	XMLName xml.Name `xml:"AssumeRoleResponse"`
	Result  struct {
		// Credentials holds the temporary key pair and session token.
		// Expiration is kept as the raw string from the response; tests
		// only assert that it is non-empty.
		Credentials struct {
			AccessKeyId     string `xml:"AccessKeyId"`
			SecretAccessKey string `xml:"SecretAccessKey"`
			SessionToken    string `xml:"SessionToken"`
			Expiration      string `xml:"Expiration"`
		} `xml:"Credentials"`
		// AssumedRoleUser identifies the resulting assumed-role principal.
		AssumedRoleUser struct {
			AssumedRoleId string `xml:"AssumedRoleId"`
			Arn           string `xml:"Arn"`
		} `xml:"AssumedRoleUser"`
	} `xml:"AssumeRoleResult"`
}
// TestSTSAssumeRoleValidation tests input validation for the AssumeRole endpoint.
//
// It requires a live STS endpoint (see 'make setup-all-tests') and fails fast
// when the endpoint or the AssumeRole action itself is unavailable, so that
// routing problems are not mistaken for validation failures.
func TestSTSAssumeRoleValidation(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}
	if !isSTSEndpointRunning(t) {
		t.Fatal("SeaweedFS STS endpoint is not running at", TestSTSEndpoint, "- please run 'make setup-all-tests' first")
	}
	// Check if AssumeRole is implemented by making a test call
	if !isAssumeRoleImplemented(t) {
		t.Fatal("AssumeRole action is not implemented in the running server - please rebuild weed binary with new code and restart the server")
	}

	// Each case sends a SigV4-signed request with one parameter missing or
	// out of range and expects a specific STS error code back.
	cases := []struct {
		name     string
		params   url.Values
		wantCode string
		reason   string
	}{
		{
			name: "missing_role_arn",
			params: url.Values{
				"Action":          {"AssumeRole"},
				"Version":         {"2011-06-15"},
				"RoleSessionName": {"test-session"},
				// RoleArn is missing
			},
			wantCode: "MissingParameter",
			reason:   "Should fail without RoleArn",
		},
		{
			name: "missing_role_session_name",
			params: url.Values{
				"Action":  {"AssumeRole"},
				"Version": {"2011-06-15"},
				"RoleArn": {"arn:aws:iam::role/test-role"},
				// RoleSessionName is missing
			},
			wantCode: "MissingParameter",
			reason:   "Should fail without RoleSessionName",
		},
		{
			name: "invalid_duration_too_short",
			params: url.Values{
				"Action":          {"AssumeRole"},
				"Version":         {"2011-06-15"},
				"RoleArn":         {"arn:aws:iam::role/test-role"},
				"RoleSessionName": {"test-session"},
				"DurationSeconds": {"100"}, // Less than 900 seconds minimum
			},
			wantCode: "InvalidParameterValue",
			reason:   "Should fail with DurationSeconds < 900",
		},
		{
			name: "invalid_duration_too_long",
			params: url.Values{
				"Action":          {"AssumeRole"},
				"Version":         {"2011-06-15"},
				"RoleArn":         {"arn:aws:iam::role/test-role"},
				"RoleSessionName": {"test-session"},
				"DurationSeconds": {"100000"}, // More than 43200 seconds maximum
			},
			wantCode: "InvalidParameterValue",
			reason:   "Should fail with DurationSeconds > 43200",
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			resp, err := callSTSAPIWithSigV4(t, tc.params, "test-access-key", "test-secret-key")
			require.NoError(t, err)
			defer resp.Body.Close()

			assert.NotEqual(t, http.StatusOK, resp.StatusCode, tc.reason)

			body, err := io.ReadAll(resp.Body)
			require.NoError(t, err)

			var errResp STSErrorTestResponse
			err = xml.Unmarshal(body, &errResp)
			require.NoError(t, err, "Failed to parse error response: %s", string(body))
			assert.Equal(t, tc.wantCode, errResp.Error.Code)
		})
	}

	t.Run("unsupported_action_for_anonymous", func(t *testing.T) {
		// AssumeRole requires SigV4 authentication, anonymous requests should fail
		resp, err := callSTSAPI(t, url.Values{
			"Action":          {"AssumeRole"},
			"Version":         {"2011-06-15"},
			"RoleArn":         {"arn:aws:iam::role/test-role"},
			"RoleSessionName": {"test-session"},
		})
		require.NoError(t, err)
		defer resp.Body.Close()

		// Should fail because AssumeRole requires AWS SigV4 authentication
		assert.NotEqual(t, http.StatusOK, resp.StatusCode,
			"AssumeRole should require authentication")

		body, err := io.ReadAll(resp.Body)
		require.NoError(t, err)
		t.Logf("Response for anonymous AssumeRole: status=%d, body=%s", resp.StatusCode, string(body))
	})
}
// isAssumeRoleImplemented probes the running server with a throwaway signed
// AssumeRole call and reports whether the action is routed and implemented.
// Any transport failure is treated as "not implemented".
func isAssumeRoleImplemented(t *testing.T) bool {
	resp, err := callSTSAPIWithSigV4(t, url.Values{
		"Action":          {"AssumeRole"},
		"Version":         {"2011-06-15"},
		"RoleArn":         {"arn:aws:iam::role/test"},
		"RoleSessionName": {"test"},
	}, "test", "test")
	if err != nil {
		return false
	}
	defer resp.Body.Close()

	payload, readErr := io.ReadAll(resp.Body)
	if readErr != nil {
		return false
	}

	// A parseable STS error with code NotImplemented means the action is
	// unsupported; InvalidAction means it is not routed by this build.
	var parsed STSErrorTestResponse
	if xml.Unmarshal(payload, &parsed) == nil {
		switch parsed.Error.Code {
		case "NotImplemented", "InvalidAction":
			return false
		}
	}
	return true
}
// TestSTSAssumeRoleWithValidCredentials tests AssumeRole with valid IAM credentials
// This test requires a configured IAM user in SeaweedFS.
//
// Credentials come from STS_TEST_ACCESS_KEY / STS_TEST_SECRET_KEY, falling
// back to "admin"/"admin". The test skips (rather than fails) when the STS
// endpoint is down, and tolerates non-200 responses while AssumeRole is not
// yet fully implemented server-side — see the inline notes below.
func TestSTSAssumeRoleWithValidCredentials(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}
	if !isSTSEndpointRunning(t) {
		t.Skip("SeaweedFS STS endpoint is not running at", TestSTSEndpoint)
	}
	// Use test credentials from environment or fall back to defaults
	accessKey := os.Getenv("STS_TEST_ACCESS_KEY")
	if accessKey == "" {
		accessKey = "admin"
	}
	secretKey := os.Getenv("STS_TEST_SECRET_KEY")
	if secretKey == "" {
		secretKey = "admin"
	}
	t.Run("successful_assume_role", func(t *testing.T) {
		resp, err := callSTSAPIWithSigV4(t, url.Values{
			"Action":          {"AssumeRole"},
			"Version":         {"2011-06-15"},
			"RoleArn":         {"arn:aws:iam::role/admin"},
			"RoleSessionName": {"integration-test-session"},
		}, accessKey, secretKey)
		require.NoError(t, err)
		defer resp.Body.Close()
		body, err := io.ReadAll(resp.Body)
		require.NoError(t, err)
		t.Logf("Response status: %d, body: %s", resp.StatusCode, string(body))
		// If AssumeRole is not yet implemented, expect an error about unsupported action
		if resp.StatusCode != http.StatusOK {
			// Non-200: just log the parsed error; do not fail while the
			// feature is still being rolled out.
			var errResp STSErrorTestResponse
			err = xml.Unmarshal(body, &errResp)
			require.NoError(t, err, "Failed to parse error response: %s", string(body))
			t.Logf("Error response: code=%s, message=%s", errResp.Error.Code, errResp.Error.Message)
			// This test will initially fail until AssumeRole is implemented
			// Once implemented, uncomment the assertions below
			// assert.Fail(t, "AssumeRole not yet implemented")
		} else {
			// 200: the response must carry a complete set of temporary
			// credentials.
			var stsResp AssumeRoleTestResponse
			err = xml.Unmarshal(body, &stsResp)
			require.NoError(t, err, "Failed to parse response: %s", string(body))
			creds := stsResp.Result.Credentials
			assert.NotEmpty(t, creds.AccessKeyId, "AccessKeyId should not be empty")
			assert.NotEmpty(t, creds.SecretAccessKey, "SecretAccessKey should not be empty")
			assert.NotEmpty(t, creds.SessionToken, "SessionToken should not be empty")
			assert.NotEmpty(t, creds.Expiration, "Expiration should not be empty")
			t.Logf("Successfully obtained temporary credentials: AccessKeyId=%s", creds.AccessKeyId)
		}
	})
	t.Run("with_custom_duration", func(t *testing.T) {
		resp, err := callSTSAPIWithSigV4(t, url.Values{
			"Action":          {"AssumeRole"},
			"Version":         {"2011-06-15"},
			"RoleArn":         {"arn:aws:iam::role/admin"},
			"RoleSessionName": {"duration-test-session"},
			"DurationSeconds": {"3600"}, // 1 hour
		}, accessKey, secretKey)
		require.NoError(t, err)
		defer resp.Body.Close()
		body, err := io.ReadAll(resp.Body)
		require.NoError(t, err)
		t.Logf("Response status: %d, body: %s", resp.StatusCode, string(body))
		// Verify DurationSeconds is accepted
		if resp.StatusCode != http.StatusOK {
			var errResp STSErrorTestResponse
			err = xml.Unmarshal(body, &errResp)
			require.NoError(t, err, "Failed to parse error response: %s", string(body))
			// Should not fail due to DurationSeconds parameter
			assert.NotContains(t, errResp.Error.Message, "DurationSeconds",
				"DurationSeconds parameter should be accepted")
		}
	})
}
// TestSTSAssumeRoleWithInvalidCredentials verifies that AssumeRole rejects
// requests signed with an unknown access key or a wrong secret key.
func TestSTSAssumeRoleWithInvalidCredentials(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}
	if !isSTSEndpointRunning(t) {
		t.Skip("SeaweedFS STS endpoint is not running at", TestSTSEndpoint)
	}

	// Both cases must be rejected (access denied or signature mismatch);
	// only the credentials and the diagnostics differ.
	badCreds := []struct {
		name      string
		accessKey string
		secretKey string
		failMsg   string
		logNote   string
	}{
		{
			name:      "invalid_access_key",
			accessKey: "invalid-access-key",
			secretKey: "some-secret-key",
			failMsg:   "Should fail with invalid access key",
			logNote:   "Response for invalid credentials",
		},
		{
			name:      "invalid_secret_key",
			accessKey: "admin",
			secretKey: "wrong-secret-key",
			failMsg:   "Should fail with invalid secret key",
			logNote:   "Response for wrong secret",
		},
	}
	for _, tc := range badCreds {
		t.Run(tc.name, func(t *testing.T) {
			resp, err := callSTSAPIWithSigV4(t, url.Values{
				"Action":          {"AssumeRole"},
				"Version":         {"2011-06-15"},
				"RoleArn":         {"arn:aws:iam::role/admin"},
				"RoleSessionName": {"test-session"},
			}, tc.accessKey, tc.secretKey)
			require.NoError(t, err)
			defer resp.Body.Close()

			assert.NotEqual(t, http.StatusOK, resp.StatusCode, tc.failMsg)

			body, err := io.ReadAll(resp.Body)
			require.NoError(t, err)
			t.Logf("%s: status=%d, body=%s", tc.logNote, resp.StatusCode, string(body))
		})
	}
}
// callSTSAPIWithSigV4 makes an STS API call authenticated with AWS Signature V4.
//
// The request is a form-encoded POST of params to TestSTSEndpoint, signed for
// the "sts" service in region "us-east-1" with the supplied static credentials.
// The caller owns the returned response and must close its Body.
func callSTSAPIWithSigV4(t *testing.T, params url.Values, accessKey, secretKey string) (*http.Response, error) {
	// Encode the form body once; the exact same bytes are used for the
	// request payload and for the signature's body hash below.
	body := params.Encode()

	req, err := http.NewRequest(http.MethodPost, TestSTSEndpoint+"/",
		strings.NewReader(body))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	// NOTE: do not set a "Host" entry in req.Header - net/http ignores it.
	// The Host header is derived from req.Host / req.URL.Host, which is also
	// what the SigV4 signer uses when building the canonical request.

	// Sign the request with AWS Signature V4 using the official SDK. The
	// signer requires an io.ReadSeeker over the payload to compute the body
	// hash, so hand it a fresh reader over the same body string.
	creds := credentials.NewStaticCredentials(accessKey, secretKey, "")
	signer := v4.NewSigner(creds)
	bodyReader := strings.NewReader(body)
	if _, err = signer.Sign(req, bodyReader, "sts", "us-east-1", time.Now()); err != nil {
		return nil, fmt.Errorf("failed to sign request: %w", err)
	}

	client := &http.Client{Timeout: 30 * time.Second}
	return client.Do(req)
}

291
test/s3/iam/s3_sts_ldap_test.go

@ -0,0 +1,291 @@
package iam
import (
"encoding/xml"
"io"
"net/http"
"net/url"
"os"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// AssumeRoleWithLDAPIdentityTestResponse mirrors the XML document returned by
// the STS AssumeRoleWithLDAPIdentity action. Only the temporary credentials
// are mapped, since that is all the tests in this file assert on; other
// response elements are ignored by encoding/xml.
type AssumeRoleWithLDAPIdentityTestResponse struct {
	XMLName xml.Name `xml:"AssumeRoleWithLDAPIdentityResponse"`
	Result  struct {
		// Credentials holds the temporary key pair and session token.
		// Expiration is kept as the raw response string.
		Credentials struct {
			AccessKeyId     string `xml:"AccessKeyId"`
			SecretAccessKey string `xml:"SecretAccessKey"`
			SessionToken    string `xml:"SessionToken"`
			Expiration      string `xml:"Expiration"`
		} `xml:"Credentials"`
	} `xml:"AssumeRoleWithLDAPIdentityResult"`
}
// TestSTSLDAPValidation tests input validation for AssumeRoleWithLDAPIdentity.
//
// It requires a live STS endpoint (see 'make setup-all-tests') and fails fast
// when the endpoint or the action itself is unavailable, so routing problems
// are not mistaken for validation failures.
func TestSTSLDAPValidation(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}
	if !isSTSEndpointRunning(t) {
		t.Fatal("SeaweedFS STS endpoint is not running at", TestSTSEndpoint, "- please run 'make setup-all-tests' first")
	}
	// Check if AssumeRoleWithLDAPIdentity is implemented
	if !isLDAPIdentityActionImplemented(t) {
		t.Fatal("AssumeRoleWithLDAPIdentity action is not implemented in the running server - please rebuild weed binary with new code and restart the server")
	}

	// Each case omits exactly one required parameter; the server must reject
	// the request. Expect either MissingParameter or InvalidAction (if not
	// implemented).
	cases := []struct {
		name   string
		params url.Values
		reason string
	}{
		{
			name: "missing_ldap_username",
			params: url.Values{
				"Action":          {"AssumeRoleWithLDAPIdentity"},
				"Version":         {"2011-06-15"},
				"RoleArn":         {"arn:aws:iam::role/test-role"},
				"RoleSessionName": {"test-session"},
				"LDAPPassword":    {"testpass"},
				// LDAPUsername is missing
			},
			reason: "Should fail without LDAPUsername",
		},
		{
			name: "missing_ldap_password",
			params: url.Values{
				"Action":          {"AssumeRoleWithLDAPIdentity"},
				"Version":         {"2011-06-15"},
				"RoleArn":         {"arn:aws:iam::role/test-role"},
				"RoleSessionName": {"test-session"},
				"LDAPUsername":    {"testuser"},
				// LDAPPassword is missing
			},
			reason: "Should fail without LDAPPassword",
		},
		{
			name: "missing_role_arn",
			params: url.Values{
				"Action":          {"AssumeRoleWithLDAPIdentity"},
				"Version":         {"2011-06-15"},
				"RoleSessionName": {"test-session"},
				"LDAPUsername":    {"testuser"},
				"LDAPPassword":    {"testpass"},
				// RoleArn is missing
			},
			reason: "Should fail without RoleArn",
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			resp, err := callSTSAPIForLDAP(t, tc.params)
			require.NoError(t, err)
			defer resp.Body.Close()

			assert.NotEqual(t, http.StatusOK, resp.StatusCode, tc.reason)

			body, err := io.ReadAll(resp.Body)
			require.NoError(t, err)

			var errResp STSErrorTestResponse
			err = xml.Unmarshal(body, &errResp)
			require.NoError(t, err, "Failed to parse error response: %s", string(body))
			assert.Contains(t, []string{"MissingParameter", "InvalidAction"}, errResp.Error.Code)
		})
	}

	t.Run("invalid_duration_too_short", func(t *testing.T) {
		resp, err := callSTSAPIForLDAP(t, url.Values{
			"Action":          {"AssumeRoleWithLDAPIdentity"},
			"Version":         {"2011-06-15"},
			"RoleArn":         {"arn:aws:iam::role/test-role"},
			"RoleSessionName": {"test-session"},
			"LDAPUsername":    {"testuser"},
			"LDAPPassword":    {"testpass"},
			"DurationSeconds": {"100"}, // Less than 900 seconds minimum
		})
		require.NoError(t, err)
		defer resp.Body.Close()

		// If the action is implemented, it should reject invalid duration
		body, err := io.ReadAll(resp.Body)
		require.NoError(t, err)
		t.Logf("Response for invalid duration: status=%d, body=%s", resp.StatusCode, string(body))
	})
}
// TestSTSLDAPWithValidCredentials tests LDAP authentication
// This test requires an LDAP server to be configured.
//
// LDAP availability is signalled by the LDAP_URL environment variable (see
// isLDAPConfigured); without it the test skips. On a 200 response the
// returned temporary credentials must be complete; non-200 responses are
// tolerated silently, since LDAP provider wiring is optional in this suite.
func TestSTSLDAPWithValidCredentials(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}
	if !isSTSEndpointRunning(t) {
		t.Skip("SeaweedFS STS endpoint is not running at", TestSTSEndpoint)
	}
	// Check if LDAP is configured (skip if not)
	if !isLDAPConfigured() {
		t.Skip("LDAP is not configured - skipping LDAP integration tests")
	}
	t.Run("successful_ldap_auth", func(t *testing.T) {
		// Credentials match the test users seeded by setup_all_tests.sh.
		resp, err := callSTSAPIForLDAP(t, url.Values{
			"Action":          {"AssumeRoleWithLDAPIdentity"},
			"Version":         {"2011-06-15"},
			"RoleArn":         {"arn:aws:iam::role/ldap-user"},
			"RoleSessionName": {"ldap-test-session"},
			"LDAPUsername":    {"testuser"},
			"LDAPPassword":    {"testpass"},
		})
		require.NoError(t, err)
		defer resp.Body.Close()
		body, err := io.ReadAll(resp.Body)
		require.NoError(t, err)
		t.Logf("Response status: %d, body: %s", resp.StatusCode, string(body))
		if resp.StatusCode == http.StatusOK {
			var stsResp AssumeRoleWithLDAPIdentityTestResponse
			err = xml.Unmarshal(body, &stsResp)
			require.NoError(t, err, "Failed to parse response: %s", string(body))
			creds := stsResp.Result.Credentials
			assert.NotEmpty(t, creds.AccessKeyId, "AccessKeyId should not be empty")
			assert.NotEmpty(t, creds.SecretAccessKey, "SecretAccessKey should not be empty")
			assert.NotEmpty(t, creds.SessionToken, "SessionToken should not be empty")
			assert.NotEmpty(t, creds.Expiration, "Expiration should not be empty")
		}
	})
}
// TestSTSLDAPWithInvalidCredentials verifies that AssumeRoleWithLDAPIdentity
// rejects a wrong password and an unknown user.
func TestSTSLDAPWithInvalidCredentials(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}
	if !isSTSEndpointRunning(t) {
		t.Skip("SeaweedFS STS endpoint is not running at", TestSTSEndpoint)
	}

	// Both cases must be rejected (AccessDenied, or InvalidAction if the
	// action is not implemented); only the LDAP credentials differ.
	badLogins := []struct {
		name     string
		username string
		password string
		failMsg  string
		logNote  string
	}{
		{
			name:     "invalid_ldap_password",
			username: "testuser",
			password: "wrong-password",
			failMsg:  "Should fail with invalid LDAP password",
			logNote:  "Response for invalid LDAP credentials",
		},
		{
			name:     "nonexistent_ldap_user",
			username: "nonexistent-user-12345",
			password: "somepassword",
			failMsg:  "Should fail with nonexistent LDAP user",
			logNote:  "Response for nonexistent user",
		},
	}
	for _, tc := range badLogins {
		t.Run(tc.name, func(t *testing.T) {
			resp, err := callSTSAPIForLDAP(t, url.Values{
				"Action":          {"AssumeRoleWithLDAPIdentity"},
				"Version":         {"2011-06-15"},
				"RoleArn":         {"arn:aws:iam::role/ldap-user"},
				"RoleSessionName": {"ldap-test-session"},
				"LDAPUsername":    {tc.username},
				"LDAPPassword":    {tc.password},
			})
			require.NoError(t, err)
			defer resp.Body.Close()

			body, err := io.ReadAll(resp.Body)
			require.NoError(t, err)
			t.Logf("%s: status=%d, body=%s", tc.logNote, resp.StatusCode, string(body))

			assert.NotEqual(t, http.StatusOK, resp.StatusCode, tc.failMsg)
		})
	}
}
// callSTSAPIForLDAP posts a form-encoded STS request to TestSTSEndpoint
// without SigV4 signing, as used by the AssumeRoleWithLDAPIdentity action.
// The caller owns the returned response and must close its Body.
func callSTSAPIForLDAP(t *testing.T, params url.Values) (*http.Response, error) {
	encoded := params.Encode()
	request, err := http.NewRequest(http.MethodPost, TestSTSEndpoint+"/",
		strings.NewReader(encoded))
	if err != nil {
		return nil, err
	}
	request.Header.Set("Content-Type", "application/x-www-form-urlencoded")

	httpClient := &http.Client{Timeout: 30 * time.Second}
	return httpClient.Do(request)
}
// isLDAPConfigured checks if LDAP server is configured and available
func isLDAPConfigured() bool {
// Check environment variable for LDAP URL
ldapURL := os.Getenv("LDAP_URL")
return ldapURL != ""
}
// isLDAPIdentityActionImplemented probes the running server with a throwaway
// AssumeRoleWithLDAPIdentity call and reports whether the action is routed
// and implemented. Transport failures and empty bodies count as "not
// implemented".
func isLDAPIdentityActionImplemented(t *testing.T) bool {
	resp, err := callSTSAPIForLDAP(t, url.Values{
		"Action":          {"AssumeRoleWithLDAPIdentity"},
		"Version":         {"2011-06-15"},
		"RoleArn":         {"arn:aws:iam::role/test"},
		"RoleSessionName": {"test"},
		"LDAPUsername":    {"test"},
		"LDAPPassword":    {"test"},
	})
	if err != nil {
		return false
	}
	defer resp.Body.Close()

	payload, readErr := io.ReadAll(resp.Body)
	if readErr != nil || len(payload) == 0 {
		// Unreadable or empty response: the action isn't supported.
		return false
	}

	// A parseable STS error with code NotImplemented means the action is
	// unsupported; InvalidAction means it is not routed by this build.
	var parsed STSErrorTestResponse
	if xml.Unmarshal(payload, &parsed) == nil {
		switch parsed.Error.Code {
		case "NotImplemented", "InvalidAction":
			return false
		}
	}
	return true
}

82
test/s3/iam/setup_all_tests.sh

@ -50,6 +50,82 @@ setup_keycloak() {
echo -e "${GREEN}[OK] Keycloak setup completed${NC}"
}
# Set up OpenLDAP for LDAP-based STS testing
#
# Starts an osixia/openldap container named "openldap-iam-test" on ports
# 389/636, seeds it with two test users (testuser, ldapadmin), and exports
# LDAP_* environment variables consumed by the Go LDAP STS tests.
# LDAP is optional: every failure path returns 0 so the rest of the test
# environment setup can continue without it.
setup_ldap() {
    echo -e "\n${BLUE}1a. Setting up OpenLDAP for STS LDAP testing...${NC}"
    # Check if LDAP container is already running
    if docker ps --format '{{.Names}}' | grep -q '^openldap-iam-test$'; then
        echo -e "${YELLOW}OpenLDAP container already running${NC}"
        echo -e "${GREEN}[OK] LDAP setup completed (using existing container)${NC}"
        return 0
    fi
    # Remove any stopped container with the same name
    docker rm -f openldap-iam-test 2>/dev/null || true
    # Start OpenLDAP container
    echo -e "${YELLOW}🔧 Starting OpenLDAP container...${NC}"
    docker run -d \
        --name openldap-iam-test \
        -p 389:389 \
        -p 636:636 \
        -e LDAP_ADMIN_PASSWORD=adminpassword \
        -e LDAP_ORGANISATION="SeaweedFS" \
        -e LDAP_DOMAIN="seaweedfs.test" \
        osixia/openldap:latest || {
        echo -e "${YELLOW}⚠️ OpenLDAP setup failed (optional for basic STS tests)${NC}"
        return 0 # Don't fail - LDAP is optional
    }
    # Wait for LDAP to be ready: poll an admin-bound search for up to 30s.
    # NOTE(review): on timeout we fall through and attempt ldapadd anyway;
    # its failure is swallowed by "|| true" below.
    echo -e "${YELLOW}⏳ Waiting for OpenLDAP to be ready...${NC}"
    for i in $(seq 1 30); do
        if docker exec openldap-iam-test ldapsearch -x -H ldap://localhost -b "dc=seaweedfs,dc=test" -D "cn=admin,dc=seaweedfs,dc=test" -w adminpassword "(objectClass=*)" >/dev/null 2>&1; then
            break
        fi
        sleep 1
    done
    # Add test users for LDAP STS testing.
    # LDIF entries below are separated by blank lines, as ldapadd requires.
    echo -e "${YELLOW}📝 Adding test users for LDAP STS...${NC}"
    docker exec -i openldap-iam-test ldapadd -x -D "cn=admin,dc=seaweedfs,dc=test" -w adminpassword <<EOF 2>/dev/null || true
dn: ou=users,dc=seaweedfs,dc=test
objectClass: organizationalUnit
ou: users

dn: cn=testuser,ou=users,dc=seaweedfs,dc=test
objectClass: inetOrgPerson
cn: testuser
sn: Test User
uid: testuser
userPassword: testpass

dn: cn=ldapadmin,ou=users,dc=seaweedfs,dc=test
objectClass: inetOrgPerson
cn: ldapadmin
sn: LDAP Admin
uid: ldapadmin
userPassword: ldapadminpass
EOF
    # Verify test users were created successfully (non-fatal: only warns)
    echo -e "${YELLOW}🔍 Verifying LDAP test users...${NC}"
    if docker exec openldap-iam-test ldapsearch -x -D "cn=admin,dc=seaweedfs,dc=test" -w adminpassword -b "ou=users,dc=seaweedfs,dc=test" "(cn=testuser)" cn 2>/dev/null | grep -q "cn: testuser"; then
        echo -e "${GREEN}[OK] Test user 'testuser' verified${NC}"
    else
        echo -e "${RED}[WARN] Could not verify test user 'testuser' - LDAP tests may fail${NC}"
    fi
    # Set environment for LDAP tests (read by the Go test helpers, e.g.
    # isLDAPConfigured checks LDAP_URL)
    export LDAP_URL="ldap://localhost:389"
    export LDAP_BASE_DN="dc=seaweedfs,dc=test"
    export LDAP_BIND_DN="cn=admin,dc=seaweedfs,dc=test"
    export LDAP_BIND_PASSWORD="adminpassword"
    echo -e "${GREEN}[OK] LDAP setup completed${NC}"
}
# Set up SeaweedFS test cluster
setup_seaweedfs_cluster() {
echo -e "\n${BLUE}2. Setting up SeaweedFS test cluster...${NC}"
@ -153,6 +229,7 @@ display_summary() {
echo -e "\n${BLUE}📊 Setup Summary${NC}"
echo -e "${BLUE}=================${NC}"
echo -e "Keycloak URL: ${KEYCLOAK_URL:-http://localhost:8080}"
echo -e "LDAP URL: ${LDAP_URL:-ldap://localhost:389}"
echo -e "S3 Endpoint: ${S3_ENDPOINT:-http://localhost:8333}"
echo -e "Test Timeout: ${TEST_TIMEOUT:-60m}"
echo -e "IAM Config: ${SCRIPT_DIR}/iam_config.json"
@ -161,6 +238,7 @@ display_summary() {
echo -e "${YELLOW}💡 You can now run tests with: make run-all-tests${NC}"
echo -e "${YELLOW}💡 Or run specific tests with: go test -v -timeout=60m -run TestName${NC}"
echo -e "${YELLOW}💡 To stop Keycloak: docker stop keycloak-iam-test${NC}"
echo -e "${YELLOW}💡 To stop LDAP: docker stop openldap-iam-test${NC}"
}
# Main execution
@ -177,6 +255,10 @@ main() {
exit 1
fi
# LDAP is optional but we try to set it up
setup_ldap
setup_steps+=("ldap")
if setup_seaweedfs_cluster; then
setup_steps+=("seaweedfs")
else

2
test/s3/iam/setup_keycloak.sh

@ -139,7 +139,7 @@ ensure_realm() {
echo -e "${GREEN}[OK] Realm '${REALM_NAME}' already exists${NC}"
else
echo -e "${YELLOW}📝 Creating realm '${REALM_NAME}'...${NC}"
if kcadm create realms -s realm="${REALM_NAME}" -s enabled=true 2>/dev/null; then
if kcadm create realms -s realm="${REALM_NAME}" -s enabled=true; then
echo -e "${GREEN}[OK] Realm created${NC}"
else
# Check if it exists now (might have been created by another process)

4
weed/iam/integration/advanced_policy_test.go

@ -25,7 +25,7 @@ func TestPolicyVariableSubstitution(t *testing.T) {
{
Effect: "Allow",
Principal: map[string]interface{}{
"Federated": "https://test-issuer.com",
"Federated": "test-oidc",
},
Action: []string{"sts:AssumeRoleWithWebIdentity"},
},
@ -102,7 +102,7 @@ func TestConditionWithNumericComparison(t *testing.T) {
{
Effect: "Allow",
Principal: map[string]interface{}{
"Federated": "https://test-issuer.com",
"Federated": "test-oidc",
},
Action: []string{"sts:AssumeRoleWithWebIdentity"},
Condition: map[string]map[string]interface{}{

6
weed/iam/integration/iam_integration_test.go

@ -421,7 +421,7 @@ func TestTrustPolicyWildcardPrincipal(t *testing.T) {
{
Effect: "Allow",
Principal: map[string]interface{}{
"Federated": "https://test-issuer.com",
"Federated": "test-oidc",
},
Action: []string{"sts:AssumeRoleWithWebIdentity"},
},
@ -440,7 +440,7 @@ func TestTrustPolicyWildcardPrincipal(t *testing.T) {
{
Effect: "Allow",
Principal: map[string]interface{}{
"Federated": []interface{}{"specific-provider", "https://test-issuer.com"},
"Federated": []interface{}{"specific-provider", "test-oidc"},
},
Action: []string{"sts:AssumeRoleWithWebIdentity"},
},
@ -646,7 +646,7 @@ func setupTestPoliciesAndRoles(t *testing.T, manager *IAMManager) {
{
Effect: "Allow",
Principal: map[string]interface{}{
"Federated": "https://test-issuer.com",
"Federated": "test-oidc",
},
Action: []string{"sts:AssumeRoleWithWebIdentity"},
},

18
weed/iam/integration/iam_manager.go

@ -346,7 +346,7 @@ func (m *IAMManager) ValidateTrustPolicy(ctx context.Context, roleArn, provider,
if principal, ok := statement.Principal.(map[string]interface{}); ok {
if federated, ok := principal["Federated"].(string); ok {
// For OIDC, check against issuer URL
if provider == "oidc" && federated == "https://test-issuer.com" {
if provider == "oidc" && federated == "test-oidc" {
return true
}
// For LDAP, check against test-ldap
@ -391,8 +391,24 @@ func (m *IAMManager) validateTrustPolicyForWebIdentity(ctx context.Context, role
// The issuer is the federated provider for OIDC
if iss, ok := tokenClaims["iss"].(string); ok {
// Default to issuer URL
requestContext["aws:FederatedProvider"] = iss
requestContext["oidc:iss"] = iss
// Try to resolve provider name from issuer for better policy matching
// This allows policies to reference the provider name (e.g. "keycloak") instead of the full issuer URL
if m.stsService != nil {
for name, provider := range m.stsService.GetProviders() {
if oidcProvider, ok := provider.(interface{ GetIssuer() string }); ok {
confIssuer := oidcProvider.GetIssuer()
if confIssuer == iss {
requestContext["aws:FederatedProvider"] = name
break
}
}
}
}
}
if sub, ok := tokenClaims["sub"].(string); ok {

43
weed/iam/integration/iam_manager_trust.go

@ -0,0 +1,43 @@
package integration
import (
"context"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/iam/policy"
"github.com/seaweedfs/seaweedfs/weed/iam/utils"
)
// ValidateTrustPolicyForPrincipal validates if a principal is allowed to assume a role
func (m *IAMManager) ValidateTrustPolicyForPrincipal(ctx context.Context, roleArn, principalArn string) error {
	if !m.initialized {
		return fmt.Errorf("IAM manager not initialized")
	}

	// Resolve the role definition from its ARN.
	roleName := utils.ExtractRoleNameFromArn(roleArn)
	roleDef, err := m.roleStore.GetRole(ctx, m.getFilerAddress(), roleName)
	if err != nil {
		return fmt.Errorf("failed to get role %s: %w", roleName, err)
	}
	if roleDef.TrustPolicy == nil {
		return fmt.Errorf("role has no trust policy")
	}

	// Evaluate the trust policy against an sts:AssumeRole request by this principal.
	evalCtx := &policy.EvaluationContext{
		Principal: principalArn,
		Action:    "sts:AssumeRole",
		Resource:  roleArn,
	}
	if m.evaluateTrustPolicy(roleDef.TrustPolicy, evalCtx) {
		return nil
	}
	return fmt.Errorf("trust policy denies access to principal: %s", principalArn)
}

571
weed/iam/ldap/ldap_provider.go

@ -0,0 +1,571 @@
package ldap
import (
"context"
"crypto/tls"
"fmt"
"net"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/go-ldap/ldap/v3"
"github.com/mitchellh/mapstructure"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/iam/providers"
)
// LDAPConfig holds configuration for LDAP provider.
type LDAPConfig struct {
	// Server is the LDAP server URL (ldap:// or ldaps://)
	Server string `json:"server"`
	// BindDN is the DN used to bind for searches (optional for anonymous bind)
	BindDN string `json:"bindDN,omitempty"`
	// BindPassword is the password for the bind DN
	BindPassword string `json:"bindPassword,omitempty"`
	// BaseDN is the base DN for user searches
	BaseDN string `json:"baseDN"`
	// UserFilter is the filter to find users (use %s for username placeholder;
	// the placeholder is expanded with fmt.Sprintf after LDAP-escaping the input)
	// Example: "(uid=%s)" or "(cn=%s)" or "(&(objectClass=person)(uid=%s))"
	UserFilter string `json:"userFilter"`
	// GroupFilter is the filter to find user groups (use %s for user DN placeholder)
	// Example: "(member=%s)" or "(memberUid=%s)"
	GroupFilter string `json:"groupFilter,omitempty"`
	// GroupBaseDN is the base DN for group searches (defaults to BaseDN)
	GroupBaseDN string `json:"groupBaseDN,omitempty"`
	// Attributes to retrieve from LDAP
	Attributes LDAPAttributes `json:"attributes,omitempty"`
	// UseTLS enables StartTLS (only relevant for ldap:// URLs; ldaps:// is always TLS)
	UseTLS bool `json:"useTLS,omitempty"`
	// InsecureSkipVerify skips TLS certificate verification (never use in production)
	InsecureSkipVerify bool `json:"insecureSkipVerify,omitempty"`
	// ConnectionTimeout is the connection timeout; it is also used (in whole
	// seconds) as the time limit on LDAP search requests
	ConnectionTimeout time.Duration `json:"connectionTimeout,omitempty"`
	// PoolSize is the number of connections in the pool (default: 10)
	PoolSize int `json:"poolSize,omitempty"`
	// Audience is the expected audience for tokens (optional); when set it
	// replaces the provider name as the Audience of issued token claims
	Audience string `json:"audience,omitempty"`
}
// LDAPAttributes maps logical identity fields to LDAP attribute names.
// Empty fields fall back to the defaults noted below (applied in Initialize).
type LDAPAttributes struct {
	Email       string `json:"email,omitempty"`       // Default: mail
	DisplayName string `json:"displayName,omitempty"` // Default: cn
	Groups      string `json:"groups,omitempty"`      // Default: memberOf
	UID         string `json:"uid,omitempty"`         // Default: uid
}
// connectionPool manages a pool of LDAP connections for reuse.
type connectionPool struct {
	conns  chan *ldap.Conn // buffered channel of idle connections
	mu     sync.Mutex      // serializes shutdown of the conns channel (see Close)
	size   int             // configured pool capacity
	closed uint32          // atomic flag: 1 if closed, 0 if open
}
// LDAPProvider implements the IdentityProvider interface for LDAP.
// The zero value is not usable; construct with NewLDAPProvider and call
// Initialize before use.
type LDAPProvider struct {
	name        string          // provider name; used as issuer/provider in identities and claims
	config      *LDAPConfig     // set by Initialize
	initialized bool            // guarded by mu
	mu          sync.RWMutex    // protects config and initialized
	pool        *connectionPool // created by Initialize
}
// NewLDAPProvider creates a new, uninitialized LDAP provider with the given
// name. Initialize must be called before the provider can authenticate users.
func NewLDAPProvider(name string) *LDAPProvider {
	provider := &LDAPProvider{name: name}
	return provider
}
// Name returns the provider name supplied to NewLDAPProvider.
func (p *LDAPProvider) Name() string {
	return p.name
}
// Initialize initializes the provider with configuration.
//
// config may be a *LDAPConfig or a map (e.g. decoded JSON); maps are decoded
// with weak typing plus a string-to-duration hook, so values like "10s" are
// accepted for ConnectionTimeout. Returns an error if the provider was
// already initialized or the configuration is invalid.
func (p *LDAPProvider) Initialize(config interface{}) error {
	p.mu.Lock()
	defer p.mu.Unlock()

	if p.initialized {
		return fmt.Errorf("LDAP provider already initialized")
	}

	cfg := &LDAPConfig{}
	// Check if input is already the correct struct type
	if c, ok := config.(*LDAPConfig); ok {
		cfg = c
	} else {
		// Parse from map using mapstructure with weak typing and time duration hook
		decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
			DecodeHook: mapstructure.ComposeDecodeHookFunc(
				mapstructure.StringToTimeDurationHookFunc(),
			),
			Result:           cfg,
			TagName:          "json",
			WeaklyTypedInput: true,
		})
		if err != nil {
			return fmt.Errorf("failed to create config decoder: %w", err)
		}
		if err := decoder.Decode(config); err != nil {
			return fmt.Errorf("failed to decode LDAP configuration: %w", err)
		}
	}

	// Validate required fields
	if cfg.Server == "" {
		return fmt.Errorf("LDAP server URL is required")
	}
	if cfg.BaseDN == "" {
		return fmt.Errorf("LDAP base DN is required")
	}
	if cfg.UserFilter == "" {
		cfg.UserFilter = "(cn=%s)" // Default filter
	}
	// The filters are expanded with fmt.Sprintf, so each must contain a %s
	// placeholder; without one, every search would use a malformed filter
	// (Sprintf would append "%!(EXTRA ...)" to it). Fail fast at init instead.
	if !strings.Contains(cfg.UserFilter, "%s") {
		return fmt.Errorf("LDAP user filter must contain a %%s placeholder, got %q", cfg.UserFilter)
	}
	if cfg.GroupFilter != "" && !strings.Contains(cfg.GroupFilter, "%s") {
		return fmt.Errorf("LDAP group filter must contain a %%s placeholder, got %q", cfg.GroupFilter)
	}

	// Warn if BindDN is configured but BindPassword is empty
	if cfg.BindDN != "" && cfg.BindPassword == "" {
		glog.Warningf("LDAP provider '%s' configured with BindDN but no BindPassword", p.name)
	}
	// Warn if InsecureSkipVerify is enabled
	if cfg.InsecureSkipVerify {
		glog.Warningf("LDAP provider '%s' has InsecureSkipVerify enabled. Do not use in production.", p.name)
	}

	// Set default attributes
	if cfg.Attributes.Email == "" {
		cfg.Attributes.Email = "mail"
	}
	if cfg.Attributes.DisplayName == "" {
		cfg.Attributes.DisplayName = "cn"
	}
	if cfg.Attributes.Groups == "" {
		cfg.Attributes.Groups = "memberOf"
	}
	if cfg.Attributes.UID == "" {
		cfg.Attributes.UID = "uid"
	}
	if cfg.GroupBaseDN == "" {
		cfg.GroupBaseDN = cfg.BaseDN
	}
	if cfg.ConnectionTimeout == 0 {
		cfg.ConnectionTimeout = 10 * time.Second
	}

	p.config = cfg

	// Initialize connection pool (default size: 10 connections)
	poolSize := 10
	if cfg.PoolSize > 0 {
		poolSize = cfg.PoolSize
	}
	p.pool = &connectionPool{
		conns: make(chan *ldap.Conn, poolSize),
		size:  poolSize,
	}

	p.initialized = true
	glog.V(1).Infof("LDAP provider '%s' initialized: server=%s, baseDN=%s",
		p.name, cfg.Server, cfg.BaseDN)
	return nil
}
// getConnection gets a connection from the pool or creates a new one.
//
// A nil connection can be received if the pool channel has been closed by
// Close; the original code returned that nil with a nil error, which would
// make callers nil-dereference. Nil and closing connections are now both
// replaced with a freshly dialed connection.
func (p *LDAPProvider) getConnection() (*ldap.Conn, error) {
	// Try to get a connection from the pool (non-blocking)
	select {
	case conn := <-p.pool.conns:
		if conn == nil {
			// Channel closed (provider shut down) — dial a new connection.
			return p.createConnection()
		}
		if conn.IsClosing() {
			// Connection is dead, create a new one
			conn.Close()
			return p.createConnection()
		}
		return conn, nil
	default:
		// Pool is empty, create a new connection
		return p.createConnection()
	}
}
// returnConnection returns a healthy connection to the pool, or closes it.
//
// The closed-flag check and the channel send are performed under the pool
// mutex. Close holds the same mutex while closing the channel, so the
// channel cannot be closed between our check and our send — the original
// lock-free check-then-send could panic with "send on closed channel" if it
// raced with Close.
func (p *LDAPProvider) returnConnection(conn *ldap.Conn) {
	if conn == nil || conn.IsClosing() {
		if conn != nil {
			conn.Close()
		}
		return
	}

	p.pool.mu.Lock()
	defer p.pool.mu.Unlock()

	// Pool already shut down: just discard the connection.
	if atomic.LoadUint32(&p.pool.closed) == 1 {
		conn.Close()
		return
	}

	// Try to return to pool (non-blocking)
	select {
	case p.pool.conns <- conn:
		// Successfully returned to pool
	default:
		// Pool is full, close the connection
		conn.Close()
	}
}
// createConnection dials the configured LDAP server and returns a ready
// connection. ldaps:// URLs negotiate TLS during the dial; plain ldap://
// URLs are optionally upgraded with StartTLS when UseTLS is set.
func (p *LDAPProvider) createConnection() (*ldap.Conn, error) {
	// Create dialer with timeout
	dialer := &net.Dialer{Timeout: p.config.ConnectionTimeout}

	newTLSConfig := func() *tls.Config {
		return &tls.Config{
			InsecureSkipVerify: p.config.InsecureSkipVerify,
			MinVersion:         tls.VersionTLS12,
		}
	}

	if strings.HasPrefix(p.config.Server, "ldaps://") {
		// Implicit TLS (LDAPS) connection
		conn, err := ldap.DialURL(p.config.Server, ldap.DialWithDialer(dialer), ldap.DialWithTLSConfig(newTLSConfig()))
		if err != nil {
			return nil, fmt.Errorf("failed to connect to LDAP server: %w", err)
		}
		return conn, nil
	}

	// Plain LDAP connection, optionally upgraded via StartTLS
	conn, err := ldap.DialURL(p.config.Server, ldap.DialWithDialer(dialer))
	if err != nil {
		return nil, fmt.Errorf("failed to connect to LDAP server: %w", err)
	}
	if p.config.UseTLS {
		if err := conn.StartTLS(newTLSConfig()); err != nil {
			conn.Close()
			return nil, fmt.Errorf("failed to start TLS: %w", err)
		}
	}
	return conn, nil
}
// Close closes all connections in the pool.
// It is idempotent: only the first call has any effect; later calls return nil.
func (p *LDAPProvider) Close() error {
	if p.pool == nil {
		return nil
	}
	// Atomically mark pool as closed to prevent new connections being returned
	if !atomic.CompareAndSwapUint32(&p.pool.closed, 0, 1) {
		// Already closed
		return nil
	}
	p.pool.mu.Lock()
	defer p.pool.mu.Unlock()
	// Now safe to close the channel since closed flag prevents new sends
	close(p.pool.conns)
	// Ranging over the closed channel drains the buffered idle connections
	// and then terminates.
	for conn := range p.pool.conns {
		if conn != nil {
			conn.Close()
		}
	}
	return nil
}
// Authenticate authenticates a user with username:password credentials.
//
// Flow: bind with the service account (if BindDN is configured), search for
// the user's entry, bind as the user to verify the password, then rebind to
// the service account so the pooled connection is left in a clean state.
// On any error the connection is closed rather than returned to the pool.
func (p *LDAPProvider) Authenticate(ctx context.Context, credentials string) (*providers.ExternalIdentity, error) {
	p.mu.RLock()
	if !p.initialized {
		p.mu.RUnlock()
		return nil, fmt.Errorf("LDAP provider not initialized")
	}
	config := p.config
	p.mu.RUnlock()

	// Parse credentials (username:password format; password may contain ':')
	parts := strings.SplitN(credentials, ":", 2)
	if len(parts) != 2 {
		return nil, fmt.Errorf("invalid credentials format (expected username:password)")
	}
	username, password := parts[0], parts[1]
	if username == "" || password == "" {
		return nil, fmt.Errorf("username and password are required")
	}

	// Get connection from pool
	conn, err := p.getConnection()
	if err != nil {
		return nil, err
	}
	// Note: defer returnConnection moved to after rebinding to service account

	// First, bind with service account to search for user
	if config.BindDN != "" {
		err = conn.Bind(config.BindDN, config.BindPassword)
		if err != nil {
			glog.V(2).Infof("LDAP service bind failed: %v", err)
			conn.Close() // Close on error, don't return to pool
			return nil, fmt.Errorf("LDAP service bind failed: %w", err)
		}
	}

	// Search for the user; the username is escaped to prevent filter injection
	userFilter := fmt.Sprintf(config.UserFilter, ldap.EscapeFilter(username))
	searchRequest := ldap.NewSearchRequest(
		config.BaseDN,
		ldap.ScopeWholeSubtree,
		ldap.NeverDerefAliases,
		1, // Size limit
		int(config.ConnectionTimeout.Seconds()), // time limit, in whole seconds
		false,
		userFilter,
		[]string{"dn", config.Attributes.Email, config.Attributes.DisplayName, config.Attributes.UID, config.Attributes.Groups},
		nil,
	)
	result, err := conn.Search(searchRequest)
	if err != nil {
		glog.V(2).Infof("LDAP user search failed: %v", err)
		conn.Close() // Close on error
		return nil, fmt.Errorf("LDAP user search failed: %w", err)
	}
	if len(result.Entries) == 0 {
		conn.Close() // Close on error
		return nil, fmt.Errorf("user not found")
	}
	if len(result.Entries) > 1 {
		conn.Close() // Close on error
		return nil, fmt.Errorf("multiple users found")
	}
	userEntry := result.Entries[0]
	userDN := userEntry.DN

	// Bind as the user to verify password
	err = conn.Bind(userDN, password)
	if err != nil {
		glog.V(2).Infof("LDAP user bind failed for %s: %v", username, err)
		conn.Close() // Close on error, don't return to pool
		return nil, fmt.Errorf("authentication failed: invalid credentials")
	}

	// Rebind to service account before returning connection to pool
	// This prevents pool corruption from authenticated user binds
	if config.BindDN != "" {
		if err = conn.Bind(config.BindDN, config.BindPassword); err != nil {
			glog.V(2).Infof("LDAP rebind to service account failed: %v", err)
			conn.Close() // Close on error, don't return to pool
			return nil, fmt.Errorf("LDAP service account rebind failed after successful user authentication (check bindDN %q and its credentials): %w", config.BindDN, err)
		}
	}
	// Now safe to defer return to pool with clean service account binding
	defer p.returnConnection(conn)

	// Build identity from LDAP attributes
	identity := &providers.ExternalIdentity{
		UserID:      username,
		Email:       userEntry.GetAttributeValue(config.Attributes.Email),
		DisplayName: userEntry.GetAttributeValue(config.Attributes.DisplayName),
		Groups:      userEntry.GetAttributeValues(config.Attributes.Groups),
		Provider:    p.name,
		Attributes: map[string]string{
			"dn":  userDN,
			"uid": userEntry.GetAttributeValue(config.Attributes.UID),
		},
	}

	// If no groups from memberOf, try group search (best-effort: a failed
	// group search is logged but does not fail authentication)
	if len(identity.Groups) == 0 && config.GroupFilter != "" {
		groups, err := p.searchUserGroups(conn, userDN, config)
		if err != nil {
			glog.V(2).Infof("Group search failed for %s: %v", username, err)
		} else {
			identity.Groups = groups
		}
	}

	glog.V(2).Infof("LDAP authentication successful for user: %s, groups: %v", username, identity.Groups)
	return identity, nil
}
// searchUserGroups looks up the groups containing the given user DN using
// the configured group filter and base DN, and returns the group CNs.
func (p *LDAPProvider) searchUserGroups(conn *ldap.Conn, userDN string, config *LDAPConfig) ([]string, error) {
	filter := fmt.Sprintf(config.GroupFilter, ldap.EscapeFilter(userDN))
	request := ldap.NewSearchRequest(
		config.GroupBaseDN,
		ldap.ScopeWholeSubtree,
		ldap.NeverDerefAliases,
		0, // no size limit
		int(config.ConnectionTimeout.Seconds()),
		false,
		filter,
		[]string{"cn", "dn"},
		nil,
	)

	response, err := conn.Search(request)
	if err != nil {
		return nil, err
	}

	// Collect the non-empty CN of each matched group entry.
	var groups []string
	for _, entry := range response.Entries {
		if cn := entry.GetAttributeValue("cn"); cn != "" {
			groups = append(groups, cn)
		}
	}
	return groups, nil
}
// GetUserInfo retrieves user information by user ID.
// Unlike Authenticate, this performs no password verification — it only
// searches the directory using the service account (or anonymous) bind.
func (p *LDAPProvider) GetUserInfo(ctx context.Context, userID string) (*providers.ExternalIdentity, error) {
	p.mu.RLock()
	if !p.initialized {
		p.mu.RUnlock()
		return nil, fmt.Errorf("LDAP provider not initialized")
	}
	config := p.config
	p.mu.RUnlock()

	// Get connection from pool
	conn, err := p.getConnection()
	if err != nil {
		return nil, err
	}
	// Note: defer returnConnection moved to after bind

	// Bind with service account
	if config.BindDN != "" {
		err = conn.Bind(config.BindDN, config.BindPassword)
		if err != nil {
			conn.Close() // Close on bind failure
			return nil, fmt.Errorf("LDAP service bind failed: %w", err)
		}
	}
	// NOTE(review): from here the connection is returned to the pool even on
	// search errors (only bind failures close it) — confirm this is intended.
	defer p.returnConnection(conn)

	// Search for the user; userID is escaped to prevent filter injection
	userFilter := fmt.Sprintf(config.UserFilter, ldap.EscapeFilter(userID))
	searchRequest := ldap.NewSearchRequest(
		config.BaseDN,
		ldap.ScopeWholeSubtree,
		ldap.NeverDerefAliases,
		1, // Size limit
		int(config.ConnectionTimeout.Seconds()), // time limit, in whole seconds
		false,
		userFilter,
		[]string{"dn", config.Attributes.Email, config.Attributes.DisplayName, config.Attributes.UID, config.Attributes.Groups},
		nil,
	)
	result, err := conn.Search(searchRequest)
	if err != nil {
		return nil, fmt.Errorf("LDAP user search failed: %w", err)
	}
	if len(result.Entries) == 0 {
		return nil, fmt.Errorf("user not found")
	}
	if len(result.Entries) > 1 {
		return nil, fmt.Errorf("multiple users found")
	}
	userEntry := result.Entries[0]

	// Build identity from LDAP attributes
	identity := &providers.ExternalIdentity{
		UserID:      userID,
		Email:       userEntry.GetAttributeValue(config.Attributes.Email),
		DisplayName: userEntry.GetAttributeValue(config.Attributes.DisplayName),
		Groups:      userEntry.GetAttributeValues(config.Attributes.Groups),
		Provider:    p.name,
		Attributes: map[string]string{
			"dn":  userEntry.DN,
			"uid": userEntry.GetAttributeValue(config.Attributes.UID),
		},
	}

	// If no groups from memberOf, try group search (best-effort: failures
	// are logged and the identity is returned without groups)
	if len(identity.Groups) == 0 && config.GroupFilter != "" {
		groups, err := p.searchUserGroups(conn, userEntry.DN, config)
		if err != nil {
			glog.V(2).Infof("Group search failed for %s: %v", userID, err)
		} else {
			identity.Groups = groups
		}
	}
	return identity, nil
}
// ValidateToken validates credentials (username:password format) and returns
// claims describing the authenticated user. The claims' Audience defaults to
// the provider name unless an explicit Audience is configured; the Issuer is
// always the provider name. Since LDAP has no token lifetime of its own, a
// fixed one-hour TTL is applied.
func (p *LDAPProvider) ValidateToken(ctx context.Context, token string) (*providers.TokenClaims, error) {
	identity, err := p.Authenticate(ctx, token)
	if err != nil {
		return nil, err
	}

	p.mu.RLock()
	config := p.config
	p.mu.RUnlock()

	audience := p.name
	if config.Audience != "" {
		audience = config.Audience
	}

	issuedAt := time.Now()
	const ttl = time.Hour // default TTL for LDAP tokens

	extra := map[string]interface{}{
		"email":    identity.Email,
		"name":     identity.DisplayName,
		"groups":   identity.Groups,
		"dn":       identity.Attributes["dn"],
		"provider": p.name,
	}

	return &providers.TokenClaims{
		Subject:   identity.UserID,
		Issuer:    p.name,
		Audience:  audience,
		IssuedAt:  issuedAt,
		ExpiresAt: issuedAt.Add(ttl),
		Claims:    extra,
	}, nil
}
// IsInitialized returns whether the provider is initialized.
// It takes the read lock, so it is safe to call concurrently with Initialize.
func (p *LDAPProvider) IsInitialized() bool {
	p.mu.RLock()
	defer p.mu.RUnlock()
	return p.initialized
}

24
weed/iam/sts/cross_instance_token_test.go

@ -127,16 +127,16 @@ func TestCrossInstanceTokenUsage(t *testing.T) {
sessionId := TestSessionID
expiresAt := time.Now().Add(time.Hour)
tokenFromA, err := instanceA.tokenGenerator.GenerateSessionToken(sessionId, expiresAt)
tokenFromA, err := instanceA.GetTokenGenerator().GenerateSessionToken(sessionId, expiresAt)
require.NoError(t, err, "Instance A should generate token")
// Validate token on Instance B
claimsFromB, err := instanceB.tokenGenerator.ValidateSessionToken(tokenFromA)
claimsFromB, err := instanceB.GetTokenGenerator().ValidateSessionToken(tokenFromA)
require.NoError(t, err, "Instance B should validate token from Instance A")
assert.Equal(t, sessionId, claimsFromB.SessionId, "Session ID should match")
// Validate same token on Instance C
claimsFromC, err := instanceC.tokenGenerator.ValidateSessionToken(tokenFromA)
claimsFromC, err := instanceC.GetTokenGenerator().ValidateSessionToken(tokenFromA)
require.NoError(t, err, "Instance C should validate token from Instance A")
assert.Equal(t, sessionId, claimsFromC.SessionId, "Session ID should match")
@ -295,15 +295,15 @@ func TestSTSDistributedConfigurationRequirements(t *testing.T) {
// Generate token on Instance A
sessionId := "test-session"
expiresAt := time.Now().Add(time.Hour)
tokenFromA, err := instanceA.tokenGenerator.GenerateSessionToken(sessionId, expiresAt)
tokenFromA, err := instanceA.GetTokenGenerator().GenerateSessionToken(sessionId, expiresAt)
require.NoError(t, err)
// Instance A should validate its own token
_, err = instanceA.tokenGenerator.ValidateSessionToken(tokenFromA)
_, err = instanceA.GetTokenGenerator().ValidateSessionToken(tokenFromA)
assert.NoError(t, err, "Instance A should validate own token")
// Instance B should REJECT token due to different signing key
_, err = instanceB.tokenGenerator.ValidateSessionToken(tokenFromA)
_, err = instanceB.GetTokenGenerator().ValidateSessionToken(tokenFromA)
assert.Error(t, err, "Instance B should reject token with different signing key")
assert.Contains(t, err.Error(), "invalid token", "Should be signature validation error")
})
@ -339,11 +339,11 @@ func TestSTSDistributedConfigurationRequirements(t *testing.T) {
// Generate token on Instance A
sessionId := "test-session"
expiresAt := time.Now().Add(time.Hour)
tokenFromA, err := instanceA.tokenGenerator.GenerateSessionToken(sessionId, expiresAt)
tokenFromA, err := instanceA.GetTokenGenerator().GenerateSessionToken(sessionId, expiresAt)
require.NoError(t, err)
// Instance B should REJECT token due to different issuer
_, err = instanceB.tokenGenerator.ValidateSessionToken(tokenFromA)
_, err = instanceB.GetTokenGenerator().ValidateSessionToken(tokenFromA)
assert.Error(t, err, "Instance B should reject token with different issuer")
assert.Contains(t, err.Error(), "invalid issuer", "Should be issuer validation error")
})
@ -368,12 +368,12 @@ func TestSTSDistributedConfigurationRequirements(t *testing.T) {
// Generate token on Instance 0
sessionId := "multi-instance-test"
expiresAt := time.Now().Add(time.Hour)
token, err := instances[0].tokenGenerator.GenerateSessionToken(sessionId, expiresAt)
token, err := instances[0].GetTokenGenerator().GenerateSessionToken(sessionId, expiresAt)
require.NoError(t, err)
// All other instances should validate the token
for i := 1; i < 5; i++ {
claims, err := instances[i].tokenGenerator.ValidateSessionToken(token)
claims, err := instances[i].GetTokenGenerator().ValidateSessionToken(token)
require.NoError(t, err, "Instance %d should validate token", i)
assert.Equal(t, sessionId, claims.SessionId, "Instance %d should extract correct session ID", i)
}
@ -486,10 +486,10 @@ func TestSTSRealWorldDistributedScenarios(t *testing.T) {
assert.True(t, sessionInfo3.ExpiresAt.After(time.Now()), "Session should not be expired")
// Step 5: Token should be identical when parsed
claims2, err := gateway2.tokenGenerator.ValidateSessionToken(sessionToken)
claims2, err := gateway2.GetTokenGenerator().ValidateSessionToken(sessionToken)
require.NoError(t, err)
claims3, err := gateway3.tokenGenerator.ValidateSessionToken(sessionToken)
claims3, err := gateway3.GetTokenGenerator().ValidateSessionToken(sessionToken)
require.NoError(t, err)
assert.Equal(t, claims2.SessionId, claims3.SessionId, "Session IDs should match")

24
weed/iam/sts/distributed_sts_test.go

@ -109,9 +109,9 @@ func TestDistributedSTSService(t *testing.T) {
expiresAt := time.Now().Add(time.Hour)
// Generate tokens from different instances
token1, err1 := instance1.tokenGenerator.GenerateSessionToken(sessionId, expiresAt)
token2, err2 := instance2.tokenGenerator.GenerateSessionToken(sessionId, expiresAt)
token3, err3 := instance3.tokenGenerator.GenerateSessionToken(sessionId, expiresAt)
token1, err1 := instance1.GetTokenGenerator().GenerateSessionToken(sessionId, expiresAt)
token2, err2 := instance2.GetTokenGenerator().GenerateSessionToken(sessionId, expiresAt)
token3, err3 := instance3.GetTokenGenerator().GenerateSessionToken(sessionId, expiresAt)
require.NoError(t, err1, "Instance 1 token generation should succeed")
require.NoError(t, err2, "Instance 2 token generation should succeed")
@ -130,13 +130,13 @@ func TestDistributedSTSService(t *testing.T) {
expiresAt := time.Now().Add(time.Hour)
// Generate token on instance 1
token, err := instance1.tokenGenerator.GenerateSessionToken(sessionId, expiresAt)
token, err := instance1.GetTokenGenerator().GenerateSessionToken(sessionId, expiresAt)
require.NoError(t, err)
// Validate on all instances
claims1, err1 := instance1.tokenGenerator.ValidateSessionToken(token)
claims2, err2 := instance2.tokenGenerator.ValidateSessionToken(token)
claims3, err3 := instance3.tokenGenerator.ValidateSessionToken(token)
claims1, err1 := instance1.GetTokenGenerator().ValidateSessionToken(token)
claims2, err2 := instance2.GetTokenGenerator().ValidateSessionToken(token)
claims3, err3 := instance3.GetTokenGenerator().ValidateSessionToken(token)
require.NoError(t, err1, "Instance 1 should validate token from instance 1")
require.NoError(t, err2, "Instance 2 should validate token from instance 1")
@ -216,15 +216,15 @@ func TestSTSConfigurationValidation(t *testing.T) {
// Generate token on instance 1
sessionId := "test-session"
expiresAt := time.Now().Add(time.Hour)
token, err := instance1.tokenGenerator.GenerateSessionToken(sessionId, expiresAt)
token, err := instance1.GetTokenGenerator().GenerateSessionToken(sessionId, expiresAt)
require.NoError(t, err)
// Instance 1 should validate its own token
_, err = instance1.tokenGenerator.ValidateSessionToken(token)
_, err = instance1.GetTokenGenerator().ValidateSessionToken(token)
assert.NoError(t, err, "Instance 1 should validate its own token")
// Instance 2 should reject token from instance 1 (different signing key)
_, err = instance2.tokenGenerator.ValidateSessionToken(token)
_, err = instance2.GetTokenGenerator().ValidateSessionToken(token)
assert.Error(t, err, "Instance 2 should reject token with different signing key")
})
@ -258,12 +258,12 @@ func TestSTSConfigurationValidation(t *testing.T) {
// Generate token on instance 1
sessionId := "test-session"
expiresAt := time.Now().Add(time.Hour)
token, err := instance1.tokenGenerator.GenerateSessionToken(sessionId, expiresAt)
token, err := instance1.GetTokenGenerator().GenerateSessionToken(sessionId, expiresAt)
require.NoError(t, err)
// Instance 2 should reject token due to issuer mismatch
// (Even though signing key is the same, issuer validation will fail)
_, err = instance2.tokenGenerator.ValidateSessionToken(token)
_, err = instance2.GetTokenGenerator().ValidateSessionToken(token)
assert.Error(t, err, "Instance 2 should reject token with different issuer")
})
}

15
weed/iam/sts/provider_factory.go

@ -4,6 +4,7 @@ import (
"fmt"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/iam/ldap"
"github.com/seaweedfs/seaweedfs/weed/iam/oidc"
"github.com/seaweedfs/seaweedfs/weed/iam/providers"
)
@ -66,8 +67,11 @@ func (f *ProviderFactory) createOIDCProvider(config *ProviderConfig) (providers.
// createLDAPProvider creates an LDAP provider from configuration
func (f *ProviderFactory) createLDAPProvider(config *ProviderConfig) (providers.IdentityProvider, error) {
// TODO: Implement LDAP provider when available
return nil, fmt.Errorf("LDAP provider not implemented yet")
provider := ldap.NewLDAPProvider(config.Name)
if err := provider.Initialize(config.Config); err != nil {
return nil, fmt.Errorf("failed to initialize LDAP provider: %w", err)
}
return provider, nil
}
// createSAMLProvider creates a SAML provider from configuration
@ -317,7 +321,12 @@ func (f *ProviderFactory) validateOIDCConfig(config map[string]interface{}) erro
// validateLDAPConfig validates LDAP provider configuration
func (f *ProviderFactory) validateLDAPConfig(config map[string]interface{}) error {
// TODO: Implement when LDAP provider is available
if _, ok := config["server"]; !ok {
return fmt.Errorf("LDAP provider requires 'server' field")
}
if _, ok := config["baseDN"]; !ok {
return fmt.Errorf("LDAP provider requires 'baseDN' field")
}
return nil
}

14
weed/iam/sts/sts_service.go

@ -81,6 +81,12 @@ type STSService struct {
trustPolicyValidator TrustPolicyValidator // Interface for trust policy validation
}
// GetTokenGenerator returns the token generator used by the STS service.
// This keeps the underlying field unexported while still allowing read-only access.
func (s *STSService) GetTokenGenerator() *TokenGenerator {
return s.tokenGenerator
}
// STSConfig holds STS service configuration
type STSConfig struct {
// TokenDuration is the default duration for issued tokens
@ -95,6 +101,10 @@ type STSConfig struct {
// SigningKey is used to sign session tokens
SigningKey []byte `json:"signingKey"`
// AccountId is the AWS account ID used for federated user ARNs
// Defaults to "111122223333" if not specified
AccountId string `json:"accountId,omitempty"`
// Providers configuration - enables automatic provider loading
Providers []*ProviderConfig `json:"providers,omitempty"`
}
@ -807,7 +817,7 @@ func (s *STSService) calculateSessionDuration(durationSeconds *int64, tokenExpir
// extractSessionIdFromToken extracts session ID from JWT session token
func (s *STSService) extractSessionIdFromToken(sessionToken string) string {
// Parse JWT and extract session ID from claims
// Validate JWT and extract session claims
claims, err := s.tokenGenerator.ValidateJWTWithClaims(sessionToken)
if err != nil {
// For test compatibility, also handle direct session IDs
@ -862,7 +872,7 @@ func (s *STSService) ExpireSessionForTesting(ctx context.Context, sessionToken s
return fmt.Errorf("session token cannot be empty")
}
// Validate JWT token format
// Just validate the signature
_, err := s.tokenGenerator.ValidateJWTWithClaims(sessionToken)
if err != nil {
return fmt.Errorf("invalid session token format: %w", err)

5
weed/pb/master.proto

@ -4,6 +4,8 @@ package master_pb;
option go_package = "github.com/seaweedfs/seaweedfs/weed/pb/master_pb";
import "volume_server.proto";
//////////////////////////////////////////////////
service Seaweed {
@ -84,6 +86,9 @@ message Heartbeat {
uint32 grpc_port = 20;
repeated string location_uuids = 21;
string id = 22; // volume server id, independent of ip:port for stable identification
// state flags
volume_server_pb.VolumeServerState state = 23;
}
message HeartbeatResponse {

185
weed/pb/master_pb/master.pb.go

@ -11,6 +11,7 @@ import (
sync "sync"
unsafe "unsafe"
volume_server_pb "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
)
@ -46,8 +47,10 @@ type Heartbeat struct {
GrpcPort uint32 `protobuf:"varint,20,opt,name=grpc_port,json=grpcPort,proto3" json:"grpc_port,omitempty"`
LocationUuids []string `protobuf:"bytes,21,rep,name=location_uuids,json=locationUuids,proto3" json:"location_uuids,omitempty"`
Id string `protobuf:"bytes,22,opt,name=id,proto3" json:"id,omitempty"` // volume server id, independent of ip:port for stable identification
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
// state flags
State *volume_server_pb.VolumeServerState `protobuf:"bytes,23,opt,name=state,proto3" json:"state,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *Heartbeat) Reset() {
@ -213,6 +216,13 @@ func (x *Heartbeat) GetId() string {
return ""
}
func (x *Heartbeat) GetState() *volume_server_pb.VolumeServerState {
if x != nil {
return x.State
}
return nil
}
type HeartbeatResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
VolumeSizeLimit uint64 `protobuf:"varint,1,opt,name=volume_size_limit,json=volumeSizeLimit,proto3" json:"volume_size_limit,omitempty"`
@ -4159,7 +4169,7 @@ var File_master_proto protoreflect.FileDescriptor
const file_master_proto_rawDesc = "" +
"\n" +
"\fmaster.proto\x12\tmaster_pb\"\xd0\a\n" +
"\fmaster.proto\x12\tmaster_pb\x1a\x13volume_server.proto\"\x8b\b\n" +
"\tHeartbeat\x12\x0e\n" +
"\x02ip\x18\x01 \x01(\tR\x02ip\x12\x12\n" +
"\x04port\x18\x02 \x01(\rR\x04port\x12\x1d\n" +
@ -4185,7 +4195,8 @@ const file_master_proto_rawDesc = "" +
"\x11max_volume_counts\x18\x04 \x03(\v2).master_pb.Heartbeat.MaxVolumeCountsEntryR\x0fmaxVolumeCounts\x12\x1b\n" +
"\tgrpc_port\x18\x14 \x01(\rR\bgrpcPort\x12%\n" +
"\x0elocation_uuids\x18\x15 \x03(\tR\rlocationUuids\x12\x0e\n" +
"\x02id\x18\x16 \x01(\tR\x02id\x1aB\n" +
"\x02id\x18\x16 \x01(\tR\x02id\x129\n" +
"\x05state\x18\x17 \x01(\v2#.volume_server_pb.VolumeServerStateR\x05state\x1aB\n" +
"\x14MaxVolumeCountsEntry\x12\x10\n" +
"\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" +
"\x05value\x18\x02 \x01(\rR\x05value:\x028\x01\"\xcd\x02\n" +
@ -4634,6 +4645,7 @@ var file_master_proto_goTypes = []any{
(*LookupEcVolumeResponse_EcShardIdLocation)(nil), // 69: master_pb.LookupEcVolumeResponse.EcShardIdLocation
(*ListClusterNodesResponse_ClusterNode)(nil), // 70: master_pb.ListClusterNodesResponse.ClusterNode
(*RaftListClusterServersResponse_ClusterServers)(nil), // 71: master_pb.RaftListClusterServersResponse.ClusterServers
(*volume_server_pb.VolumeServerState)(nil), // 72: volume_server_pb.VolumeServerState
}
var file_master_proto_depIdxs = []int32{
2, // 0: master_pb.Heartbeat.volumes:type_name -> master_pb.VolumeInformationMessage
@ -4643,88 +4655,89 @@ var file_master_proto_depIdxs = []int32{
4, // 4: master_pb.Heartbeat.new_ec_shards:type_name -> master_pb.VolumeEcShardInformationMessage
4, // 5: master_pb.Heartbeat.deleted_ec_shards:type_name -> master_pb.VolumeEcShardInformationMessage
61, // 6: master_pb.Heartbeat.max_volume_counts:type_name -> master_pb.Heartbeat.MaxVolumeCountsEntry
5, // 7: master_pb.HeartbeatResponse.storage_backends:type_name -> master_pb.StorageBackend
62, // 8: master_pb.StorageBackend.properties:type_name -> master_pb.StorageBackend.PropertiesEntry
63, // 9: master_pb.SuperBlockExtra.erasure_coding:type_name -> master_pb.SuperBlockExtra.ErasureCoding
9, // 10: master_pb.KeepConnectedResponse.volume_location:type_name -> master_pb.VolumeLocation
10, // 11: master_pb.KeepConnectedResponse.cluster_node_update:type_name -> master_pb.ClusterNodeUpdate
64, // 12: master_pb.LookupVolumeResponse.volume_id_locations:type_name -> master_pb.LookupVolumeResponse.VolumeIdLocation
14, // 13: master_pb.AssignResponse.replicas:type_name -> master_pb.Location
14, // 14: master_pb.AssignResponse.location:type_name -> master_pb.Location
20, // 15: master_pb.CollectionListResponse.collections:type_name -> master_pb.Collection
2, // 16: master_pb.DiskInfo.volume_infos:type_name -> master_pb.VolumeInformationMessage
4, // 17: master_pb.DiskInfo.ec_shard_infos:type_name -> master_pb.VolumeEcShardInformationMessage
65, // 18: master_pb.DataNodeInfo.diskInfos:type_name -> master_pb.DataNodeInfo.DiskInfosEntry
26, // 19: master_pb.RackInfo.data_node_infos:type_name -> master_pb.DataNodeInfo
66, // 20: master_pb.RackInfo.diskInfos:type_name -> master_pb.RackInfo.DiskInfosEntry
27, // 21: master_pb.DataCenterInfo.rack_infos:type_name -> master_pb.RackInfo
67, // 22: master_pb.DataCenterInfo.diskInfos:type_name -> master_pb.DataCenterInfo.DiskInfosEntry
28, // 23: master_pb.TopologyInfo.data_center_infos:type_name -> master_pb.DataCenterInfo
68, // 24: master_pb.TopologyInfo.diskInfos:type_name -> master_pb.TopologyInfo.DiskInfosEntry
29, // 25: master_pb.VolumeListResponse.topology_info:type_name -> master_pb.TopologyInfo
69, // 26: master_pb.LookupEcVolumeResponse.shard_id_locations:type_name -> master_pb.LookupEcVolumeResponse.EcShardIdLocation
5, // 27: master_pb.GetMasterConfigurationResponse.storage_backends:type_name -> master_pb.StorageBackend
70, // 28: master_pb.ListClusterNodesResponse.cluster_nodes:type_name -> master_pb.ListClusterNodesResponse.ClusterNode
71, // 29: master_pb.RaftListClusterServersResponse.cluster_servers:type_name -> master_pb.RaftListClusterServersResponse.ClusterServers
14, // 30: master_pb.LookupVolumeResponse.VolumeIdLocation.locations:type_name -> master_pb.Location
25, // 31: master_pb.DataNodeInfo.DiskInfosEntry.value:type_name -> master_pb.DiskInfo
25, // 32: master_pb.RackInfo.DiskInfosEntry.value:type_name -> master_pb.DiskInfo
25, // 33: master_pb.DataCenterInfo.DiskInfosEntry.value:type_name -> master_pb.DiskInfo
25, // 34: master_pb.TopologyInfo.DiskInfosEntry.value:type_name -> master_pb.DiskInfo
14, // 35: master_pb.LookupEcVolumeResponse.EcShardIdLocation.locations:type_name -> master_pb.Location
0, // 36: master_pb.Seaweed.SendHeartbeat:input_type -> master_pb.Heartbeat
8, // 37: master_pb.Seaweed.KeepConnected:input_type -> master_pb.KeepConnectedRequest
12, // 38: master_pb.Seaweed.LookupVolume:input_type -> master_pb.LookupVolumeRequest
15, // 39: master_pb.Seaweed.Assign:input_type -> master_pb.AssignRequest
15, // 40: master_pb.Seaweed.StreamAssign:input_type -> master_pb.AssignRequest
18, // 41: master_pb.Seaweed.Statistics:input_type -> master_pb.StatisticsRequest
21, // 42: master_pb.Seaweed.CollectionList:input_type -> master_pb.CollectionListRequest
23, // 43: master_pb.Seaweed.CollectionDelete:input_type -> master_pb.CollectionDeleteRequest
30, // 44: master_pb.Seaweed.VolumeList:input_type -> master_pb.VolumeListRequest
32, // 45: master_pb.Seaweed.LookupEcVolume:input_type -> master_pb.LookupEcVolumeRequest
34, // 46: master_pb.Seaweed.VacuumVolume:input_type -> master_pb.VacuumVolumeRequest
36, // 47: master_pb.Seaweed.DisableVacuum:input_type -> master_pb.DisableVacuumRequest
38, // 48: master_pb.Seaweed.EnableVacuum:input_type -> master_pb.EnableVacuumRequest
40, // 49: master_pb.Seaweed.VolumeMarkReadonly:input_type -> master_pb.VolumeMarkReadonlyRequest
42, // 50: master_pb.Seaweed.GetMasterConfiguration:input_type -> master_pb.GetMasterConfigurationRequest
44, // 51: master_pb.Seaweed.ListClusterNodes:input_type -> master_pb.ListClusterNodesRequest
46, // 52: master_pb.Seaweed.LeaseAdminToken:input_type -> master_pb.LeaseAdminTokenRequest
48, // 53: master_pb.Seaweed.ReleaseAdminToken:input_type -> master_pb.ReleaseAdminTokenRequest
50, // 54: master_pb.Seaweed.Ping:input_type -> master_pb.PingRequest
56, // 55: master_pb.Seaweed.RaftListClusterServers:input_type -> master_pb.RaftListClusterServersRequest
52, // 56: master_pb.Seaweed.RaftAddServer:input_type -> master_pb.RaftAddServerRequest
54, // 57: master_pb.Seaweed.RaftRemoveServer:input_type -> master_pb.RaftRemoveServerRequest
58, // 58: master_pb.Seaweed.RaftLeadershipTransfer:input_type -> master_pb.RaftLeadershipTransferRequest
16, // 59: master_pb.Seaweed.VolumeGrow:input_type -> master_pb.VolumeGrowRequest
1, // 60: master_pb.Seaweed.SendHeartbeat:output_type -> master_pb.HeartbeatResponse
11, // 61: master_pb.Seaweed.KeepConnected:output_type -> master_pb.KeepConnectedResponse
13, // 62: master_pb.Seaweed.LookupVolume:output_type -> master_pb.LookupVolumeResponse
17, // 63: master_pb.Seaweed.Assign:output_type -> master_pb.AssignResponse
17, // 64: master_pb.Seaweed.StreamAssign:output_type -> master_pb.AssignResponse
19, // 65: master_pb.Seaweed.Statistics:output_type -> master_pb.StatisticsResponse
22, // 66: master_pb.Seaweed.CollectionList:output_type -> master_pb.CollectionListResponse
24, // 67: master_pb.Seaweed.CollectionDelete:output_type -> master_pb.CollectionDeleteResponse
31, // 68: master_pb.Seaweed.VolumeList:output_type -> master_pb.VolumeListResponse
33, // 69: master_pb.Seaweed.LookupEcVolume:output_type -> master_pb.LookupEcVolumeResponse
35, // 70: master_pb.Seaweed.VacuumVolume:output_type -> master_pb.VacuumVolumeResponse
37, // 71: master_pb.Seaweed.DisableVacuum:output_type -> master_pb.DisableVacuumResponse
39, // 72: master_pb.Seaweed.EnableVacuum:output_type -> master_pb.EnableVacuumResponse
41, // 73: master_pb.Seaweed.VolumeMarkReadonly:output_type -> master_pb.VolumeMarkReadonlyResponse
43, // 74: master_pb.Seaweed.GetMasterConfiguration:output_type -> master_pb.GetMasterConfigurationResponse
45, // 75: master_pb.Seaweed.ListClusterNodes:output_type -> master_pb.ListClusterNodesResponse
47, // 76: master_pb.Seaweed.LeaseAdminToken:output_type -> master_pb.LeaseAdminTokenResponse
49, // 77: master_pb.Seaweed.ReleaseAdminToken:output_type -> master_pb.ReleaseAdminTokenResponse
51, // 78: master_pb.Seaweed.Ping:output_type -> master_pb.PingResponse
57, // 79: master_pb.Seaweed.RaftListClusterServers:output_type -> master_pb.RaftListClusterServersResponse
53, // 80: master_pb.Seaweed.RaftAddServer:output_type -> master_pb.RaftAddServerResponse
55, // 81: master_pb.Seaweed.RaftRemoveServer:output_type -> master_pb.RaftRemoveServerResponse
59, // 82: master_pb.Seaweed.RaftLeadershipTransfer:output_type -> master_pb.RaftLeadershipTransferResponse
60, // 83: master_pb.Seaweed.VolumeGrow:output_type -> master_pb.VolumeGrowResponse
60, // [60:84] is the sub-list for method output_type
36, // [36:60] is the sub-list for method input_type
36, // [36:36] is the sub-list for extension type_name
36, // [36:36] is the sub-list for extension extendee
0, // [0:36] is the sub-list for field type_name
72, // 7: master_pb.Heartbeat.state:type_name -> volume_server_pb.VolumeServerState
5, // 8: master_pb.HeartbeatResponse.storage_backends:type_name -> master_pb.StorageBackend
62, // 9: master_pb.StorageBackend.properties:type_name -> master_pb.StorageBackend.PropertiesEntry
63, // 10: master_pb.SuperBlockExtra.erasure_coding:type_name -> master_pb.SuperBlockExtra.ErasureCoding
9, // 11: master_pb.KeepConnectedResponse.volume_location:type_name -> master_pb.VolumeLocation
10, // 12: master_pb.KeepConnectedResponse.cluster_node_update:type_name -> master_pb.ClusterNodeUpdate
64, // 13: master_pb.LookupVolumeResponse.volume_id_locations:type_name -> master_pb.LookupVolumeResponse.VolumeIdLocation
14, // 14: master_pb.AssignResponse.replicas:type_name -> master_pb.Location
14, // 15: master_pb.AssignResponse.location:type_name -> master_pb.Location
20, // 16: master_pb.CollectionListResponse.collections:type_name -> master_pb.Collection
2, // 17: master_pb.DiskInfo.volume_infos:type_name -> master_pb.VolumeInformationMessage
4, // 18: master_pb.DiskInfo.ec_shard_infos:type_name -> master_pb.VolumeEcShardInformationMessage
65, // 19: master_pb.DataNodeInfo.diskInfos:type_name -> master_pb.DataNodeInfo.DiskInfosEntry
26, // 20: master_pb.RackInfo.data_node_infos:type_name -> master_pb.DataNodeInfo
66, // 21: master_pb.RackInfo.diskInfos:type_name -> master_pb.RackInfo.DiskInfosEntry
27, // 22: master_pb.DataCenterInfo.rack_infos:type_name -> master_pb.RackInfo
67, // 23: master_pb.DataCenterInfo.diskInfos:type_name -> master_pb.DataCenterInfo.DiskInfosEntry
28, // 24: master_pb.TopologyInfo.data_center_infos:type_name -> master_pb.DataCenterInfo
68, // 25: master_pb.TopologyInfo.diskInfos:type_name -> master_pb.TopologyInfo.DiskInfosEntry
29, // 26: master_pb.VolumeListResponse.topology_info:type_name -> master_pb.TopologyInfo
69, // 27: master_pb.LookupEcVolumeResponse.shard_id_locations:type_name -> master_pb.LookupEcVolumeResponse.EcShardIdLocation
5, // 28: master_pb.GetMasterConfigurationResponse.storage_backends:type_name -> master_pb.StorageBackend
70, // 29: master_pb.ListClusterNodesResponse.cluster_nodes:type_name -> master_pb.ListClusterNodesResponse.ClusterNode
71, // 30: master_pb.RaftListClusterServersResponse.cluster_servers:type_name -> master_pb.RaftListClusterServersResponse.ClusterServers
14, // 31: master_pb.LookupVolumeResponse.VolumeIdLocation.locations:type_name -> master_pb.Location
25, // 32: master_pb.DataNodeInfo.DiskInfosEntry.value:type_name -> master_pb.DiskInfo
25, // 33: master_pb.RackInfo.DiskInfosEntry.value:type_name -> master_pb.DiskInfo
25, // 34: master_pb.DataCenterInfo.DiskInfosEntry.value:type_name -> master_pb.DiskInfo
25, // 35: master_pb.TopologyInfo.DiskInfosEntry.value:type_name -> master_pb.DiskInfo
14, // 36: master_pb.LookupEcVolumeResponse.EcShardIdLocation.locations:type_name -> master_pb.Location
0, // 37: master_pb.Seaweed.SendHeartbeat:input_type -> master_pb.Heartbeat
8, // 38: master_pb.Seaweed.KeepConnected:input_type -> master_pb.KeepConnectedRequest
12, // 39: master_pb.Seaweed.LookupVolume:input_type -> master_pb.LookupVolumeRequest
15, // 40: master_pb.Seaweed.Assign:input_type -> master_pb.AssignRequest
15, // 41: master_pb.Seaweed.StreamAssign:input_type -> master_pb.AssignRequest
18, // 42: master_pb.Seaweed.Statistics:input_type -> master_pb.StatisticsRequest
21, // 43: master_pb.Seaweed.CollectionList:input_type -> master_pb.CollectionListRequest
23, // 44: master_pb.Seaweed.CollectionDelete:input_type -> master_pb.CollectionDeleteRequest
30, // 45: master_pb.Seaweed.VolumeList:input_type -> master_pb.VolumeListRequest
32, // 46: master_pb.Seaweed.LookupEcVolume:input_type -> master_pb.LookupEcVolumeRequest
34, // 47: master_pb.Seaweed.VacuumVolume:input_type -> master_pb.VacuumVolumeRequest
36, // 48: master_pb.Seaweed.DisableVacuum:input_type -> master_pb.DisableVacuumRequest
38, // 49: master_pb.Seaweed.EnableVacuum:input_type -> master_pb.EnableVacuumRequest
40, // 50: master_pb.Seaweed.VolumeMarkReadonly:input_type -> master_pb.VolumeMarkReadonlyRequest
42, // 51: master_pb.Seaweed.GetMasterConfiguration:input_type -> master_pb.GetMasterConfigurationRequest
44, // 52: master_pb.Seaweed.ListClusterNodes:input_type -> master_pb.ListClusterNodesRequest
46, // 53: master_pb.Seaweed.LeaseAdminToken:input_type -> master_pb.LeaseAdminTokenRequest
48, // 54: master_pb.Seaweed.ReleaseAdminToken:input_type -> master_pb.ReleaseAdminTokenRequest
50, // 55: master_pb.Seaweed.Ping:input_type -> master_pb.PingRequest
56, // 56: master_pb.Seaweed.RaftListClusterServers:input_type -> master_pb.RaftListClusterServersRequest
52, // 57: master_pb.Seaweed.RaftAddServer:input_type -> master_pb.RaftAddServerRequest
54, // 58: master_pb.Seaweed.RaftRemoveServer:input_type -> master_pb.RaftRemoveServerRequest
58, // 59: master_pb.Seaweed.RaftLeadershipTransfer:input_type -> master_pb.RaftLeadershipTransferRequest
16, // 60: master_pb.Seaweed.VolumeGrow:input_type -> master_pb.VolumeGrowRequest
1, // 61: master_pb.Seaweed.SendHeartbeat:output_type -> master_pb.HeartbeatResponse
11, // 62: master_pb.Seaweed.KeepConnected:output_type -> master_pb.KeepConnectedResponse
13, // 63: master_pb.Seaweed.LookupVolume:output_type -> master_pb.LookupVolumeResponse
17, // 64: master_pb.Seaweed.Assign:output_type -> master_pb.AssignResponse
17, // 65: master_pb.Seaweed.StreamAssign:output_type -> master_pb.AssignResponse
19, // 66: master_pb.Seaweed.Statistics:output_type -> master_pb.StatisticsResponse
22, // 67: master_pb.Seaweed.CollectionList:output_type -> master_pb.CollectionListResponse
24, // 68: master_pb.Seaweed.CollectionDelete:output_type -> master_pb.CollectionDeleteResponse
31, // 69: master_pb.Seaweed.VolumeList:output_type -> master_pb.VolumeListResponse
33, // 70: master_pb.Seaweed.LookupEcVolume:output_type -> master_pb.LookupEcVolumeResponse
35, // 71: master_pb.Seaweed.VacuumVolume:output_type -> master_pb.VacuumVolumeResponse
37, // 72: master_pb.Seaweed.DisableVacuum:output_type -> master_pb.DisableVacuumResponse
39, // 73: master_pb.Seaweed.EnableVacuum:output_type -> master_pb.EnableVacuumResponse
41, // 74: master_pb.Seaweed.VolumeMarkReadonly:output_type -> master_pb.VolumeMarkReadonlyResponse
43, // 75: master_pb.Seaweed.GetMasterConfiguration:output_type -> master_pb.GetMasterConfigurationResponse
45, // 76: master_pb.Seaweed.ListClusterNodes:output_type -> master_pb.ListClusterNodesResponse
47, // 77: master_pb.Seaweed.LeaseAdminToken:output_type -> master_pb.LeaseAdminTokenResponse
49, // 78: master_pb.Seaweed.ReleaseAdminToken:output_type -> master_pb.ReleaseAdminTokenResponse
51, // 79: master_pb.Seaweed.Ping:output_type -> master_pb.PingResponse
57, // 80: master_pb.Seaweed.RaftListClusterServers:output_type -> master_pb.RaftListClusterServersResponse
53, // 81: master_pb.Seaweed.RaftAddServer:output_type -> master_pb.RaftAddServerResponse
55, // 82: master_pb.Seaweed.RaftRemoveServer:output_type -> master_pb.RaftRemoveServerResponse
59, // 83: master_pb.Seaweed.RaftLeadershipTransfer:output_type -> master_pb.RaftLeadershipTransferResponse
60, // 84: master_pb.Seaweed.VolumeGrow:output_type -> master_pb.VolumeGrowResponse
61, // [61:85] is the sub-list for method output_type
37, // [37:61] is the sub-list for method input_type
37, // [37:37] is the sub-list for extension type_name
37, // [37:37] is the sub-list for extension extendee
0, // [0:37] is the sub-list for field type_name
}
func init() { file_master_proto_init() }

10
weed/pb/volume_server.proto

@ -7,6 +7,14 @@ import "remote.proto";
//////////////////////////////////////////////////
// Persistent state for volume servers.
message VolumeServerState {
// Whether the server is in maintenance (i.e. read-only) mode.
bool maintenance = 1;
}
//////////////////////////////////////////////////
service VolumeServer {
//Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas.
rpc BatchDelete (BatchDeleteRequest) returns (BatchDeleteResponse) {
@ -45,6 +53,7 @@ service VolumeServer {
}
rpc VolumeStatus (VolumeStatusRequest) returns (VolumeStatusResponse) {
}
// TODO(issues/7977): add RPCs to control state flags
// copy the .idx .dat files, and mount this volume
rpc VolumeCopy (VolumeCopyRequest) returns (stream VolumeCopyResponse) {
@ -569,6 +578,7 @@ message VolumeServerStatusRequest {
}
message VolumeServerStatusResponse {
// TODO(issues/7977): add volume server state to response
repeated DiskStatus disk_statuses = 1;
MemStatus memory_status = 2;
string version = 3;

1133
weed/pb/volume_server_pb/volume_server.pb.go
File diff suppressed because it is too large
View File

777
weed/pb/volume_server_pb/volume_server_grpc.pb.go
File diff suppressed because it is too large
View File

24
weed/remote_storage/gcs/gcs_storage_client.go

@ -14,6 +14,8 @@ import (
"github.com/seaweedfs/seaweedfs/weed/pb/remote_pb"
"github.com/seaweedfs/seaweedfs/weed/remote_storage"
"github.com/seaweedfs/seaweedfs/weed/util"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
)
@ -54,7 +56,27 @@ func (s gcsRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_storage.
googleApplicationCredentials = util.ResolvePath(googleApplicationCredentials)
c, err := storage.NewClient(context.Background(), option.WithCredentialsFile(googleApplicationCredentials))
var clientOpts []option.ClientOption
if googleApplicationCredentials != "" {
var data []byte
var err error
if strings.HasPrefix(googleApplicationCredentials, "{") {
data = []byte(googleApplicationCredentials)
} else {
data, err = os.ReadFile(googleApplicationCredentials)
if err != nil {
return nil, fmt.Errorf("failed to read credentials file %s: %w", googleApplicationCredentials, err)
}
}
creds, err := google.CredentialsFromJSON(context.Background(), data, storage.ScopeFullControl)
if err != nil {
return nil, fmt.Errorf("failed to parse credentials: %w", err)
}
httpClient := oauth2.NewClient(context.Background(), creds.TokenSource)
clientOpts = append(clientOpts, option.WithHTTPClient(httpClient), option.WithoutAuthentication())
}
c, err := storage.NewClient(context.Background(), clientOpts...)
if err != nil {
return nil, fmt.Errorf("failed to create client: %w", err)
}

15
weed/s3api/auth_credentials_trust.go

@ -0,0 +1,15 @@
package s3api
import (
"context"
"fmt"
)
// ValidateTrustPolicyForPrincipal reports whether the given principal is
// allowed to assume the role identified by roleArn.
// The decision is delegated to the configured IAM integration; when no
// integration is configured, an error is returned.
func (iam *IdentityAccessManagement) ValidateTrustPolicyForPrincipal(ctx context.Context, roleArn, principalArn string) error {
	integration := iam.iamIntegration
	if integration == nil {
		return fmt.Errorf("IAM integration not available")
	}
	return integration.ValidateTrustPolicyForPrincipal(ctx, roleArn, principalArn)
}

12
weed/s3api/auth_signature_v4_sts_test.go

@ -16,8 +16,9 @@ import (
// MockIAMIntegration is a mock implementation of IAM integration for testing
type MockIAMIntegration struct {
authorizeFunc func(ctx context.Context, identity *IAMIdentity, action Action, bucket, object string, r *http.Request) s3err.ErrorCode
authCalled bool
authorizeFunc func(ctx context.Context, identity *IAMIdentity, action Action, bucket, object string, r *http.Request) s3err.ErrorCode
validateTrustPolicyFunc func(ctx context.Context, roleArn, principalArn string) error
authCalled bool
}
func (m *MockIAMIntegration) AuthorizeAction(ctx context.Context, identity *IAMIdentity, action Action, bucket, object string, r *http.Request) s3err.ErrorCode {
@ -36,6 +37,13 @@ func (m *MockIAMIntegration) ValidateSessionToken(ctx context.Context, token str
return nil, nil // Not needed for these tests
}
// ValidateTrustPolicyForPrincipal invokes the test's configured stub when one
// is set; otherwise it succeeds unconditionally (permissive default for tests).
func (m *MockIAMIntegration) ValidateTrustPolicyForPrincipal(ctx context.Context, roleArn, principalArn string) error {
	if m.validateTrustPolicyFunc == nil {
		return nil
	}
	return m.validateTrustPolicyFunc(ctx, roleArn, principalArn)
}
// TestVerifyV4SignatureWithSTSIdentity tests that verifyV4Signature properly handles STS identities
// by falling back to IAM authorization when shouldCheckPermissions is true
func TestVerifyV4SignatureWithSTSIdentity(t *testing.T) {

8
weed/s3api/s3_end_to_end_test.go

@ -477,7 +477,7 @@ func setupS3ReadOnlyRole(ctx context.Context, manager *integration.IAMManager) {
{
Effect: "Allow",
Principal: map[string]interface{}{
"Federated": "https://test-issuer.com",
"Federated": "test-oidc",
},
Action: []string{"sts:AssumeRoleWithWebIdentity"},
},
@ -521,7 +521,7 @@ func setupS3AdminRole(ctx context.Context, manager *integration.IAMManager) {
{
Effect: "Allow",
Principal: map[string]interface{}{
"Federated": "https://test-issuer.com",
"Federated": "test-oidc",
},
Action: []string{"sts:AssumeRoleWithWebIdentity"},
},
@ -565,7 +565,7 @@ func setupS3WriteRole(ctx context.Context, manager *integration.IAMManager) {
{
Effect: "Allow",
Principal: map[string]interface{}{
"Federated": "https://test-issuer.com",
"Federated": "test-oidc",
},
Action: []string{"sts:AssumeRoleWithWebIdentity"},
},
@ -614,7 +614,7 @@ func setupS3IPRestrictedRole(ctx context.Context, manager *integration.IAMManage
{
Effect: "Allow",
Principal: map[string]interface{}{
"Federated": "https://test-issuer.com",
"Federated": "test-oidc",
},
Action: []string{"sts:AssumeRoleWithWebIdentity"},
},

9
weed/s3api/s3_iam_middleware.go

@ -23,6 +23,7 @@ type IAMIntegration interface {
AuthenticateJWT(ctx context.Context, r *http.Request) (*IAMIdentity, s3err.ErrorCode)
AuthorizeAction(ctx context.Context, identity *IAMIdentity, action Action, bucket string, objectKey string, r *http.Request) s3err.ErrorCode
ValidateSessionToken(ctx context.Context, token string) (*sts.SessionInfo, error)
ValidateTrustPolicyForPrincipal(ctx context.Context, roleArn, principalArn string) error
}
// S3IAMIntegration provides IAM integration for S3 API
@ -224,6 +225,14 @@ func (s3iam *S3IAMIntegration) AuthorizeAction(ctx context.Context, identity *IA
return s3err.ErrNone
}
// ValidateTrustPolicyForPrincipal delegates the trust-policy check for the
// given role/principal pair to the underlying IAM manager.
// Returns an error when no IAM manager is configured.
func (s3iam *S3IAMIntegration) ValidateTrustPolicyForPrincipal(ctx context.Context, roleArn, principalArn string) error {
	mgr := s3iam.iamManager
	if mgr == nil {
		return fmt.Errorf("IAM manager not available")
	}
	return mgr.ValidateTrustPolicyForPrincipal(ctx, roleArn, principalArn)
}
// IAMIdentity represents an authenticated identity with session information
type IAMIdentity struct {
Name string

10
weed/s3api/s3_jwt_auth_test.go

@ -387,7 +387,7 @@ func setupTestReadOnlyRole(ctx context.Context, manager *integration.IAMManager)
{
Effect: "Allow",
Principal: map[string]interface{}{
"Federated": "https://test-issuer.com",
"Federated": "test-oidc",
},
Action: []string{"sts:AssumeRoleWithWebIdentity"},
},
@ -405,7 +405,7 @@ func setupTestReadOnlyRole(ctx context.Context, manager *integration.IAMManager)
{
Effect: "Allow",
Principal: map[string]interface{}{
"Federated": "https://test-issuer.com",
"Federated": "test-oidc",
},
Action: []string{"sts:AssumeRoleWithWebIdentity"},
},
@ -449,7 +449,7 @@ func setupTestAdminRole(ctx context.Context, manager *integration.IAMManager) {
{
Effect: "Allow",
Principal: map[string]interface{}{
"Federated": "https://test-issuer.com",
"Federated": "test-oidc",
},
Action: []string{"sts:AssumeRoleWithWebIdentity"},
},
@ -467,7 +467,7 @@ func setupTestAdminRole(ctx context.Context, manager *integration.IAMManager) {
{
Effect: "Allow",
Principal: map[string]interface{}{
"Federated": "https://test-issuer.com",
"Federated": "test-oidc",
},
Action: []string{"sts:AssumeRoleWithWebIdentity"},
},
@ -510,7 +510,7 @@ func setupTestIPRestrictedRole(ctx context.Context, manager *integration.IAMMana
{
Effect: "Allow",
Principal: map[string]interface{}{
"Federated": "https://test-issuer.com",
"Federated": "test-oidc",
},
Action: []string{"sts:AssumeRoleWithWebIdentity"},
},

4
weed/s3api/s3_multipart_iam_test.go

@ -568,7 +568,7 @@ func setupTestRolesForMultipart(ctx context.Context, manager *integration.IAMMan
{
Effect: "Allow",
Principal: map[string]interface{}{
"Federated": "https://test-issuer.com",
"Federated": "test-oidc",
},
Action: []string{"sts:AssumeRoleWithWebIdentity"},
},
@ -586,7 +586,7 @@ func setupTestRolesForMultipart(ctx context.Context, manager *integration.IAMMan
{
Effect: "Allow",
Principal: map[string]interface{}{
"Federated": "https://test-issuer.com",
"Federated": "test-oidc",
},
Action: []string{"sts:AssumeRoleWithWebIdentity"},
},

6
weed/s3api/s3_presigned_url_iam_test.go

@ -521,7 +521,7 @@ func setupTestRolesForPresigned(ctx context.Context, manager *integration.IAMMan
{
Effect: "Allow",
Principal: map[string]interface{}{
"Federated": "https://test-issuer.com",
"Federated": "test-oidc",
},
Action: []string{"sts:AssumeRoleWithWebIdentity"},
},
@ -557,7 +557,7 @@ func setupTestRolesForPresigned(ctx context.Context, manager *integration.IAMMan
{
Effect: "Allow",
Principal: map[string]interface{}{
"Federated": "https://test-issuer.com",
"Federated": "test-oidc",
},
Action: []string{"sts:AssumeRoleWithWebIdentity"},
},
@ -575,7 +575,7 @@ func setupTestRolesForPresigned(ctx context.Context, manager *integration.IAMMan
{
Effect: "Allow",
Principal: map[string]interface{}{
"Federated": "https://test-issuer.com",
"Federated": "test-oidc",
},
Action: []string{"sts:AssumeRoleWithWebIdentity"},
},

40
weed/s3api/s3api_server.go

@ -190,7 +190,7 @@ func NewS3ApiServerWithStore(router *mux.Router, option *S3ApiServerOption, expl
// Initialize STS HTTP handlers for AssumeRoleWithWebIdentity endpoint
if stsService := iamManager.GetSTSService(); stsService != nil {
s3ApiServer.stsHandlers = NewSTSHandlers(stsService)
s3ApiServer.stsHandlers = NewSTSHandlers(stsService, iam)
glog.V(1).Infof("STS HTTP handlers initialized for AssumeRoleWithWebIdentity")
}
@ -622,7 +622,16 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) {
// 1. Explicit query param match (highest priority)
apiRouter.Methods(http.MethodPost).Path("/").Queries("Action", "AssumeRoleWithWebIdentity").
HandlerFunc(track(s3a.stsHandlers.HandleSTSRequest, "STS"))
glog.V(0).Infof("STS API enabled on S3 port (AssumeRoleWithWebIdentity)")
// AssumeRole - requires SigV4 authentication
apiRouter.Methods(http.MethodPost).Path("/").Queries("Action", "AssumeRole").
HandlerFunc(track(s3a.stsHandlers.HandleSTSRequest, "STS-AssumeRole"))
// AssumeRoleWithLDAPIdentity - uses LDAP credentials
apiRouter.Methods(http.MethodPost).Path("/").Queries("Action", "AssumeRoleWithLDAPIdentity").
HandlerFunc(track(s3a.stsHandlers.HandleSTSRequest, "STS-LDAP"))
glog.V(0).Infof("STS API enabled on S3 port (AssumeRole, AssumeRoleWithWebIdentity, AssumeRoleWithLDAPIdentity)")
}
// Embedded IAM API endpoint
@ -631,10 +640,31 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) {
if s3a.embeddedIam != nil {
// 2. Authenticated IAM requests
// Only match if the request appears to be authenticated (AWS Signature)
// This prevents unauthenticated STS requests (like AssumeRoleWithWebIdentity in body)
// from being captured by the IAM handler which would reject them.
// AND is not an STS request (which should be handled by STS handlers)
iamMatcher := func(r *http.Request, rm *mux.RouteMatch) bool {
return getRequestAuthType(r) != authTypeAnonymous
if getRequestAuthType(r) == authTypeAnonymous {
return false
}
// Check Action parameter in both form data and query string
// We iterate ParseForm but ignore errors to ensure we attempt to parse the body
// even if it's malformed, then check FormValue which covers both body and query.
// This guards against misrouting STS requests if the body is invalid.
r.ParseForm()
action := r.FormValue("Action")
// If FormValue yielded nothing (possibly due to ParseForm failure failing to populate Form),
// explicitly fallback to Query string to be safe.
if action == "" {
action = r.URL.Query().Get("Action")
}
// Exclude STS actions - let them be handled by STS handlers
if action == "AssumeRole" || action == "AssumeRoleWithWebIdentity" || action == "AssumeRoleWithLDAPIdentity" {
return false
}
return true
}
apiRouter.Methods(http.MethodPost).Path("/").MatcherFunc(iamMatcher).

4
weed/s3api/s3api_server_routing_test.go

@ -150,8 +150,8 @@ func TestRouting_IAMMatcherLogic(t *testing.T) {
name: "AWS4 signature with STS action in body",
authHeader: "AWS4-HMAC-SHA256 Credential=AKIA.../...",
queryParams: "",
expectsIAM: true,
description: "Authenticated STS action should still route to IAM (auth takes precedence)",
expectsIAM: false,
description: "Authenticated STS action should route to STS handler (STS handlers handle their own auth)",
},
}

429
weed/s3api/s3api_sts.go

@ -5,6 +5,8 @@ package s3api
// AWS SDKs to obtain temporary credentials using OIDC/JWT tokens.
import (
"crypto/rand"
"encoding/base64"
"encoding/xml"
"errors"
"fmt"
@ -13,7 +15,9 @@ import (
"time"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/iam/ldap"
"github.com/seaweedfs/seaweedfs/weed/iam/sts"
"github.com/seaweedfs/seaweedfs/weed/iam/utils"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
)
@ -28,18 +32,61 @@ const (
stsDurationSeconds = "DurationSeconds"
// STS Action names
actionAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity"
actionAssumeRole = "AssumeRole"
actionAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity"
actionAssumeRoleWithLDAPIdentity = "AssumeRoleWithLDAPIdentity"
// LDAP parameter names
stsLDAPUsername = "LDAPUsername"
stsLDAPPassword = "LDAPPassword"
stsLDAPProviderName = "LDAPProviderName"
)
// STS duration constants (AWS specification)
const (
minDurationSeconds = int64(900) // 15 minutes
maxDurationSeconds = int64(43200) // 12 hours
// Default account ID for federated users
defaultAccountId = "111122223333"
)
// parseDurationSeconds parses and validates the optional DurationSeconds
// form parameter of an STS request.
//
// Returns:
//   - (nil, "", nil) when the parameter is absent;
//   - a pointer to the parsed value when it is a valid integer within the
//     AWS-mandated range [minDurationSeconds, maxDurationSeconds];
//   - otherwise STSErrInvalidParameterValue and a descriptive error.
func parseDurationSeconds(r *http.Request) (*int64, STSErrorCode, error) {
	// Use the shared stsDurationSeconds constant instead of a string literal
	// so the parameter name stays consistent with the rest of the STS handlers.
	dsStr := r.FormValue(stsDurationSeconds)
	if dsStr == "" {
		return nil, "", nil
	}

	ds, err := strconv.ParseInt(dsStr, 10, 64)
	if err != nil {
		return nil, STSErrInvalidParameterValue, fmt.Errorf("invalid DurationSeconds: %w", err)
	}

	// Enforce the AWS STS-compatible range: 900s (15 min) to 43200s (12 h).
	if ds < minDurationSeconds || ds > maxDurationSeconds {
		return nil, STSErrInvalidParameterValue,
			fmt.Errorf("DurationSeconds must be between %d and %d seconds", minDurationSeconds, maxDurationSeconds)
	}

	return &ds, "", nil
}
// Removed generateSecureCredentials - now using STS service's JWT token generation
// The STS service generates proper JWT tokens with embedded claims that can be validated
// across distributed instances without shared state.
// STSHandlers provides HTTP handlers for STS operations
type STSHandlers struct {
	// stsService issues and validates temporary credentials for STS actions.
	stsService *sts.STSService
	// iam is used for AWS SigV4 verification of authenticated STS actions
	// (e.g. AssumeRole calls verifyV4Signature through it).
	iam *IdentityAccessManagement
}
// NewSTSHandlers creates a new STSHandlers instance
func NewSTSHandlers(stsService *sts.STSService) *STSHandlers {
func NewSTSHandlers(stsService *sts.STSService, iam *IdentityAccessManagement) *STSHandlers {
return &STSHandlers{
stsService: stsService,
iam: iam,
}
}
@ -62,8 +109,12 @@ func (h *STSHandlers) HandleSTSRequest(w http.ResponseWriter, r *http.Request) {
// Route based on action
action := r.Form.Get(stsAction)
switch action {
case actionAssumeRole:
h.handleAssumeRole(w, r)
case actionAssumeRoleWithWebIdentity:
h.handleAssumeRoleWithWebIdentity(w, r)
case actionAssumeRoleWithLDAPIdentity:
h.handleAssumeRoleWithLDAPIdentity(w, r)
default:
h.writeSTSErrorResponse(w, r, STSErrInvalidAction,
fmt.Errorf("unsupported action: %s", action))
@ -98,29 +149,11 @@ func (h *STSHandlers) handleAssumeRoleWithWebIdentity(w http.ResponseWriter, r *
return
}
// Parse and validate DurationSeconds
var durationSeconds *int64
if dsStr := r.FormValue("DurationSeconds"); dsStr != "" {
ds, err := strconv.ParseInt(dsStr, 10, 64)
if err != nil {
h.writeSTSErrorResponse(w, r, STSErrInvalidParameterValue,
fmt.Errorf("invalid DurationSeconds: %w", err))
return
}
// Enforce AWS STS-compatible duration range for AssumeRoleWithWebIdentity
// AWS allows 900 seconds (15 minutes) to 43200 seconds (12 hours)
const (
minDurationSeconds = int64(900)
maxDurationSeconds = int64(43200)
)
if ds < minDurationSeconds || ds > maxDurationSeconds {
h.writeSTSErrorResponse(w, r, STSErrInvalidParameterValue,
fmt.Errorf("DurationSeconds must be between %d and %d seconds", minDurationSeconds, maxDurationSeconds))
return
}
durationSeconds = &ds
// Parse and validate DurationSeconds using helper
durationSeconds, errCode, err := parseDurationSeconds(r)
if err != nil {
h.writeSTSErrorResponse(w, r, errCode, err)
return
}
// Check if STS service is initialized
@ -179,6 +212,322 @@ func (h *STSHandlers) handleAssumeRoleWithWebIdentity(w http.ResponseWriter, r *
s3err.WriteXMLResponse(w, r, http.StatusOK, xmlResponse)
}
// handleAssumeRole handles the AssumeRole API action
// This requires AWS Signature V4 authentication
//
// The check order is deliberate: cheap parameter validation first, then
// service availability, then authentication (SigV4), then two independent
// authorization checks (caller's sts:AssumeRole permission AND the target
// role's trust policy), and only then credential minting.
func (h *STSHandlers) handleAssumeRole(w http.ResponseWriter, r *http.Request) {
	// Extract parameters from form
	roleArn := r.FormValue("RoleArn")
	roleSessionName := r.FormValue("RoleSessionName")

	// Validate required parameters
	if roleArn == "" {
		h.writeSTSErrorResponse(w, r, STSErrMissingParameter,
			fmt.Errorf("RoleArn is required"))
		return
	}
	if roleSessionName == "" {
		h.writeSTSErrorResponse(w, r, STSErrMissingParameter,
			fmt.Errorf("RoleSessionName is required"))
		return
	}

	// Parse and validate DurationSeconds using helper
	durationSeconds, errCode, err := parseDurationSeconds(r)
	if err != nil {
		h.writeSTSErrorResponse(w, r, errCode, err)
		return
	}

	// Check if STS service is initialized
	if h.stsService == nil || !h.stsService.IsInitialized() {
		h.writeSTSErrorResponse(w, r, STSErrSTSNotReady,
			fmt.Errorf("STS service not initialized"))
		return
	}

	// Check if IAM is available for SigV4 verification
	if h.iam == nil {
		h.writeSTSErrorResponse(w, r, STSErrSTSNotReady,
			fmt.Errorf("IAM not configured for STS"))
		return
	}

	// Validate AWS SigV4 authentication (authentication step).
	// The signature error code is logged but not leaked verbatim to the caller.
	identity, _, _, _, sigErrCode := h.iam.verifyV4Signature(r, false)
	if sigErrCode != s3err.ErrNone {
		glog.V(2).Infof("AssumeRole SigV4 verification failed: %v", sigErrCode)
		h.writeSTSErrorResponse(w, r, STSErrAccessDenied,
			fmt.Errorf("invalid AWS signature: %v", sigErrCode))
		return
	}
	if identity == nil {
		h.writeSTSErrorResponse(w, r, STSErrAccessDenied,
			fmt.Errorf("unable to identify caller"))
		return
	}

	glog.V(2).Infof("AssumeRole: caller identity=%s, roleArn=%s, sessionName=%s",
		identity.Name, roleArn, roleSessionName)

	// Check if the caller is authorized to assume the role (sts:AssumeRole permission)
	// This validates that the caller has a policy allowing sts:AssumeRole on the target role
	if authErr := h.iam.VerifyActionPermission(r, identity, Action("sts:AssumeRole"), "", roleArn); authErr != s3err.ErrNone {
		glog.V(2).Infof("AssumeRole: caller %s is not authorized to assume role %s", identity.Name, roleArn)
		h.writeSTSErrorResponse(w, r, STSErrAccessDenied,
			fmt.Errorf("user %s is not authorized to assume role %s", identity.Name, roleArn))
		return
	}

	// Validate that the target role trusts the caller (Trust Policy)
	// This ensures the role's trust policy explicitly allows the principal to assume it
	if err := h.iam.ValidateTrustPolicyForPrincipal(r.Context(), roleArn, identity.PrincipalArn); err != nil {
		glog.V(2).Infof("AssumeRole: trust policy validation failed for %s to assume %s: %v", identity.Name, roleArn, err)
		h.writeSTSErrorResponse(w, r, STSErrAccessDenied, fmt.Errorf("trust policy denies access"))
		return
	}

	// Generate common STS components (no extra claims for plain AssumeRole)
	stsCreds, assumedUser, err := h.prepareSTSCredentials(roleArn, roleSessionName, identity.PrincipalArn, durationSeconds, nil)
	if err != nil {
		h.writeSTSErrorResponse(w, r, STSErrInternalError, err)
		return
	}

	// Build and return response
	xmlResponse := &AssumeRoleResponse{
		Result: AssumeRoleResult{
			Credentials:     stsCreds,
			AssumedRoleUser: assumedUser,
		},
	}
	xmlResponse.ResponseMetadata.RequestId = fmt.Sprintf("%d", time.Now().UnixNano())

	s3err.WriteXMLResponse(w, r, http.StatusOK, xmlResponse)
}
// handleAssumeRoleWithLDAPIdentity handles the AssumeRoleWithLDAPIdentity API action.
//
// The caller supplies LDAP credentials (LDAPUsername/LDAPPassword) which are
// authenticated against a registered LDAP identity provider. On success the
// target role's trust policy is checked for the synthesized principal ARN
// before temporary credentials are issued.
func (h *STSHandlers) handleAssumeRoleWithLDAPIdentity(w http.ResponseWriter, r *http.Request) {
	// Extract parameters from form
	roleArn := r.FormValue("RoleArn")
	roleSessionName := r.FormValue("RoleSessionName")
	ldapUsername := r.FormValue(stsLDAPUsername)
	ldapPassword := r.FormValue(stsLDAPPassword)

	// Validate required parameters
	if roleArn == "" {
		h.writeSTSErrorResponse(w, r, STSErrMissingParameter,
			fmt.Errorf("RoleArn is required"))
		return
	}
	if roleSessionName == "" {
		h.writeSTSErrorResponse(w, r, STSErrMissingParameter,
			fmt.Errorf("RoleSessionName is required"))
		return
	}
	if ldapUsername == "" {
		h.writeSTSErrorResponse(w, r, STSErrMissingParameter,
			fmt.Errorf("LDAPUsername is required"))
		return
	}
	if ldapPassword == "" {
		h.writeSTSErrorResponse(w, r, STSErrMissingParameter,
			fmt.Errorf("LDAPPassword is required"))
		return
	}

	// Parse and validate DurationSeconds using helper
	durationSeconds, errCode, err := parseDurationSeconds(r)
	if err != nil {
		h.writeSTSErrorResponse(w, r, errCode, err)
		return
	}

	// Check if STS service is initialized
	if h.stsService == nil || !h.stsService.IsInitialized() {
		h.writeSTSErrorResponse(w, r, STSErrSTSNotReady,
			fmt.Errorf("STS service not initialized"))
		return
	}

	// BUGFIX: h.iam is dereferenced below for trust-policy validation; fail
	// fast with a clear error instead of panicking on a nil pointer.
	// (handleAssumeRole performs the same guard.)
	if h.iam == nil {
		h.writeSTSErrorResponse(w, r, STSErrSTSNotReady,
			fmt.Errorf("IAM not configured for STS"))
		return
	}

	// Optional: specific LDAP provider name
	ldapProviderName := r.FormValue(stsLDAPProviderName)

	// Find an LDAP provider from the registered providers. If a name was
	// given, only that provider matches; otherwise the first LDAP provider
	// encountered is used.
	var ldapProvider *ldap.LDAPProvider
	ldapProvidersFound := 0
	for _, provider := range h.stsService.GetProviders() {
		// Check if this is an LDAP provider by type assertion
		if p, ok := provider.(*ldap.LDAPProvider); ok {
			if ldapProviderName != "" && p.Name() == ldapProviderName {
				ldapProvider = p
				break
			} else if ldapProviderName == "" && ldapProvider == nil {
				ldapProvider = p
			}
			ldapProvidersFound++
		}
	}
	if ldapProvidersFound > 1 && ldapProviderName == "" {
		glog.Warningf("Multiple LDAP providers found (%d). Using the first one found (non-deterministic). Consider specifying LDAPProviderName.", ldapProvidersFound)
	}

	if ldapProvider == nil {
		glog.V(2).Infof("AssumeRoleWithLDAPIdentity: no LDAP provider configured")
		h.writeSTSErrorResponse(w, r, STSErrSTSNotReady,
			fmt.Errorf("no LDAP provider configured - please add an LDAP provider to IAM configuration"))
		return
	}

	// Authenticate with LDAP provider
	// The provider expects credentials in "username:password" format
	credentials := ldapUsername + ":" + ldapPassword
	identity, err := ldapProvider.Authenticate(r.Context(), credentials)
	if err != nil {
		// Deliberately generic response: do not leak whether the user exists
		// or the password was wrong.
		glog.V(2).Infof("AssumeRoleWithLDAPIdentity: LDAP authentication failed for user %s: %v", ldapUsername, err)
		h.writeSTSErrorResponse(w, r, STSErrAccessDenied,
			fmt.Errorf("authentication failed"))
		return
	}

	glog.V(2).Infof("AssumeRoleWithLDAPIdentity: user %s authenticated successfully, groups=%v",
		ldapUsername, identity.Groups)

	// Use configured account ID or default to "111122223333" for federated users.
	// (h.stsService is known non-nil after the initialization check above.)
	accountId := defaultAccountId
	if h.stsService.Config != nil && h.stsService.Config.AccountId != "" {
		accountId = h.stsService.Config.AccountId
	}

	// Build a temporary identity representing the authenticated LDAP user so
	// the role's trust policy can be evaluated against its principal ARN.
	ldapUserIdentity := &Identity{
		Name: identity.UserID,
		Account: &Account{
			DisplayName:  identity.DisplayName,
			EmailAddress: identity.Email,
			Id:           identity.UserID,
		},
		PrincipalArn: fmt.Sprintf("arn:aws:iam::%s:user/%s", accountId, identity.UserID),
	}

	// Verify that the identity is allowed to assume the role by checking the Trust Policy.
	// The LDAP user doesn't have identity policies, so we strictly check if the Role trusts this principal.
	if err := h.iam.ValidateTrustPolicyForPrincipal(r.Context(), roleArn, ldapUserIdentity.PrincipalArn); err != nil {
		glog.V(2).Infof("AssumeRoleWithLDAPIdentity: trust policy validation failed for %s to assume %s: %v", ldapUsername, roleArn, err)
		h.writeSTSErrorResponse(w, r, STSErrAccessDenied, fmt.Errorf("trust policy denies access"))
		return
	}

	// Generate common STS components with LDAP-specific claims embedded in the JWT.
	modifyClaims := func(claims *sts.STSSessionClaims) {
		claims.WithIdentityProvider("ldap", identity.UserID, identity.Provider)
	}
	stsCreds, assumedUser, err := h.prepareSTSCredentials(roleArn, roleSessionName, ldapUserIdentity.PrincipalArn, durationSeconds, modifyClaims)
	if err != nil {
		h.writeSTSErrorResponse(w, r, STSErrInternalError, err)
		return
	}

	// Build and return response
	xmlResponse := &AssumeRoleWithLDAPIdentityResponse{
		Result: LDAPIdentityResult{
			Credentials:     stsCreds,
			AssumedRoleUser: assumedUser,
		},
	}
	xmlResponse.ResponseMetadata.RequestId = fmt.Sprintf("%d", time.Now().UnixNano())

	s3err.WriteXMLResponse(w, r, http.StatusOK, xmlResponse)
}
// prepareSTSCredentials extracts common shared logic for credential generation.
//
// It mints a JWT session token carrying the role/session claims (optionally
// customized via modifyClaims), plus a cryptographically random temporary
// AccessKeyId ("ASIA" + 16 upper-hex chars) and SecretAccessKey (40-char
// base64). durationSeconds nil means the default 1-hour session.
//
// Fix: the previous version allocated a 30-byte buffer commented as
// "sufficient for both" keys but used only 8 bytes of it and then allocated
// a second buffer anyway; each key now gets exactly the bytes it needs.
func (h *STSHandlers) prepareSTSCredentials(roleArn, roleSessionName, principalArn string,
	durationSeconds *int64, modifyClaims func(*sts.STSSessionClaims)) (STSCredentials, *AssumedRoleUser, error) {

	// Calculate duration (default 1 hour when the caller omitted DurationSeconds)
	duration := time.Hour
	if durationSeconds != nil {
		duration = time.Duration(*durationSeconds) * time.Second
	}

	// Generate session ID
	sessionId, err := sts.GenerateSessionId()
	if err != nil {
		return STSCredentials{}, nil, fmt.Errorf("failed to generate session ID: %w", err)
	}

	expiration := time.Now().Add(duration)

	// Extract role name from ARN for proper response formatting
	roleName := utils.ExtractRoleNameFromArn(roleArn)
	if roleName == "" {
		roleName = roleArn // Fallback to full ARN if extraction fails
	}

	// Create session claims with role information
	claims := sts.NewSTSSessionClaims(sessionId, h.stsService.Config.Issuer, expiration).
		WithSessionName(roleSessionName).
		WithRoleInfo(roleArn, fmt.Sprintf("%s:%s", roleName, roleSessionName), principalArn)

	// Apply custom claims if provided (e.g. LDAP identity)
	if modifyClaims != nil {
		modifyClaims(claims)
	}

	// Generate JWT session token
	sessionToken, err := h.stsService.GetTokenGenerator().GenerateJWTWithClaims(claims)
	if err != nil {
		return STSCredentials{}, nil, fmt.Errorf("failed to generate session token: %w", err)
	}

	// Generate AccessKeyId: "ASIA" + 16 upper-hex chars (8 random bytes).
	accessKeyBytes := make([]byte, 8)
	if _, err := rand.Read(accessKeyBytes); err != nil {
		return STSCredentials{}, nil, fmt.Errorf("failed to generate random bytes: %w", err)
	}
	accessKeyId := "ASIA" + fmt.Sprintf("%X", accessKeyBytes)

	// Generate SecretAccessKey: 30 random bytes, base64-encoded to a 40-character string
	secretBytes := make([]byte, 30)
	if _, err := rand.Read(secretBytes); err != nil {
		return STSCredentials{}, nil, fmt.Errorf("failed to generate secret bytes: %w", err)
	}
	secretAccessKey := base64.StdEncoding.EncodeToString(secretBytes)

	// Get account ID from STS config or use default.
	// h.stsService (and its Config) were already dereferenced above when
	// building the claims, so only the AccountId field needs checking here.
	accountId := defaultAccountId
	if h.stsService.Config.AccountId != "" {
		accountId = h.stsService.Config.AccountId
	}

	stsCreds := STSCredentials{
		AccessKeyId:     accessKeyId,
		SecretAccessKey: secretAccessKey,
		SessionToken:    sessionToken,
		Expiration:      expiration.Format(time.RFC3339),
	}
	assumedUser := &AssumedRoleUser{
		AssumedRoleId: fmt.Sprintf("%s:%s", roleName, roleSessionName),
		Arn:           fmt.Sprintf("arn:aws:sts::%s:assumed-role/%s/%s", accountId, roleName, roleSessionName),
	}
	return stsCreds, assumedUser, nil
}
// STS Response types for XML marshaling
// AssumeRoleWithWebIdentityResponse is the response for AssumeRoleWithWebIdentity
@ -211,6 +560,36 @@ type AssumedRoleUser struct {
Arn string `xml:"Arn"`
}
// AssumeRoleResponse is the response for AssumeRole.
// The XML namespace matches the AWS STS API (2011-06-15) so standard AWS
// SDKs can parse it.
type AssumeRoleResponse struct {
	XMLName xml.Name         `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleResponse"`
	Result  AssumeRoleResult `xml:"AssumeRoleResult"`
	// ResponseMetadata carries the per-request identifier echoed to clients.
	ResponseMetadata struct {
		RequestId string `xml:"RequestId,omitempty"`
	} `xml:"ResponseMetadata,omitempty"`
}
// AssumeRoleResult contains the result of AssumeRole: the temporary
// credentials and the assumed-role user descriptor.
type AssumeRoleResult struct {
	Credentials     STSCredentials   `xml:"Credentials"`
	AssumedRoleUser *AssumedRoleUser `xml:"AssumedRoleUser,omitempty"`
}
// AssumeRoleWithLDAPIdentityResponse is the response for AssumeRoleWithLDAPIdentity.
// The XML namespace matches the AWS STS API (2011-06-15) response format.
type AssumeRoleWithLDAPIdentityResponse struct {
	XMLName xml.Name           `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithLDAPIdentityResponse"`
	Result  LDAPIdentityResult `xml:"AssumeRoleWithLDAPIdentityResult"`
	// ResponseMetadata carries the per-request identifier echoed to clients.
	ResponseMetadata struct {
		RequestId string `xml:"RequestId,omitempty"`
	} `xml:"ResponseMetadata,omitempty"`
}
// LDAPIdentityResult contains the result of AssumeRoleWithLDAPIdentity: the
// temporary credentials and the assumed-role user descriptor.
type LDAPIdentityResult struct {
	Credentials     STSCredentials   `xml:"Credentials"`
	AssumedRoleUser *AssumedRoleUser `xml:"AssumedRoleUser,omitempty"`
}
// STS Error types
// STSErrorCode represents STS error codes

4
weed/security/jwt.go

@ -24,6 +24,8 @@ type SeaweedFileIdClaims struct {
// Besides the standard registered claims it can carry optional scoping for
// fine-grained permissions: AllowedPrefixes restricts which request paths a
// token may touch, and AllowedMethods restricts which HTTP methods it may use.
// An empty/absent list means that dimension is unrestricted.
type SeaweedFilerClaims struct {
	// AllowedPrefixes: when non-empty, the request URL path must start with one of these.
	AllowedPrefixes []string `json:"allowed_prefixes,omitempty"`
	// AllowedMethods: when non-empty, the request HTTP method must be one of these.
	AllowedMethods []string `json:"allowed_methods,omitempty"`
	jwt.RegisteredClaims
}
@ -56,7 +58,7 @@ func GenJwtForFilerServer(signingKey SigningKey, expiresAfterSec int) EncodedJwt
}
claims := SeaweedFilerClaims{
jwt.RegisteredClaims{},
RegisteredClaims: jwt.RegisteredClaims{},
}
if expiresAfterSec > 0 {
claims.ExpiresAt = jwt.NewNumericDate(time.Now().Add(time.Second * time.Duration(expiresAfterSec)))

143
weed/server/filer_jwt_test.go

@ -0,0 +1,143 @@
package weed_server
import (
"net/http/httptest"
"testing"
"time"
"github.com/golang-jwt/jwt/v5"
"github.com/seaweedfs/seaweedfs/weed/security"
)
func TestFilerServer_maybeCheckJwtAuthorization_Scoped(t *testing.T) {
signingKey := "secret"
filerGuard := security.NewGuard(nil, signingKey, 0, signingKey, 0)
fs := &FilerServer{
filerGuard: filerGuard,
}
// Helper to generate token
genToken := func(allowedPrefixes []string, allowedMethods []string) string {
claims := security.SeaweedFilerClaims{
AllowedPrefixes: allowedPrefixes,
AllowedMethods: allowedMethods,
RegisteredClaims: jwt.RegisteredClaims{
ExpiresAt: jwt.NewNumericDate(time.Now().Add(1 * time.Hour)),
},
}
token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
str, err := token.SignedString([]byte(signingKey))
if err != nil {
t.Fatalf("failed to sign token: %v", err)
}
return str
}
tests := []struct {
name string
token string
method string
path string
isWrite bool
expectAuthorized bool
}{
{
name: "no restrictions",
token: genToken(nil, nil),
method: "GET",
path: "/data/test",
isWrite: false,
expectAuthorized: true,
},
{
name: "allowed prefix match",
token: genToken([]string{"/data"}, nil),
method: "GET",
path: "/data/test",
isWrite: false,
expectAuthorized: true,
},
{
name: "allowed prefix mismatch",
token: genToken([]string{"/private"}, nil),
method: "GET",
path: "/data/test",
isWrite: false,
expectAuthorized: false,
},
{
name: "allowed method match",
token: genToken(nil, []string{"GET"}),
method: "GET",
path: "/data/test",
isWrite: false,
expectAuthorized: true,
},
{
name: "allowed method mismatch",
token: genToken(nil, []string{"POST"}),
method: "GET",
path: "/data/test",
isWrite: false,
expectAuthorized: false,
},
{
name: "both match",
token: genToken([]string{"/data"}, []string{"GET"}),
method: "GET",
path: "/data/test",
isWrite: false,
expectAuthorized: true,
},
{
name: "prefix match, method mismatch",
token: genToken([]string{"/data"}, []string{"POST"}),
method: "GET",
path: "/data/test",
isWrite: false,
expectAuthorized: false,
},
{
name: "multiple prefixes match",
token: genToken([]string{"/other", "/data"}, nil),
method: "GET",
path: "/data/test",
isWrite: false,
expectAuthorized: true,
},
{
name: "write operation with method restriction",
token: genToken(nil, []string{"POST", "PUT"}),
method: "POST",
path: "/data/upload",
isWrite: true,
expectAuthorized: true,
},
{
name: "root path with prefix restriction",
token: genToken([]string{"/data"}, nil),
method: "GET",
path: "/",
isWrite: false,
expectAuthorized: false,
},
{
name: "exact prefix match",
token: genToken([]string{"/data"}, nil),
method: "GET",
path: "/data",
isWrite: false,
expectAuthorized: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
req := httptest.NewRequest(tt.method, tt.path, nil)
req.Header.Set("Authorization", "Bearer "+tt.token)
if authorized := fs.maybeCheckJwtAuthorization(req, tt.isWrite); authorized != tt.expectAuthorized {
t.Errorf("expected authorized=%v, got %v", tt.expectAuthorized, authorized)
}
})
}
}

40
weed/server/filer_server_handlers.go

@ -4,7 +4,6 @@ import (
"context"
"errors"
"net/http"
"os"
"strconv"
"strings"
"sync/atomic"
@ -148,7 +147,7 @@ func (fs *FilerServer) readonlyFilerHandler(w http.ResponseWriter, r *http.Reque
statusRecorder := stats.NewStatusResponseWriter(w)
w = statusRecorder
os.Stdout.WriteString("Request: " + r.Method + " " + r.URL.String() + "\n")
glog.V(4).Infof("Request: %s %s", r.Method, r.URL.Path)
origin := r.Header.Get("Origin")
if origin != "" {
@ -242,9 +241,42 @@ func (fs *FilerServer) maybeCheckJwtAuthorization(r *http.Request, isWrite bool)
if !token.Valid {
glog.V(1).Infof("jwt invalid from %s: %v", r.RemoteAddr, tokenStr)
return false
} else {
return true
}
claims, ok := token.Claims.(*security.SeaweedFilerClaims)
if !ok {
glog.V(1).Infof("jwt claims not of type *SeaweedFilerClaims from %s", r.RemoteAddr)
return false
}
if len(claims.AllowedPrefixes) > 0 {
hasPrefix := false
for _, prefix := range claims.AllowedPrefixes {
if strings.HasPrefix(r.URL.Path, prefix) {
hasPrefix = true
break
}
}
if !hasPrefix {
glog.V(1).Infof("jwt path not allowed from %s: %v", r.RemoteAddr, r.URL.Path)
return false
}
}
if len(claims.AllowedMethods) > 0 {
hasMethod := false
for _, method := range claims.AllowedMethods {
if method == r.Method {
hasMethod = true
break
}
}
if !hasMethod {
glog.V(1).Infof("jwt method not allowed from %s: %v", r.RemoteAddr, r.Method)
return false
}
}
return true
}
func (fs *FilerServer) filerHealthzHandler(w http.ResponseWriter, r *http.Request) {

1
weed/server/master_grpc_server.go

@ -165,6 +165,7 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ
glog.V(4).Infof("master received heartbeat %s", heartbeat.String())
stats.MasterReceivedHeartbeatCounter.WithLabelValues("total").Inc()
// TODO(issues/7977): process status heartbeat updates from volume servers
message := &master_pb.VolumeLocation{
Url: dn.Url(),

13
weed/server/volume_grpc_client_to_master.go

@ -212,6 +212,19 @@ func (vs *VolumeServer) doHeartbeatWithRetry(masterAddress pb.ServerAddress, grp
port := uint32(vs.store.Port)
for {
select {
case stateMessage := <-vs.store.StateUpdateChan:
stateBeat := &master_pb.Heartbeat{
Ip: ip,
Port: port,
DataCenter: dataCenter,
Rack: rack,
State: stateMessage,
}
glog.V(0).Infof("volume server %s:%d updates state to %v", vs.store.Ip, vs.store.Port, stateMessage)
if err = stream.Send(stateBeat); err != nil {
glog.V(0).Infof("Volume Server Failed to update state to master %s: %v", masterAddress, err)
return "", err
}
case volumeMessage := <-vs.store.NewVolumesChan:
deltaBeat := &master_pb.Heartbeat{
Ip: ip,

9
weed/storage/disk_location.go

@ -19,6 +19,11 @@ import (
"github.com/seaweedfs/seaweedfs/weed/util"
)
const (
	// UUIDFileName is the per-volume-directory marker file holding the
	// directory's persistent UUID.
	UUIDFileName = "vol_dir.uuid"
	// UUIDFileMod is the file mode used when writing the UUID file.
	UUIDFileMod = 0644
)
type DiskLocation struct {
Directory string
DirectoryUuid string
@ -42,7 +47,7 @@ type DiskLocation struct {
func GenerateDirUuid(dir string) (dirUuidString string, err error) {
glog.V(1).Infof("Getting uuid of volume directory:%s", dir)
fileName := dir + "/vol_dir.uuid"
fileName := filepath.Join(dir, UUIDFileName)
if !util.FileExists(fileName) {
dirUuidString, err = writeNewUuid(fileName)
} else {
@ -62,7 +67,7 @@ func GenerateDirUuid(dir string) (dirUuidString string, err error) {
// writeNewUuid generates a fresh random UUID, persists it to fileName with
// UUIDFileMod permissions, and returns the UUID string.
// (Fix: a merge left both the old literal-0644 and the new UUIDFileMod
// variants of the WriteFile call in place; only the constant-based call is kept.)
func writeNewUuid(fileName string) (string, error) {
	// uuid.NewRandom only fails if the system entropy source fails; the error
	// is ignored here as elsewhere in this file.
	dirUuid, _ := uuid.NewRandom()
	dirUuidString := dirUuid.String()
	if err := util.WriteFile(fileName, []byte(dirUuidString), UUIDFileMod); err != nil {
		return "", fmt.Errorf("failed to write uuid to %s : %v", fileName, err)
	}
	return dirUuidString, nil

76
weed/storage/store.go

@ -16,6 +16,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
"github.com/seaweedfs/seaweedfs/weed/stats"
"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
"github.com/seaweedfs/seaweedfs/weed/storage/needle"
@ -25,6 +26,7 @@ import (
const (
MAX_TTL_VOLUME_REMOVAL_DELAY = 10 // 10 minutes
HEARTBEAT_CHAN_SIZE = 1024
)
type ReadOption struct {
@ -69,6 +71,8 @@ type Store struct {
rack string // optional information, overwriting master setting if exists
connected bool
NeedleMapKind NeedleMapKind
State *State
StateUpdateChan chan *volume_server_pb.VolumeServerState
NewVolumesChan chan master_pb.VolumeShortInformationMessage
DeletedVolumesChan chan master_pb.VolumeShortInformationMessage
NewEcShardsChan chan master_pb.VolumeEcShardInformationMessage
@ -81,16 +85,31 @@ func (s *Store) String() (str string) {
return
}
func NewStore(grpcDialOption grpc.DialOption, ip string, port int, grpcPort int, publicUrl string, id string, dirnames []string, maxVolumeCounts []int32,
minFreeSpaces []util.MinFreeSpace, idxFolder string, needleMapKind NeedleMapKind, diskTypes []DiskType, ldbTimeout int64) (s *Store) {
s = &Store{grpcDialOption: grpcDialOption, Port: port, Ip: ip, GrpcPort: grpcPort, PublicUrl: publicUrl, Id: id, NeedleMapKind: needleMapKind}
s.Locations = make([]*DiskLocation, 0)
s.NewVolumesChan = make(chan master_pb.VolumeShortInformationMessage, 1024)
s.DeletedVolumesChan = make(chan master_pb.VolumeShortInformationMessage, 1024)
s.NewEcShardsChan = make(chan master_pb.VolumeEcShardInformationMessage, 1024)
s.DeletedEcShardsChan = make(chan master_pb.VolumeEcShardInformationMessage, 1024)
func NewStore(
grpcDialOption grpc.DialOption,
ip string, port int, grpcPort int, publicUrl string, id string,
dirnames []string, maxVolumeCounts []int32, minFreeSpaces []util.MinFreeSpace,
idxFolder string,
needleMapKind NeedleMapKind,
diskTypes []DiskType,
ldbTimeout int64,
) (s *Store) {
s = &Store{
grpcDialOption: grpcDialOption,
Port: port,
Ip: ip,
GrpcPort: grpcPort,
PublicUrl: publicUrl,
Id: id,
NeedleMapKind: needleMapKind,
Locations: make([]*DiskLocation, 0),
StateUpdateChan: make(chan *volume_server_pb.VolumeServerState, HEARTBEAT_CHAN_SIZE),
NewVolumesChan: make(chan master_pb.VolumeShortInformationMessage, HEARTBEAT_CHAN_SIZE),
DeletedVolumesChan: make(chan master_pb.VolumeShortInformationMessage, HEARTBEAT_CHAN_SIZE),
NewEcShardsChan: make(chan master_pb.VolumeEcShardInformationMessage, HEARTBEAT_CHAN_SIZE),
DeletedEcShardsChan: make(chan master_pb.VolumeEcShardInformationMessage, HEARTBEAT_CHAN_SIZE),
}
var wg sync.WaitGroup
for i := 0; i < len(dirnames); i++ {
@ -130,8 +149,44 @@ func NewStore(grpcDialOption grpc.DialOption, ip string, port int, grpcPort int,
}
wg.Wait()
var err error
s.State, err = NewState(idxFolder)
if err != nil {
glog.Fatalf("failed to resolve state for volume %s: %v", id, err)
}
return
}
// LoadState reloads the persisted volume-server state from disk and, on
// success, queues it for reporting to the master via the heartbeat stream.
func (s *Store) LoadState() error {
	if err := s.State.Load(); err != nil {
		return err
	}
	if s.State.Pb == nil {
		return nil
	}
	// Non-blocking send: if the channel is full, the state is simply reported
	// by a later heartbeat instead.
	select {
	case s.StateUpdateChan <- s.State.Pb:
	default:
		glog.V(2).Infof("StateUpdateChan full during LoadState, state will be reported in heartbeat")
	}
	return nil
}
// SaveState persists the current volume-server state to disk and, on success,
// queues it for reporting to the master via the heartbeat stream.
// Saving an unset state is a no-op (with a warning).
func (s *Store) SaveState() error {
	if s.State.Pb == nil {
		glog.Warningf("tried to save empty state for store %s", s.Id)
		return nil
	}
	if err := s.State.Save(); err != nil {
		return err
	}
	// Non-blocking send: if the channel is full, the state is simply reported
	// by a later heartbeat instead.
	select {
	case s.StateUpdateChan <- s.State.Pb:
	default:
		glog.V(2).Infof("StateUpdateChan full during SaveState, state will be reported in heartbeat")
	}
	return nil
}
func (s *Store) AddVolume(volumeId needle.VolumeId, collection string, needleMapKind NeedleMapKind, replicaPlacement string, ttlString string, preallocate int64, ver needle.Version, MemoryMapMaxSizeMb uint32, diskType DiskType, ldbTimeout int64) error {
rt, e := super_block.NewReplicaPlacementFromString(replicaPlacement)
if e != nil {
@ -144,6 +199,7 @@ func (s *Store) AddVolume(volumeId needle.VolumeId, collection string, needleMap
e = s.addVolume(volumeId, collection, needleMapKind, rt, ttl, preallocate, ver, MemoryMapMaxSizeMb, diskType, ldbTimeout)
return e
}
func (s *Store) DeleteCollection(collection string) (e error) {
for _, location := range s.Locations {
e = location.DeleteCollectionFromDiskLocation(collection)

71
weed/storage/store_state.go

@ -0,0 +1,71 @@
package storage
import (
"fmt"
"os"
"path/filepath"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
"google.golang.org/protobuf/proto"
)
const (
	// StateFileName is the file (inside the idx folder) holding the
	// serialized VolumeServerState protobuf.
	StateFileName = "state.pb"
	// StateFileMode is the file mode used when writing the state file.
	StateFileMode = 0644
)
// State wraps the on-disk persisted volume-server state.
type State struct {
	// FilePath is the full path of the serialized state file.
	FilePath string
	// Pb is the in-memory state; nil until loaded, and reset to nil when a
	// load fails (see Load).
	Pb *volume_server_pb.VolumeServerState
}
// NewState constructs a State backed by dir/StateFileName and attempts an
// initial load from disk. The State is returned even when the load fails, so
// the caller can inspect or retry.
func NewState(dir string) (*State, error) {
	st := &State{FilePath: filepath.Join(dir, StateFileName)}
	if err := st.Load(); err != nil {
		return st, err
	}
	return st, nil
}
// Load reads and parses the persisted VolumeServerState protobuf from
// st.FilePath. A missing file is not an error (st.Pb is left as an empty
// state); on read or parse failure st.Pb is reset to nil and an error is
// returned.
// (Fix: underlying errors are now wrapped with %w instead of %v so callers
// can use errors.Is/errors.As on the chain.)
func (st *State) Load() error {
	st.Pb = &volume_server_pb.VolumeServerState{}

	if !util.FileExists(st.FilePath) {
		glog.V(1).Infof("No preexisting store state at %s", st.FilePath)
		return nil
	}

	binPb, err := os.ReadFile(st.FilePath)
	if err != nil {
		st.Pb = nil
		return fmt.Errorf("failed to read store state from %s : %w", st.FilePath, err)
	}
	if err := proto.Unmarshal(binPb, st.Pb); err != nil {
		st.Pb = nil
		return fmt.Errorf("failed to parse store state from %s : %w", st.FilePath, err)
	}

	glog.V(1).Infof("Got store state from %s: %v", st.FilePath, st.Pb)
	return nil
}
// Save serializes st.Pb and writes it to st.FilePath with StateFileMode
// permissions. A nil Pb is replaced with an empty state before saving.
// (Fix: the underlying marshal/write errors are now wrapped with %w — the
// marshal error previously used %s and the write error %v, which broke
// errors.Is/errors.As on the chain.)
func (st *State) Save() error {
	if st.Pb == nil {
		st.Pb = &volume_server_pb.VolumeServerState{}
	}

	binPb, err := proto.Marshal(st.Pb)
	if err != nil {
		return fmt.Errorf("failed to serialize store state %v: %w", st.Pb, err)
	}
	if err := util.WriteFile(st.FilePath, binPb, StateFileMode); err != nil {
		return fmt.Errorf("failed to write store state to %s : %w", st.FilePath, err)
	}

	glog.V(1).Infof("Saved store state %v to %s", st.Pb, st.FilePath)
	return nil
}
Loading…
Cancel
Save