diff --git a/test/s3tables/sts_integration/sts_integration_test.go b/test/s3tables/sts_integration/sts_integration_test.go
index 33b78e06f..a6e4c7bbd 100644
--- a/test/s3tables/sts_integration/sts_integration_test.go
+++ b/test/s3tables/sts_integration/sts_integration_test.go
@@ -33,6 +33,8 @@ type TestEnvironment struct {
secretKey string
}
+const testSTSIntegrationSigningKey = "dGVzdC1zaWduaW5nLWtleS1mb3Itc3RzLWludGVncmF0aW9uLXRlc3Rz" // gitleaks:allow - test-signing-key-for-sts-integration-tests
+
func TestSTSIntegration(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
@@ -110,17 +112,70 @@ func NewTestEnvironment(t *testing.T) *TestEnvironment {
volumePort: volumePort,
volumeGrpcPort: volumeGrpcPort,
dockerAvailable: testutil.HasDocker(),
- accessKey: "admin", // Matching default in testutil.WriteIAMConfig
- secretKey: "admin",
+ accessKey: "admin",
+ secretKey: "adminadmin",
}
}
func (env *TestEnvironment) StartSeaweedFS(t *testing.T) {
t.Helper()
- // Create IAM config file
- iamConfigPath, err := testutil.WriteIAMConfig(env.dataDir, env.accessKey, env.secretKey)
- if err != nil {
+ iamConfigPath := filepath.Join(env.dataDir, "iam.json")
+ // Note: signingKey must be base64 encoded for []byte JSON unmarshaling
+ iamConfig := fmt.Sprintf(`{
+ "identities": [
+ {
+ "name": "admin",
+ "credentials": [
+ { "accessKey": "%s", "secretKey": "%s" }
+ ],
+ "actions": ["Admin", "Read", "Write", "List", "Tagging"]
+ }
+ ],
+ "sts": {
+ "tokenDuration": "1h",
+ "maxSessionLength": "12h",
+ "issuer": "seaweedfs-sts",
+ "signingKey": "%s"
+ },
+ "policy": {
+ "defaultEffect": "Deny",
+ "storeType": "memory"
+ },
+ "policies": [
+ {
+ "name": "S3FullAccessPolicy",
+ "document": {
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Action": ["s3:*"],
+ "Resource": ["*"]
+ }
+ ]
+ }
+ }
+ ],
+ "roles": [
+ {
+ "roleName": "TestRole",
+ "roleArn": "arn:aws:iam::role/TestRole",
+ "attachedPolicies": ["S3FullAccessPolicy"],
+ "trustPolicy": {
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": "*",
+ "Action": ["sts:AssumeRole"]
+ }
+ ]
+ }
+ }
+ ]
+}`, env.accessKey, env.secretKey, testSTSIntegrationSigningKey)
+ if err := os.WriteFile(iamConfigPath, []byte(iamConfig), 0644); err != nil {
t.Fatalf("Failed to create IAM config: %v", err)
}
@@ -143,6 +198,8 @@ func (env *TestEnvironment) StartSeaweedFS(t *testing.T) {
"-s3.port", fmt.Sprintf("%d", env.s3Port),
"-s3.port.grpc", fmt.Sprintf("%d", env.s3GrpcPort),
"-s3.config", iamConfigPath,
+ "-s3.iam.config", iamConfigPath,
+ "-s3.iam.readOnly", "false",
"-ip", env.bindIP,
"-ip.bind", "0.0.0.0",
"-dir", env.dataDir,
@@ -190,22 +247,62 @@ func runPythonSTSClient(t *testing.T, env *TestEnvironment) {
import boto3
import botocore.config
from botocore.exceptions import ClientError
-import os
+import json
import sys
+import time
+import urllib.error
+import urllib.request
-print("Starting STS test...")
+print("Starting STS inline session policy test...")
-endpoint_url = "http://host.docker.internal:%d"
+primary_endpoint = "http://host.docker.internal:%d"
+fallback_endpoint = "http://%s:%d"
access_key = "%s"
secret_key = "%s"
region = "us-east-1"
-print(f"Connecting to {endpoint_url} with key {access_key}")
-
try:
+ def wait_for_endpoint(url, timeout=30):
+ deadline = time.time() + timeout
+ while time.time() < deadline:
+ try:
+ with urllib.request.urlopen(url, timeout=2):
+ return True
+ except urllib.error.HTTPError:
+ return True
+ except Exception:
+ time.sleep(1)
+ return False
+
+ def select_endpoint(urls):
+ for url in urls:
+ if wait_for_endpoint(url):
+ return url
+ raise Exception("No reachable S3 endpoint from container")
+
+ endpoint_url = select_endpoint([primary_endpoint, fallback_endpoint])
+ print(f"Using endpoint {endpoint_url}")
+
config = botocore.config.Config(
- retries={'max_attempts': 0}
+ retries={'max_attempts': 0},
+ s3={'addressing_style': 'path'}
+ )
+ admin_s3 = boto3.client(
+ 's3',
+ endpoint_url=endpoint_url,
+ aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key,
+ region_name=region,
+ config=config
)
+
+ bucket = f"sts-inline-policy-{int(time.time() * 1000)}"
+ key = "allowed.txt"
+
+ print(f"Creating bucket {bucket} with admin credentials")
+ admin_s3.create_bucket(Bucket=bucket)
+ admin_s3.put_object(Bucket=bucket, Key=key, Body=b"ok")
+
sts = boto3.client(
'sts',
endpoint_url=endpoint_url,
@@ -215,45 +312,77 @@ try:
config=config
)
- role_arn = "arn:aws:iam::000000000000:role/test-role"
+ role_arn = "arn:aws:iam::role/TestRole"
session_name = "test-session"
-
- print(f"Calling AssumeRole on {role_arn}")
-
- # This call typically sends parameters in POST body by default in boto3
+ session_policy = json.dumps({
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Action": ["s3:ListBucket"],
+ "Resource": [f"arn:aws:s3:::{bucket}"]
+ },
+ {
+ "Effect": "Allow",
+ "Action": ["s3:GetObject"],
+ "Resource": [f"arn:aws:s3:::{bucket}/*"]
+ }
+ ]
+ })
+
+ print(f"Calling AssumeRole on {role_arn} with inline session policy")
response = sts.assume_role(
RoleArn=role_arn,
- RoleSessionName=session_name
+ RoleSessionName=session_name,
+ Policy=session_policy
)
- print("Success! Got credentials:")
- print(response['Credentials'])
-
-except ClientError as e:
- # Print available keys for debugging if needed
- # print(e.response.keys())
-
- response_meta = e.response.get('ResponseMetadata', {})
- http_code = response_meta.get('HTTPStatusCode')
-
- error_data = e.response.get('Error', {})
- error_code = error_data.get('Code', 'Unknown')
-
- print(f"Got error: {http_code} {error_code}")
-
- # We expect 503 ServiceUnavailable because stsHandlers is nil in weed mini
- # This confirms the request was routed to STS handler logic (UnifiedPostHandler)
- # instead of IAM handler (which would return 403 AccessDenied or 501 NotImplemented)
- if http_code == 503:
- print("SUCCESS: Got expected 503 Service Unavailable (STS not configured)")
- sys.exit(0)
-
- print(f"FAILED: Unexpected error {e}")
- sys.exit(1)
+ creds = response['Credentials']
+ vended_s3 = boto3.client(
+ 's3',
+ endpoint_url=endpoint_url,
+ aws_access_key_id=creds['AccessKeyId'],
+ aws_secret_access_key=creds['SecretAccessKey'],
+ aws_session_token=creds['SessionToken'],
+ region_name=region,
+ config=config
+ )
+
+ print("Listing objects (allowed)")
+ list_resp = vended_s3.list_objects_v2(Bucket=bucket)
+ keys = [obj.get('Key') for obj in list_resp.get('Contents', [])]
+ if key not in keys:
+ print(f"FAILED: Expected to see {key} in list_objects_v2 results")
+ sys.exit(1)
+
+ print("Getting object (allowed)")
+ body = vended_s3.get_object(Bucket=bucket, Key=key)['Body'].read()
+ if body != b"ok":
+ print("FAILED: Unexpected object content")
+ sys.exit(1)
+
+ print("Putting object (expected to be denied)")
+ try:
+ vended_s3.put_object(Bucket=bucket, Key="denied.txt", Body=b"no")
+ print("FAILED: PutObject unexpectedly succeeded")
+ sys.exit(1)
+ except ClientError as e:
+ error_code = e.response.get('Error', {}).get('Code', '')
+ if error_code != 'AccessDenied':
+ print(f"FAILED: Expected AccessDenied, got {error_code}")
+ sys.exit(1)
+ print("PutObject correctly denied by inline session policy")
+
+ print("SUCCESS: Inline session policy downscoping verified")
+ sys.exit(0)
except Exception as e:
print(f"FAILED: {e}")
+ if hasattr(e, 'response'):
+ print(f"Response: {e.response}")
+ import traceback
+ traceback.print_exc()
sys.exit(1)
-`, env.s3Port, env.accessKey, env.secretKey)
+`, env.s3Port, env.bindIP, env.s3Port, env.accessKey, env.secretKey)
scriptPath := filepath.Join(env.dataDir, "sts_test.py")
if err := os.WriteFile(scriptPath, []byte(scriptContent), 0644); err != nil {
diff --git a/test/volume_server/grpc/scrub_query_test.go b/test/volume_server/grpc/scrub_query_test.go
index 66766f79d..46b67e0ca 100644
--- a/test/volume_server/grpc/scrub_query_test.go
+++ b/test/volume_server/grpc/scrub_query_test.go
@@ -144,7 +144,7 @@ func TestQueryInvalidAndMissingFileIDPaths(t *testing.T) {
}
}
-func TestScrubVolumeAutoSelectAndNotImplementedModes(t *testing.T) {
+func TestScrubVolumeAutoSelectAndAllModes(t *testing.T) {
if testing.Short() {
t.Skip("skipping integration test in short mode")
}
@@ -158,6 +158,11 @@ func TestScrubVolumeAutoSelectAndNotImplementedModes(t *testing.T) {
framework.AllocateVolume(t, grpcClient, volumeIDA, "")
framework.AllocateVolume(t, grpcClient, volumeIDB, "")
+ // upload some data so index files are not zero-sized
+ httpClient := framework.NewHTTPClient()
+ framework.UploadBytes(t, httpClient, clusterHarness.VolumeAdminURL(), framework.NewFileID(volumeIDA, 1, 1), []byte("test data A"))
+ framework.UploadBytes(t, httpClient, clusterHarness.VolumeAdminURL(), framework.NewFileID(volumeIDB, 2, 2), []byte("test data B"))
+
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
@@ -181,11 +186,8 @@ func TestScrubVolumeAutoSelectAndNotImplementedModes(t *testing.T) {
if localResp.GetTotalVolumes() != 1 {
t.Fatalf("ScrubVolume local mode expected total_volumes=1, got %d", localResp.GetTotalVolumes())
}
- if len(localResp.GetBrokenVolumeIds()) != 1 || localResp.GetBrokenVolumeIds()[0] != volumeIDA {
- t.Fatalf("ScrubVolume local mode expected broken volume %d, got %v", volumeIDA, localResp.GetBrokenVolumeIds())
- }
- if len(localResp.GetDetails()) == 0 || !strings.Contains(strings.Join(localResp.GetDetails(), " "), "not implemented") {
- t.Fatalf("ScrubVolume local mode expected not-implemented details, got %v", localResp.GetDetails())
+ if len(localResp.GetBrokenVolumeIds()) != 0 {
+ t.Fatalf("ScrubVolume local mode expected no broken volumes, got %v: %v", localResp.GetBrokenVolumeIds(), localResp.GetDetails())
}
fullResp, err := grpcClient.ScrubVolume(ctx, &volume_server_pb.ScrubVolumeRequest{
@@ -198,8 +200,8 @@ func TestScrubVolumeAutoSelectAndNotImplementedModes(t *testing.T) {
if fullResp.GetTotalVolumes() != 1 {
t.Fatalf("ScrubVolume full mode expected total_volumes=1, got %d", fullResp.GetTotalVolumes())
}
- if len(fullResp.GetDetails()) == 0 || !strings.Contains(strings.Join(fullResp.GetDetails(), " "), "not implemented") {
- t.Fatalf("ScrubVolume full mode expected not-implemented details, got %v", fullResp.GetDetails())
+ if len(fullResp.GetBrokenVolumeIds()) != 0 {
+ t.Fatalf("ScrubVolume full mode expected no broken volumes, got %v: %v", fullResp.GetBrokenVolumeIds(), fullResp.GetDetails())
}
}
diff --git a/weed/admin/dash/admin_data.go b/weed/admin/dash/admin_data.go
index 9d33ee158..f7ea81338 100644
--- a/weed/admin/dash/admin_data.go
+++ b/weed/admin/dash/admin_data.go
@@ -9,13 +9,14 @@ import (
"github.com/gin-gonic/gin"
"github.com/seaweedfs/seaweedfs/weed/cluster"
"github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/iam"
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
)
// Access key status constants
const (
- AccessKeyStatusActive = "Active"
- AccessKeyStatusInactive = "Inactive"
+ AccessKeyStatusActive = iam.AccessKeyStatusActive
+ AccessKeyStatusInactive = iam.AccessKeyStatusInactive
)
type AdminData struct {
diff --git a/weed/admin/dash/service_account_management.go b/weed/admin/dash/service_account_management.go
index c83ce868f..d8a6c7033 100644
--- a/weed/admin/dash/service_account_management.go
+++ b/weed/admin/dash/service_account_management.go
@@ -8,6 +8,7 @@ import (
"time"
"github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/iam"
"github.com/seaweedfs/seaweedfs/weed/pb/iam_pb"
)
@@ -21,8 +22,8 @@ const (
accessKeyPrefix = "ABIA" // Service account access keys use ABIA prefix
// Status constants
- StatusActive = "Active"
- StatusInactive = "Inactive"
+ StatusActive = iam.AccessKeyStatusActive
+ StatusInactive = iam.AccessKeyStatusInactive
)
// GetServiceAccounts returns all service accounts, optionally filtered by parent user
diff --git a/weed/admin/view/app/service_accounts.templ b/weed/admin/view/app/service_accounts.templ
index d057278f5..4696c63b9 100644
--- a/weed/admin/view/app/service_accounts.templ
+++ b/weed/admin/view/app/service_accounts.templ
@@ -126,7 +126,7 @@ templ ServiceAccounts(data dash.ServiceAccountsData) {
{sa.AccessKeyId}
- if sa.Status == "Active" {
+ if sa.Status == dash.StatusActive {
Active
} else {
Inactive
@@ -141,7 +141,7 @@ templ ServiceAccounts(data dash.ServiceAccountsData) {
|