
s3: enforce authentication and JSON error format for Iceberg REST Catalog (#8192)

* s3: enforce authentication and JSON error format for Iceberg REST Catalog

* s3/iceberg: align error exception types with OpenAPI spec examples

* s3api: refactor AuthenticateRequest to return identity object

* s3/iceberg: propagate full identity object to request context

* s3/iceberg: differentiate NotAuthorizedException and ForbiddenException

* s3/iceberg: reject requests if authenticator is nil to prevent auth bypass

* s3/iceberg: refactor Auth middleware to build context incrementally and use switch for error mapping

* s3api: update misleading comment for authRequestWithAuthType

* s3api: return ErrAccessDenied if IAM is not configured to prevent auth bypass

* s3/iceberg: optimize context update in Auth middleware

* s3api: export CanDo for external authorization use

* s3/iceberg: enforce identity-based authorization in all API handlers

* s3api: fix compilation errors by updating internal CanDo references

* s3/iceberg: robust identity validation and consistent action usage in handlers

* s3api: complete CanDo rename across tests and policy engine integration

* s3api: fix integration tests by allowing admin access when auth is disabled and by using explicit gRPC ports

* test/s3tables: fix the DuckDB Docker invocation (explicit duckdb entrypoint, -init script)

* test/s3tables: create the default table bucket before running namespace tests
Branch: master
Chris Lu committed via GitHub, 10 hours ago
Commit: 1274cf038c
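As a quick illustration of what the enforcement means for clients (a sketch based on the commit description, not code from this diff): an anonymous request to the catalog should now come back as a JSON error instead of a successful response. The port and the exact error type noted in the comment are assumptions.

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Hypothetical local Iceberg REST Catalog port; adjust to the -s3.port.iceberg value in use.
	resp, err := http.Get("http://127.0.0.1:8112/v1/namespaces")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	// Expected after this change (assumed): HTTP 401 with a JSON body describing a
	// NotAuthorizedException, rather than an unauthenticated namespace listing.
	fmt.Println(resp.StatusCode, string(body))
}
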
19 changed files:

  1. test/s3tables/catalog/iceberg_catalog_test.go (63 changed lines)
  2. weed/command/s3.go (2 changed lines)
  3. weed/s3api/auth_credentials.go (27 changed lines)
  4. weed/s3api/auth_credentials_test.go (90 changed lines)
  5. weed/s3api/auth_security_test.go (4 changed lines)
  6. weed/s3api/auth_signature_v2.go (2 changed lines)
  7. weed/s3api/auth_signature_v4.go (2 changed lines)
  8. weed/s3api/auth_signature_v4_sts_test.go (8 changed lines)
  9. weed/s3api/auth_sts_identity_test.go (16 changed lines)
  10. weed/s3api/iceberg/iceberg.go (183 changed lines)
  11. weed/s3api/policy_engine/integration.go (4 changed lines)
  12. weed/s3api/s3api_bucket_handlers.go (2 changed lines)
  13. weed/s3api/s3api_bucket_handlers_test.go (16 changed lines)
  14. weed/s3api/s3api_governance_permissions_test.go (6 changed lines)
  15. weed/s3api/s3api_key_rotation.go (6 changed lines)
  16. weed/s3api/s3api_object_handlers_acl.go (2 changed lines)
  17. weed/s3api/s3api_object_handlers_list_test.go (16 changed lines)
  18. weed/s3api/s3api_object_retention.go (4 changed lines)
  19. weed/s3api/s3api_server.go (12 changed lines)

test/s3tables/catalog/iceberg_catalog_test.go (63 changed lines)

@ -22,10 +22,14 @@ type TestEnvironment struct {
weedBinary string
dataDir string
s3Port int
s3GrpcPort int
icebergPort int
masterPort int
masterGrpcPort int
filerPort int
filerGrpcPort int
volumePort int
volumeGrpcPort int
weedProcess *exec.Cmd
weedCancel context.CancelFunc
dockerAvailable bool
@ -93,28 +97,49 @@ func NewTestEnvironment(t *testing.T) *TestEnvironment {
if err != nil {
t.Fatalf("Failed to get free port for Iceberg: %v", err)
}
s3GrpcPort, err := getFreePort()
if err != nil {
t.Fatalf("Failed to get free port for S3 gRPC: %v", err)
}
masterPort, err := getFreePort()
if err != nil {
t.Fatalf("Failed to get free port for Master: %v", err)
}
masterGrpcPort, err := getFreePort()
if err != nil {
t.Fatalf("Failed to get free port for Master gRPC: %v", err)
}
filerPort, err := getFreePort()
if err != nil {
t.Fatalf("Failed to get free port for Filer: %v", err)
}
filerGrpcPort, err := getFreePort()
if err != nil {
t.Fatalf("Failed to get free port for Filer gRPC: %v", err)
}
volumePort, err := getFreePort()
if err != nil {
t.Fatalf("Failed to get free port for Volume: %v", err)
}
volumeGrpcPort, err := getFreePort()
if err != nil {
t.Fatalf("Failed to get free port for Volume gRPC: %v", err)
}
return &TestEnvironment{
seaweedDir: seaweedDir,
weedBinary: weedBinary,
dataDir: dataDir,
s3Port: s3Port,
s3GrpcPort: s3GrpcPort,
icebergPort: icebergPort,
masterPort: masterPort,
masterGrpcPort: masterGrpcPort,
filerPort: filerPort,
filerGrpcPort: filerGrpcPort,
volumePort: volumePort,
volumeGrpcPort: volumeGrpcPort,
dockerAvailable: hasDocker(),
}
}
@ -138,9 +163,13 @@ func (env *TestEnvironment) StartSeaweedFS(t *testing.T) {
cmd := exec.CommandContext(ctx, env.weedBinary, "mini",
"-master.port", fmt.Sprintf("%d", env.masterPort),
"-master.port.grpc", fmt.Sprintf("%d", env.masterGrpcPort),
"-volume.port", fmt.Sprintf("%d", env.volumePort),
"-volume.port.grpc", fmt.Sprintf("%d", env.volumeGrpcPort),
"-filer.port", fmt.Sprintf("%d", env.filerPort),
"-filer.port.grpc", fmt.Sprintf("%d", env.filerGrpcPort),
"-s3.port", fmt.Sprintf("%d", env.s3Port),
"-s3.port.grpc", fmt.Sprintf("%d", env.s3GrpcPort),
"-s3.port.iceberg", fmt.Sprintf("%d", env.icebergPort),
"-dir", env.dataDir,
)
@ -244,6 +273,9 @@ func TestIcebergNamespaces(t *testing.T) {
env.StartSeaweedFS(t)
// Create the default table bucket first via S3
createTableBucket(t, env, "default")
// Test GET /v1/namespaces (should return empty list initially)
resp, err := http.Get(env.IcebergURL() + "/v1/namespaces")
if err != nil {
@ -257,6 +289,33 @@ func TestIcebergNamespaces(t *testing.T) {
}
}
// createTableBucket creates a table bucket via the S3Tables REST API
func createTableBucket(t *testing.T, env *TestEnvironment, bucketName string) {
t.Helper()
// Use S3Tables REST API to create the bucket
endpoint := fmt.Sprintf("http://localhost:%d/buckets", env.s3Port)
reqBody := fmt.Sprintf(`{"name":"%s"}`, bucketName)
req, err := http.NewRequest(http.MethodPut, endpoint, strings.NewReader(reqBody))
if err != nil {
t.Fatalf("Failed to create request: %v", err)
}
req.Header.Set("Content-Type", "application/x-amz-json-1.1")
resp, err := http.DefaultClient.Do(req)
if err != nil {
t.Fatalf("Failed to create table bucket %s: %v", bucketName, err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusConflict {
body, _ := io.ReadAll(resp.Body)
t.Fatalf("Failed to create table bucket %s, status %d: %s", bucketName, resp.StatusCode, body)
}
t.Logf("Created table bucket %s", bucketName)
}
// TestDuckDBIntegration tests Iceberg catalog operations using DuckDB
// This test requires Docker to be available
func TestDuckDBIntegration(t *testing.T) {
@ -294,8 +353,10 @@ SELECT 'Iceberg extension loaded successfully' as result;
cmd := exec.Command("docker", "run", "--rm",
"-v", fmt.Sprintf("%s:/test", env.dataDir),
"--add-host", "host.docker.internal:host-gateway",
"--entrypoint", "duckdb",
"duckdb/duckdb:latest",
"-c", ".read /test/test.sql",
"-init", "/test/test.sql",
"-c", "SELECT 1",
)
output, err := cmd.CombinedOutput()

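The environment setup above reserves one free port per service through a getFreePort helper that this hunk does not show. A minimal sketch of how such a helper is typically written (an assumption, not necessarily the repository's implementation; package name illustrative):

package catalog

import "net"

// getFreePort asks the OS for an unused TCP port by listening on port 0,
// reading back the assigned port, and closing the listener immediately.
func getFreePort() (int, error) {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return 0, err
	}
	defer l.Close()
	return l.Addr().(*net.TCPAddr).Port, nil
}
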
weed/command/s3.go (2 changed lines)

@ -478,7 +478,7 @@ func (s3opt *S3Options) startIcebergServer(s3ApiServer *s3api.S3ApiServer) {
icebergRouter := mux.NewRouter().SkipClean(true)
// Create Iceberg server using the S3ApiServer as filer client
icebergServer := iceberg.NewServer(s3ApiServer)
icebergServer := iceberg.NewServer(s3ApiServer, s3ApiServer)
icebergServer.RegisterRoutes(icebergRouter)
listenAddress := fmt.Sprintf("%s:%d", *s3opt.bindIp, *s3opt.portIceberg)

weed/s3api/auth_credentials.go (27 changed lines)

@ -1084,7 +1084,21 @@ func (iam *IdentityAccessManagement) authRequest(r *http.Request, action Action)
}
// check whether the request has valid access keys
func (iam *IdentityAccessManagement) authRequestWithAuthType(r *http.Request, action Action) (*Identity, s3err.ErrorCode, authType) {
// AuthenticateRequest verifies the credentials in the request and returns the identity.
// It bypasses permission checks (authorization).
func (iam *IdentityAccessManagement) AuthenticateRequest(r *http.Request) (*Identity, s3err.ErrorCode) {
if !iam.isAuthEnabled {
return &Identity{
Name: "admin",
Account: &AccountAdmin,
Actions: []Action{s3_constants.ACTION_ADMIN},
}, s3err.ErrNone
}
ident, err, _ := iam.authenticateRequestInternal(r)
return ident, err
}
func (iam *IdentityAccessManagement) authenticateRequestInternal(r *http.Request) (*Identity, s3err.ErrorCode, authType) {
var identity *Identity
var s3Err s3err.ErrorCode
var found bool
@ -1138,6 +1152,13 @@ func (iam *IdentityAccessManagement) authRequestWithAuthType(r *http.Request, ac
if len(amzAuthType) > 0 {
r.Header.Set(s3_constants.AmzAuthType, amzAuthType)
}
return identity, s3Err, reqAuthType
}
// authRequestWithAuthType authenticates and then authorizes a request for a given action.
func (iam *IdentityAccessManagement) authRequestWithAuthType(r *http.Request, action Action) (*Identity, s3err.ErrorCode, authType) {
identity, s3Err, reqAuthType := iam.authenticateRequestInternal(r)
if s3Err != s3err.ErrNone {
return identity, s3Err, reqAuthType
}
@ -1274,7 +1295,7 @@ func (iam *IdentityAccessManagement) AuthSignatureOnly(r *http.Request) (*Identi
return identity, s3err.ErrNone
}
func (identity *Identity) canDo(action Action, bucket string, objectKey string) bool {
func (identity *Identity) CanDo(action Action, bucket string, objectKey string) bool {
if identity == nil {
return false
}
@ -1507,7 +1528,7 @@ func (iam *IdentityAccessManagement) VerifyActionPermission(r *http.Request, ide
// Traditional identities (with Actions from -s3.config) use legacy auth,
// JWT/STS identities (no Actions) use IAM authorization
if len(identity.Actions) > 0 {
if !identity.canDo(action, bucket, object) {
if !identity.CanDo(action, bucket, object) {
return s3err.ErrAccessDenied
}
return s3err.ErrNone

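The net effect of the refactor above is a clean split: AuthenticateRequest only verifies credentials (or returns an admin identity when auth is disabled), while authorization is done separately through the now-exported CanDo. A minimal sketch of a caller combining the two steps; the helper name and the chosen action are illustrative, and the snippet assumes the s3api package's existing imports:

// authenticateThenAuthorize is an illustrative helper, not part of this commit.
func authenticateThenAuthorize(iam *IdentityAccessManagement, r *http.Request, bucket, objectKey string) s3err.ErrorCode {
	identity, errCode := iam.AuthenticateRequest(r) // credential verification only
	if errCode != s3err.ErrNone {
		return errCode
	}
	if !identity.CanDo(s3_constants.ACTION_READ, bucket, objectKey) { // explicit permission check
		return s3err.ErrAccessDenied
	}
	return s3err.ErrNone
}
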
weed/s3api/auth_credentials_test.go (90 changed lines)

@ -86,11 +86,11 @@ func TestCanDo(t *testing.T) {
},
}
// object specific
assert.Equal(t, true, ident1.canDo(ACTION_WRITE, "bucket1", "/a/b/c/d.txt"))
assert.Equal(t, true, ident1.canDo(ACTION_WRITE, "bucket1", "/a/b/c/d/e.txt"))
assert.Equal(t, false, ident1.canDo(ACTION_DELETE_BUCKET, "bucket1", ""))
assert.Equal(t, false, ident1.canDo(ACTION_WRITE, "bucket1", "/a/b/other/some"), "action without *")
assert.Equal(t, false, ident1.canDo(ACTION_WRITE, "bucket1", "/a/b/*"), "action on parent directory")
assert.Equal(t, true, ident1.CanDo(ACTION_WRITE, "bucket1", "/a/b/c/d.txt"))
assert.Equal(t, true, ident1.CanDo(ACTION_WRITE, "bucket1", "/a/b/c/d/e.txt"))
assert.Equal(t, false, ident1.CanDo(ACTION_DELETE_BUCKET, "bucket1", ""))
assert.Equal(t, false, ident1.CanDo(ACTION_WRITE, "bucket1", "/a/b/other/some"), "action without *")
assert.Equal(t, false, ident1.CanDo(ACTION_WRITE, "bucket1", "/a/b/*"), "action on parent directory")
// bucket specific
ident2 := &Identity{
@ -101,11 +101,11 @@ func TestCanDo(t *testing.T) {
"WriteAcp:bucket1",
},
}
assert.Equal(t, true, ident2.canDo(ACTION_READ, "bucket1", "/a/b/c/d.txt"))
assert.Equal(t, true, ident2.canDo(ACTION_WRITE, "bucket1", "/a/b/c/d.txt"))
assert.Equal(t, true, ident2.canDo(ACTION_WRITE_ACP, "bucket1", ""))
assert.Equal(t, false, ident2.canDo(ACTION_READ_ACP, "bucket1", ""))
assert.Equal(t, false, ident2.canDo(ACTION_LIST, "bucket1", "/a/b/c/d.txt"))
assert.Equal(t, true, ident2.CanDo(ACTION_READ, "bucket1", "/a/b/c/d.txt"))
assert.Equal(t, true, ident2.CanDo(ACTION_WRITE, "bucket1", "/a/b/c/d.txt"))
assert.Equal(t, true, ident2.CanDo(ACTION_WRITE_ACP, "bucket1", ""))
assert.Equal(t, false, ident2.CanDo(ACTION_READ_ACP, "bucket1", ""))
assert.Equal(t, false, ident2.CanDo(ACTION_LIST, "bucket1", "/a/b/c/d.txt"))
// across buckets
ident3 := &Identity{
@ -115,10 +115,10 @@ func TestCanDo(t *testing.T) {
"Write",
},
}
assert.Equal(t, true, ident3.canDo(ACTION_READ, "bucket1", "/a/b/c/d.txt"))
assert.Equal(t, true, ident3.canDo(ACTION_WRITE, "bucket1", "/a/b/c/d.txt"))
assert.Equal(t, false, ident3.canDo(ACTION_LIST, "bucket1", "/a/b/other/some"))
assert.Equal(t, false, ident3.canDo(ACTION_WRITE_ACP, "bucket1", ""))
assert.Equal(t, true, ident3.CanDo(ACTION_READ, "bucket1", "/a/b/c/d.txt"))
assert.Equal(t, true, ident3.CanDo(ACTION_WRITE, "bucket1", "/a/b/c/d.txt"))
assert.Equal(t, false, ident3.CanDo(ACTION_LIST, "bucket1", "/a/b/other/some"))
assert.Equal(t, false, ident3.CanDo(ACTION_WRITE_ACP, "bucket1", ""))
// partial buckets
ident4 := &Identity{
@ -128,9 +128,9 @@ func TestCanDo(t *testing.T) {
"ReadAcp:special_*",
},
}
assert.Equal(t, true, ident4.canDo(ACTION_READ, "special_bucket", "/a/b/c/d.txt"))
assert.Equal(t, true, ident4.canDo(ACTION_READ_ACP, "special_bucket", ""))
assert.Equal(t, false, ident4.canDo(ACTION_READ, "bucket1", "/a/b/c/d.txt"))
assert.Equal(t, true, ident4.CanDo(ACTION_READ, "special_bucket", "/a/b/c/d.txt"))
assert.Equal(t, true, ident4.CanDo(ACTION_READ_ACP, "special_bucket", ""))
assert.Equal(t, false, ident4.CanDo(ACTION_READ, "bucket1", "/a/b/c/d.txt"))
// admin buckets
ident5 := &Identity{
@ -139,10 +139,10 @@ func TestCanDo(t *testing.T) {
"Admin:special_*",
},
}
assert.Equal(t, true, ident5.canDo(ACTION_READ, "special_bucket", "/a/b/c/d.txt"))
assert.Equal(t, true, ident5.canDo(ACTION_READ_ACP, "special_bucket", ""))
assert.Equal(t, true, ident5.canDo(ACTION_WRITE, "special_bucket", "/a/b/c/d.txt"))
assert.Equal(t, true, ident5.canDo(ACTION_WRITE_ACP, "special_bucket", ""))
assert.Equal(t, true, ident5.CanDo(ACTION_READ, "special_bucket", "/a/b/c/d.txt"))
assert.Equal(t, true, ident5.CanDo(ACTION_READ_ACP, "special_bucket", ""))
assert.Equal(t, true, ident5.CanDo(ACTION_WRITE, "special_bucket", "/a/b/c/d.txt"))
assert.Equal(t, true, ident5.CanDo(ACTION_WRITE_ACP, "special_bucket", ""))
// anonymous buckets
ident6 := &Identity{
@ -151,7 +151,7 @@ func TestCanDo(t *testing.T) {
"Read",
},
}
assert.Equal(t, true, ident6.canDo(ACTION_READ, "anything_bucket", "/a/b/c/d.txt"))
assert.Equal(t, true, ident6.CanDo(ACTION_READ, "anything_bucket", "/a/b/c/d.txt"))
//test deleteBucket operation
ident7 := &Identity{
@ -160,7 +160,7 @@ func TestCanDo(t *testing.T) {
"DeleteBucket:bucket1",
},
}
assert.Equal(t, true, ident7.canDo(ACTION_DELETE_BUCKET, "bucket1", ""))
assert.Equal(t, true, ident7.CanDo(ACTION_DELETE_BUCKET, "bucket1", ""))
}
func TestMatchWildcardPattern(t *testing.T) {
@ -580,7 +580,7 @@ func TestBucketLevelListPermissions(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
result := identity.canDo(tc.action, tc.bucket, tc.object)
result := identity.CanDo(tc.action, tc.bucket, tc.object)
assert.Equal(t, tc.shouldAllow, result, tc.description)
})
}
@ -599,7 +599,7 @@ func TestBucketLevelListPermissions(t *testing.T) {
testCases := []string{"anybucket", "mybucket", "test-bucket", "prod-data"}
for _, bucket := range testCases {
result := identity.canDo("List", bucket, "")
result := identity.CanDo("List", bucket, "")
assert.True(t, result, "Global List permission should allow access to bucket %s", bucket)
}
})
@ -614,9 +614,9 @@ func TestBucketLevelListPermissions(t *testing.T) {
}
// Should only allow access to the exact bucket
assert.True(t, identity.canDo("List", "specificbucket", ""), "Should allow access to exact bucket")
assert.False(t, identity.canDo("List", "specificbucket-test", ""), "Should deny access to bucket with suffix")
assert.False(t, identity.canDo("List", "otherbucket", ""), "Should deny access to different bucket")
assert.True(t, identity.CanDo("List", "specificbucket", ""), "Should allow access to exact bucket")
assert.False(t, identity.CanDo("List", "specificbucket-test", ""), "Should deny access to bucket with suffix")
assert.False(t, identity.CanDo("List", "otherbucket", ""), "Should deny access to different bucket")
})
t.Log("This test validates the fix for issue #7066")
@ -639,26 +639,26 @@ func TestListBucketsAuthRequest(t *testing.T) {
}
// Test 1: ListBuckets operation should succeed (bucket = "")
// This would have failed before the fix because canDo("List", "", "") would return false
// After the fix, it bypasses the canDo check for ListBuckets operations
// This would have failed before the fix because CanDo("List", "", "") would return false
// After the fix, it bypasses the CanDo check for ListBuckets operations
// Simulate what happens in authRequest for ListBuckets:
// action = ACTION_LIST, bucket = "", object = ""
// Before fix: identity.canDo(ACTION_LIST, "", "") would fail
// After fix: the canDo check should be bypassed
// Before fix: identity.CanDo(ACTION_LIST, "", "") would fail
// After fix: the CanDo check should be bypassed
// Test the individual canDo method to show it would fail without the special case
result := identity.canDo(Action(ACTION_LIST), "", "")
assert.False(t, result, "canDo should return false for empty bucket with bucket-specific permissions")
// Test the individual CanDo method to show it would fail without the special case
result := identity.CanDo(Action(ACTION_LIST), "", "")
assert.False(t, result, "CanDo should return false for empty bucket with bucket-specific permissions")
// Test with a specific bucket that matches the permission
result2 := identity.canDo(Action(ACTION_LIST), "mybucket", "")
assert.True(t, result2, "canDo should return true for matching bucket")
result2 := identity.CanDo(Action(ACTION_LIST), "mybucket", "")
assert.True(t, result2, "CanDo should return true for matching bucket")
// Test with a specific bucket that doesn't match
result3 := identity.canDo(Action(ACTION_LIST), "otherbucket", "")
assert.False(t, result3, "canDo should return false for non-matching bucket")
result3 := identity.CanDo(Action(ACTION_LIST), "otherbucket", "")
assert.False(t, result3, "CanDo should return false for non-matching bucket")
})
t.Run("Object listing maintains permission enforcement", func(t *testing.T) {
@ -675,14 +675,14 @@ func TestListBucketsAuthRequest(t *testing.T) {
// These operations have a specific bucket in the URL
// Should succeed for allowed bucket
result1 := identity.canDo(Action(ACTION_LIST), "mybucket", "prefix/")
result1 := identity.CanDo(Action(ACTION_LIST), "mybucket", "prefix/")
assert.True(t, result1, "Should allow listing objects in permitted bucket")
result2 := identity.canDo(Action(ACTION_LIST), "mybucket-prod", "")
result2 := identity.CanDo(Action(ACTION_LIST), "mybucket-prod", "")
assert.True(t, result2, "Should allow listing objects in wildcard-matched bucket")
// Should fail for disallowed bucket
result3 := identity.canDo(Action(ACTION_LIST), "otherbucket", "")
result3 := identity.CanDo(Action(ACTION_LIST), "otherbucket", "")
assert.False(t, result3, "Should deny listing objects in non-permitted bucket")
})
@ -734,11 +734,11 @@ func TestSignatureVerificationDoesNotCheckPermissions(t *testing.T) {
assert.Equal(t, "list_secret_key", cred.SecretKey)
// User should have the correct permissions
assert.True(t, identity.canDo(Action(ACTION_LIST), "bucket-123", ""))
assert.True(t, identity.canDo(Action(ACTION_READ), "bucket-123", ""))
assert.True(t, identity.CanDo(Action(ACTION_LIST), "bucket-123", ""))
assert.True(t, identity.CanDo(Action(ACTION_READ), "bucket-123", ""))
// User should NOT have write permissions
assert.False(t, identity.canDo(Action(ACTION_WRITE), "bucket-123", ""))
assert.False(t, identity.CanDo(Action(ACTION_WRITE), "bucket-123", ""))
})
t.Log("This test validates the fix for issue #7334")

weed/s3api/auth_security_test.go (4 changed lines)

@ -98,8 +98,8 @@ func TestReproIssue7912(t *testing.T) {
var nilIdentity *Identity
// Test isAdmin guard
assert.False(t, nilIdentity.isAdmin())
// Test canDo guard
assert.False(t, nilIdentity.canDo(s3_constants.ACTION_LIST, "bucket", "object"))
// Test CanDo guard
assert.False(t, nilIdentity.CanDo(s3_constants.ACTION_LIST, "bucket", "object"))
})
t.Run("AuthSignatureOnly path", func(t *testing.T) {

weed/s3api/auth_signature_v2.go (2 changed lines)

@ -96,7 +96,7 @@ func (iam *IdentityAccessManagement) doesPolicySignatureV2Match(formValues http.
}
bucket := formValues.Get("bucket")
if !identity.canDo(s3_constants.ACTION_WRITE, bucket, "") {
if !identity.CanDo(s3_constants.ACTION_WRITE, bucket, "") {
return s3err.ErrAccessDenied
}

weed/s3api/auth_signature_v4.go (2 changed lines)

@ -713,7 +713,7 @@ func (iam *IdentityAccessManagement) doesPolicySignatureV4Match(formValues http.
}
bucket := formValues.Get("bucket")
if !identity.canDo(s3_constants.ACTION_WRITE, bucket, "") {
if !identity.CanDo(s3_constants.ACTION_WRITE, bucket, "") {
return s3err.ErrAccessDenied
}

weed/s3api/auth_signature_v4_sts_test.go (8 changed lines)

@ -108,7 +108,7 @@ func TestVerifyV4SignatureWithSTSIdentity(t *testing.T) {
description: "STS identity should be denied when no IAM integration is available",
},
{
name: "Traditional identity with Actions - should use canDo",
name: "Traditional identity with Actions - should use CanDo",
identity: &Identity{
Name: "traditional-user",
Account: &AccountAdmin,
@ -117,10 +117,10 @@ func TestVerifyV4SignatureWithSTSIdentity(t *testing.T) {
shouldCheckPermissions: true,
iamIntegration: nil, // IAM integration not needed for traditional identities
expectedError: s3err.ErrNone,
description: "Traditional identity with Actions should use canDo check",
description: "Traditional identity with Actions should use CanDo check",
},
{
name: "Traditional identity with Actions - canDo denies",
name: "Traditional identity with Actions - CanDo denies",
identity: &Identity{
Name: "read-only-user",
Account: &AccountAdmin,
@ -129,7 +129,7 @@ func TestVerifyV4SignatureWithSTSIdentity(t *testing.T) {
shouldCheckPermissions: true,
iamIntegration: nil,
expectedError: s3err.ErrAccessDenied,
description: "Traditional identity should be denied when canDo fails (PUT requires WRITE)",
description: "Traditional identity should be denied when CanDo fails (PUT requires WRITE)",
},
{
name: "shouldCheckPermissions false - skip authorization",

weed/s3api/auth_sts_identity_test.go (16 changed lines)

@ -78,9 +78,9 @@ func TestSTSIdentityPolicyNamesPopulation(t *testing.T) {
// Verify that Actions is empty (STS identities should use IAM authorization, not legacy Actions)
assert.Empty(t, identity.Actions, "STS identities should have empty Actions to trigger IAM authorization path")
// Verify legacy canDo returns false (forcing fallback to IAM)
assert.False(t, identity.canDo("Read", "test-bucket", "/any/path"),
"canDo should return false for STS identities with no Actions")
// Verify legacy CanDo returns false (forcing fallback to IAM)
assert.False(t, identity.CanDo("Read", "test-bucket", "/any/path"),
"CanDo should return false for STS identities with no Actions")
// Verify authorization path selection
// When identity.Actions is empty and iamIntegration is available, it should use IAM authorization
@ -143,15 +143,15 @@ func TestSTSIdentityAuthorizationFlow(t *testing.T) {
assert.Empty(t, identity.Actions,
"STS identities should have empty Actions to trigger the IAM authorization path")
// Test 2: Verify canDo returns false (legacy auth should be bypassed)
// Test 2: Verify CanDo returns false (legacy auth should be bypassed)
// This is important because it confirms that identity.Actions being empty
// correctly forces the authorization logic to fall back to iam.authorizeWithIAM
assert.False(t, identity.canDo("Read", "test-bucket", "/any/path"),
"canDo should return false for STS identities with no Actions")
assert.False(t, identity.CanDo("Read", "test-bucket", "/any/path"),
"CanDo should return false for STS identities with no Actions")
// With empty Actions and populated PolicyNames, IAM authorization path will be used
// as per auth_credentials.go:703-713
t.Log("✓ Verified: STS identity correctly bypasses legacy canDo() to use IAM authorization path")
t.Log("✓ Verified: STS identity correctly bypasses legacy CanDo() to use IAM authorization path")
}
// TestSTSIdentityWithoutPolicyNames tests the bug scenario where PolicyNames is not populated
@ -237,7 +237,7 @@ func TestCanDoPathConstruction(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
result := identity.canDo(tc.action, tc.bucket, tc.objectKey)
result := identity.CanDo(tc.action, tc.bucket, tc.objectKey)
// Robust path construction for verification
fullPath := tc.bucket

weed/s3api/iceberg/iceberg.go (183 changed lines)

@ -13,68 +13,141 @@ import (
"github.com/gorilla/mux"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/s3api"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3tables"
)
func (s *Server) checkAuth(w http.ResponseWriter, r *http.Request, action s3api.Action, bucketName string) bool {
identityName := s3_constants.GetIdentityNameFromContext(r)
if identityName == "" {
writeError(w, http.StatusUnauthorized, "NotAuthorizedException", "Authentication required")
return false
}
identityObj := s3_constants.GetIdentityFromContext(r)
if identityObj == nil {
writeError(w, http.StatusForbidden, "ForbiddenException", "Access denied: missing identity")
return false
}
identity, ok := identityObj.(*s3api.Identity)
if !ok {
writeError(w, http.StatusForbidden, "ForbiddenException", "Access denied: invalid identity")
return false
}
if !identity.CanDo(action, bucketName, "") {
writeError(w, http.StatusForbidden, "ForbiddenException", "Access denied")
return false
}
return true
}
// FilerClient provides access to the filer for storage operations.
type FilerClient interface {
WithFilerClient(streamingMode bool, fn func(client filer_pb.SeaweedFilerClient) error) error
}
type S3Authenticator interface {
AuthenticateRequest(r *http.Request) (string, interface{}, s3err.ErrorCode)
}
// Server implements the Iceberg REST Catalog API.
type Server struct {
filerClient FilerClient
tablesManager *s3tables.Manager
prefix string // optional prefix for routes
authenticator S3Authenticator
}
// NewServer creates a new Iceberg REST Catalog server.
func NewServer(filerClient FilerClient) *Server {
func NewServer(filerClient FilerClient, authenticator S3Authenticator) *Server {
manager := s3tables.NewManager()
return &Server{
filerClient: filerClient,
tablesManager: manager,
prefix: "",
authenticator: authenticator,
}
}
// RegisterRoutes registers Iceberg REST API routes on the provided router.
func (s *Server) RegisterRoutes(router *mux.Router) {
// Configuration endpoint
router.HandleFunc("/v1/config", s.handleConfig).Methods(http.MethodGet)
router.HandleFunc("/v1/config", s.Auth(s.handleConfig)).Methods(http.MethodGet)
// Namespace endpoints
router.HandleFunc("/v1/namespaces", s.handleListNamespaces).Methods(http.MethodGet)
router.HandleFunc("/v1/namespaces", s.handleCreateNamespace).Methods(http.MethodPost)
router.HandleFunc("/v1/namespaces/{namespace}", s.handleGetNamespace).Methods(http.MethodGet)
router.HandleFunc("/v1/namespaces/{namespace}", s.handleNamespaceExists).Methods(http.MethodHead)
router.HandleFunc("/v1/namespaces/{namespace}", s.handleDropNamespace).Methods(http.MethodDelete)
router.HandleFunc("/v1/namespaces", s.Auth(s.handleListNamespaces)).Methods(http.MethodGet)
router.HandleFunc("/v1/namespaces", s.Auth(s.handleCreateNamespace)).Methods(http.MethodPost)
router.HandleFunc("/v1/namespaces/{namespace}", s.Auth(s.handleGetNamespace)).Methods(http.MethodGet)
router.HandleFunc("/v1/namespaces/{namespace}", s.Auth(s.handleNamespaceExists)).Methods(http.MethodHead)
router.HandleFunc("/v1/namespaces/{namespace}", s.Auth(s.handleDropNamespace)).Methods(http.MethodDelete)
// Table endpoints
router.HandleFunc("/v1/namespaces/{namespace}/tables", s.handleListTables).Methods(http.MethodGet)
router.HandleFunc("/v1/namespaces/{namespace}/tables", s.handleCreateTable).Methods(http.MethodPost)
router.HandleFunc("/v1/namespaces/{namespace}/tables/{table}", s.handleLoadTable).Methods(http.MethodGet)
router.HandleFunc("/v1/namespaces/{namespace}/tables/{table}", s.handleTableExists).Methods(http.MethodHead)
router.HandleFunc("/v1/namespaces/{namespace}/tables/{table}", s.handleDropTable).Methods(http.MethodDelete)
router.HandleFunc("/v1/namespaces/{namespace}/tables/{table}", s.handleUpdateTable).Methods(http.MethodPost)
router.HandleFunc("/v1/namespaces/{namespace}/tables", s.Auth(s.handleListTables)).Methods(http.MethodGet)
router.HandleFunc("/v1/namespaces/{namespace}/tables", s.Auth(s.handleCreateTable)).Methods(http.MethodPost)
router.HandleFunc("/v1/namespaces/{namespace}/tables/{table}", s.Auth(s.handleLoadTable)).Methods(http.MethodGet)
router.HandleFunc("/v1/namespaces/{namespace}/tables/{table}", s.Auth(s.handleTableExists)).Methods(http.MethodHead)
router.HandleFunc("/v1/namespaces/{namespace}/tables/{table}", s.Auth(s.handleDropTable)).Methods(http.MethodDelete)
router.HandleFunc("/v1/namespaces/{namespace}/tables/{table}", s.Auth(s.handleUpdateTable)).Methods(http.MethodPost)
// With prefix support
router.HandleFunc("/v1/{prefix}/namespaces", s.handleListNamespaces).Methods(http.MethodGet)
router.HandleFunc("/v1/{prefix}/namespaces", s.handleCreateNamespace).Methods(http.MethodPost)
router.HandleFunc("/v1/{prefix}/namespaces/{namespace}", s.handleGetNamespace).Methods(http.MethodGet)
router.HandleFunc("/v1/{prefix}/namespaces/{namespace}", s.handleNamespaceExists).Methods(http.MethodHead)
router.HandleFunc("/v1/{prefix}/namespaces/{namespace}", s.handleDropNamespace).Methods(http.MethodDelete)
router.HandleFunc("/v1/{prefix}/namespaces/{namespace}/tables", s.handleListTables).Methods(http.MethodGet)
router.HandleFunc("/v1/{prefix}/namespaces/{namespace}/tables", s.handleCreateTable).Methods(http.MethodPost)
router.HandleFunc("/v1/{prefix}/namespaces/{namespace}/tables/{table}", s.handleLoadTable).Methods(http.MethodGet)
router.HandleFunc("/v1/{prefix}/namespaces/{namespace}/tables/{table}", s.handleTableExists).Methods(http.MethodHead)
router.HandleFunc("/v1/{prefix}/namespaces/{namespace}/tables/{table}", s.handleDropTable).Methods(http.MethodDelete)
router.HandleFunc("/v1/{prefix}/namespaces/{namespace}/tables/{table}", s.handleUpdateTable).Methods(http.MethodPost)
router.HandleFunc("/v1/{prefix}/namespaces", s.Auth(s.handleListNamespaces)).Methods(http.MethodGet)
router.HandleFunc("/v1/{prefix}/namespaces", s.Auth(s.handleCreateNamespace)).Methods(http.MethodPost)
router.HandleFunc("/v1/{prefix}/namespaces/{namespace}", s.Auth(s.handleGetNamespace)).Methods(http.MethodGet)
router.HandleFunc("/v1/{prefix}/namespaces/{namespace}", s.Auth(s.handleNamespaceExists)).Methods(http.MethodHead)
router.HandleFunc("/v1/{prefix}/namespaces/{namespace}", s.Auth(s.handleDropNamespace)).Methods(http.MethodDelete)
router.HandleFunc("/v1/{prefix}/namespaces/{namespace}/tables", s.Auth(s.handleListTables)).Methods(http.MethodGet)
router.HandleFunc("/v1/{prefix}/namespaces/{namespace}/tables", s.Auth(s.handleCreateTable)).Methods(http.MethodPost)
router.HandleFunc("/v1/{prefix}/namespaces/{namespace}/tables/{table}", s.Auth(s.handleLoadTable)).Methods(http.MethodGet)
router.HandleFunc("/v1/{prefix}/namespaces/{namespace}/tables/{table}", s.Auth(s.handleTableExists)).Methods(http.MethodHead)
router.HandleFunc("/v1/{prefix}/namespaces/{namespace}/tables/{table}", s.Auth(s.handleDropTable)).Methods(http.MethodDelete)
router.HandleFunc("/v1/{prefix}/namespaces/{namespace}/tables/{table}", s.Auth(s.handleUpdateTable)).Methods(http.MethodPost)
glog.V(0).Infof("Registered Iceberg REST Catalog routes")
}
func (s *Server) Auth(handler http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if s.authenticator == nil {
writeError(w, http.StatusUnauthorized, "NotAuthorizedException", "Authentication required")
return
}
identityName, identity, errCode := s.authenticator.AuthenticateRequest(r)
if errCode != s3err.ErrNone {
apiErr := s3err.GetAPIError(errCode)
errorType := "RESTException"
switch apiErr.HTTPStatusCode {
case http.StatusForbidden:
errorType = "ForbiddenException"
case http.StatusUnauthorized:
errorType = "NotAuthorizedException"
case http.StatusBadRequest:
errorType = "BadRequestException"
case http.StatusInternalServerError:
errorType = "InternalServerError"
}
writeError(w, apiErr.HTTPStatusCode, errorType, apiErr.Description)
return
}
if identityName != "" || identity != nil {
ctx := r.Context()
if identityName != "" {
ctx = s3_constants.SetIdentityNameInContext(ctx, identityName)
}
if identity != nil {
ctx = s3_constants.SetIdentityInContext(ctx, identity)
}
r = r.WithContext(ctx)
}
handler(w, r)
}
}
// parseNamespace parses the namespace from path parameter.
// Iceberg uses unit separator (0x1F) for multi-level namespaces.
// Note: mux already decodes URL-encoded path parameters, so we only split by unit separator.
@ -140,6 +213,10 @@ func buildTableBucketARN(bucketName string) string {
// handleConfig returns catalog configuration.
func (s *Server) handleConfig(w http.ResponseWriter, r *http.Request) {
bucketName := getBucketFromPrefix(r)
if !s.checkAuth(w, r, s3_constants.ACTION_READ, bucketName) {
return
}
config := CatalogConfig{
Defaults: map[string]string{},
Overrides: map[string]string{},
@ -150,6 +227,9 @@ func (s *Server) handleConfig(w http.ResponseWriter, r *http.Request) {
// handleListNamespaces lists namespaces in a catalog.
func (s *Server) handleListNamespaces(w http.ResponseWriter, r *http.Request) {
bucketName := getBucketFromPrefix(r)
if !s.checkAuth(w, r, s3_constants.ACTION_LIST, bucketName) {
return
}
bucketARN := buildTableBucketARN(bucketName)
// Use S3 Tables manager to list namespaces
@ -185,16 +265,19 @@ func (s *Server) handleListNamespaces(w http.ResponseWriter, r *http.Request) {
// handleCreateNamespace creates a new namespace.
func (s *Server) handleCreateNamespace(w http.ResponseWriter, r *http.Request) {
bucketName := getBucketFromPrefix(r)
if !s.checkAuth(w, r, s3_constants.ACTION_WRITE, bucketName) {
return
}
bucketARN := buildTableBucketARN(bucketName)
var req CreateNamespaceRequest
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
writeError(w, http.StatusBadRequest, "BadRequest", "Invalid request body")
writeError(w, http.StatusBadRequest, "BadRequestException", "Invalid request body")
return
}
if len(req.Namespace) == 0 {
writeError(w, http.StatusBadRequest, "BadRequest", "Namespace is required")
writeError(w, http.StatusBadRequest, "BadRequestException", "Namespace is required")
return
}
@ -212,7 +295,7 @@ func (s *Server) handleCreateNamespace(w http.ResponseWriter, r *http.Request) {
if err != nil {
if strings.Contains(err.Error(), "already exists") {
writeError(w, http.StatusConflict, "NamespaceAlreadyExistsException", err.Error())
writeError(w, http.StatusConflict, "AlreadyExistsException", err.Error())
return
}
glog.V(1).Infof("Iceberg: CreateNamespace error: %v", err)
@ -232,11 +315,14 @@ func (s *Server) handleGetNamespace(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
namespace := parseNamespace(vars["namespace"])
if len(namespace) == 0 {
writeError(w, http.StatusBadRequest, "BadRequest", "Namespace is required")
writeError(w, http.StatusBadRequest, "BadRequestException", "Namespace is required")
return
}
bucketName := getBucketFromPrefix(r)
if !s.checkAuth(w, r, s3_constants.ACTION_READ, bucketName) {
return
}
bucketARN := buildTableBucketARN(bucketName)
// Use S3 Tables manager to get namespace
@ -278,6 +364,9 @@ func (s *Server) handleNamespaceExists(w http.ResponseWriter, r *http.Request) {
}
bucketName := getBucketFromPrefix(r)
if !s.checkAuth(w, r, s3_constants.ACTION_READ, bucketName) {
return
}
bucketARN := buildTableBucketARN(bucketName)
getReq := &s3tables.GetNamespaceRequest{
@ -308,11 +397,14 @@ func (s *Server) handleDropNamespace(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
namespace := parseNamespace(vars["namespace"])
if len(namespace) == 0 {
writeError(w, http.StatusBadRequest, "BadRequest", "Namespace is required")
writeError(w, http.StatusBadRequest, "BadRequestException", "Namespace is required")
return
}
bucketName := getBucketFromPrefix(r)
if !s.checkAuth(w, r, s3_constants.ACTION_DELETE_BUCKET, bucketName) {
return
}
bucketARN := buildTableBucketARN(bucketName)
deleteReq := &s3tables.DeleteNamespaceRequest{
@ -347,11 +439,14 @@ func (s *Server) handleListTables(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
namespace := parseNamespace(vars["namespace"])
if len(namespace) == 0 {
writeError(w, http.StatusBadRequest, "BadRequest", "Namespace is required")
writeError(w, http.StatusBadRequest, "BadRequestException", "Namespace is required")
return
}
bucketName := getBucketFromPrefix(r)
if !s.checkAuth(w, r, s3_constants.ACTION_LIST, bucketName) {
return
}
bucketARN := buildTableBucketARN(bucketName)
listReq := &s3tables.ListTablesRequest{
@ -396,22 +491,25 @@ func (s *Server) handleCreateTable(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
namespace := parseNamespace(vars["namespace"])
if len(namespace) == 0 {
writeError(w, http.StatusBadRequest, "BadRequest", "Namespace is required")
writeError(w, http.StatusBadRequest, "BadRequestException", "Namespace is required")
return
}
var req CreateTableRequest
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
writeError(w, http.StatusBadRequest, "BadRequest", "Invalid request body")
writeError(w, http.StatusBadRequest, "BadRequestException", "Invalid request body")
return
}
if req.Name == "" {
writeError(w, http.StatusBadRequest, "BadRequest", "Table name is required")
writeError(w, http.StatusBadRequest, "BadRequestException", "Table name is required")
return
}
bucketName := getBucketFromPrefix(r)
if !s.checkAuth(w, r, s3_constants.ACTION_WRITE, bucketName) {
return
}
bucketARN := buildTableBucketARN(bucketName)
// Generate UUID for the new table
@ -445,7 +543,7 @@ func (s *Server) handleCreateTable(w http.ResponseWriter, r *http.Request) {
if err != nil {
if strings.Contains(err.Error(), "already exists") {
writeError(w, http.StatusConflict, "TableAlreadyExistsException", err.Error())
writeError(w, http.StatusConflict, "AlreadyExistsException", err.Error())
return
}
glog.V(1).Infof("Iceberg: CreateTable error: %v", err)
@ -468,11 +566,14 @@ func (s *Server) handleLoadTable(w http.ResponseWriter, r *http.Request) {
tableName := vars["table"]
if len(namespace) == 0 || tableName == "" {
writeError(w, http.StatusBadRequest, "BadRequest", "Namespace and table name are required")
writeError(w, http.StatusBadRequest, "BadRequestException", "Namespace and table name are required")
return
}
bucketName := getBucketFromPrefix(r)
if !s.checkAuth(w, r, s3_constants.ACTION_READ, bucketName) {
return
}
bucketARN := buildTableBucketARN(bucketName)
getReq := &s3tables.GetTableRequest{
@ -534,6 +635,9 @@ func (s *Server) handleTableExists(w http.ResponseWriter, r *http.Request) {
}
bucketName := getBucketFromPrefix(r)
if !s.checkAuth(w, r, s3_constants.ACTION_READ, bucketName) {
return
}
bucketARN := buildTableBucketARN(bucketName)
getReq := &s3tables.GetTableRequest{
@ -562,11 +666,14 @@ func (s *Server) handleDropTable(w http.ResponseWriter, r *http.Request) {
tableName := vars["table"]
if len(namespace) == 0 || tableName == "" {
writeError(w, http.StatusBadRequest, "BadRequest", "Namespace and table name are required")
writeError(w, http.StatusBadRequest, "BadRequestException", "Namespace and table name are required")
return
}
bucketName := getBucketFromPrefix(r)
if !s.checkAuth(w, r, s3_constants.ACTION_DELETE_BUCKET, bucketName) {
return
}
bucketARN := buildTableBucketARN(bucketName)
deleteReq := &s3tables.DeleteTableRequest{
@ -595,6 +702,10 @@ func (s *Server) handleDropTable(w http.ResponseWriter, r *http.Request) {
// handleUpdateTable commits updates to a table.
func (s *Server) handleUpdateTable(w http.ResponseWriter, r *http.Request) {
bucketName := getBucketFromPrefix(r)
if !s.checkAuth(w, r, s3_constants.ACTION_WRITE, bucketName) {
return
}
// Return 501 Not Implemented
writeError(w, http.StatusNotImplemented, "UnsupportedOperationException", "Table update/commit not implemented")
}

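The handlers above all follow the same two-layer pattern: the route is wrapped in s.Auth (authentication plus JSON error mapping), and the handler body calls s.checkAuth with the action appropriate to the operation (authorization). A hypothetical example of extending the catalog with another read-only endpoint under that pattern; handleListViews and its route are not part of this commit:

// handleListViews is a hypothetical handler shown only to illustrate the pattern.
func (s *Server) handleListViews(w http.ResponseWriter, r *http.Request) {
	bucketName := getBucketFromPrefix(r)
	if !s.checkAuth(w, r, s3_constants.ACTION_LIST, bucketName) {
		return // checkAuth has already written the JSON error response
	}
	// ... list views for the namespace ...
}

// Registration mirrors the existing routes, wrapping the handler in the Auth middleware:
// router.HandleFunc("/v1/namespaces/{namespace}/views", s.Auth(s.handleListViews)).Methods(http.MethodGet)
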
weed/s3api/policy_engine/integration.go (4 changed lines)

@ -14,7 +14,7 @@ type Action string
// Identity represents a user identity - this should match the type in auth_credentials.go
type Identity interface {
canDo(action Action, bucket string, objectKey string) bool
CanDo(action Action, bucket string, objectKey string) bool
}
// PolicyBackedIAM provides policy-based access control with fallback to legacy IAM
@ -104,7 +104,7 @@ func (p *PolicyBackedIAM) evaluateLegacyAction(action, bucketName, objectName, p
// If we have an identity, check if it can perform the action
if identity != nil {
return identity.canDo(legacyAction, bucketName, objectName)
return identity.CanDo(legacyAction, bucketName, objectName)
}
}

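Because the policy engine's Identity interface now requires the exported CanDo, any type with that method satisfies it, which keeps test doubles simple. A hypothetical stub (not in this commit):

// allowAllIdentity is a hypothetical test stub that satisfies the policy_engine Identity interface.
type allowAllIdentity struct{}

func (allowAllIdentity) CanDo(action Action, bucket string, objectKey string) bool {
	return true // permit every action; useful only for isolating non-auth logic in tests
}
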
weed/s3api/s3api_bucket_handlers.go (2 changed lines)

@ -94,7 +94,7 @@ func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Reques
hasPermission = (errCode == s3err.ErrNone)
} else {
// Use legacy authorization for non-JWT users
hasPermission = identity.canDo(s3_constants.ACTION_LIST, entry.Name, "")
hasPermission = identity.CanDo(s3_constants.ACTION_LIST, entry.Name, "")
}
if !hasPermission {

weed/s3api/s3api_bucket_handlers_test.go (16 changed lines)

@ -723,7 +723,7 @@ func TestListBucketsIssue7647(t *testing.T) {
assert.True(t, isVisible, "Admin user should see all buckets, even ones they don't own")
// Test permission check for List action
canList := rootIdentity.canDo(s3_constants.ACTION_LIST, "test", "")
canList := rootIdentity.CanDo(s3_constants.ACTION_LIST, "test", "")
assert.True(t, canList, "Root user with List action should be able to list buckets")
})
@ -823,7 +823,7 @@ func TestListBucketsIssue7796(t *testing.T) {
assert.False(t, isOwner, "geoserver user should not be owner of bucket created by admin")
// Test permission check - should return true (has List:geoserver permission)
canList := geoserverIdentity.canDo(s3_constants.ACTION_LIST, "geoserver", "")
canList := geoserverIdentity.CanDo(s3_constants.ACTION_LIST, "geoserver", "")
assert.True(t, canList, "geoserver user with List:geoserver should be able to list geoserver bucket")
// Verify the combined visibility logic: ownership OR permission
@ -856,7 +856,7 @@ func TestListBucketsIssue7796(t *testing.T) {
assert.False(t, isOwner, "No owner metadata means not owned")
// But has permission
canList := geoserverIdentity.canDo(s3_constants.ACTION_LIST, "geoserver", "")
canList := geoserverIdentity.CanDo(s3_constants.ACTION_LIST, "geoserver", "")
assert.True(t, canList, "Has explicit List:geoserver permission")
// Verify the combined visibility logic: ownership OR permission
@ -891,7 +891,7 @@ func TestListBucketsIssue7796(t *testing.T) {
assert.False(t, isOwner, "geoserver doesn't own otherbucket")
// No permission for this bucket
canList := geoserverIdentity.canDo(s3_constants.ACTION_LIST, "otherbucket", "")
canList := geoserverIdentity.CanDo(s3_constants.ACTION_LIST, "otherbucket", "")
assert.False(t, canList, "geoserver has no List permission for otherbucket")
// Verify the combined visibility logic: ownership OR permission
@ -939,8 +939,8 @@ func TestListBucketsIssue7796(t *testing.T) {
assert.False(t, isOwnerGeoTTL)
// But has permission via wildcard
canListGeo := geoIdentity.canDo(s3_constants.ACTION_LIST, "geoserver", "")
canListGeoTTL := geoIdentity.canDo(s3_constants.ACTION_LIST, "geoserver-ttl", "")
canListGeo := geoIdentity.CanDo(s3_constants.ACTION_LIST, "geoserver", "")
canListGeoTTL := geoIdentity.CanDo(s3_constants.ACTION_LIST, "geoserver-ttl", "")
assert.True(t, canListGeo)
assert.True(t, canListGeoTTL)
@ -949,7 +949,7 @@ func TestListBucketsIssue7796(t *testing.T) {
assert.True(t, isOwnerGeoTTL || canListGeoTTL, "geoserver-ttl bucket should be visible via wildcard permission")
// Should NOT have permission for unrelated buckets
canListOther := geoIdentity.canDo(s3_constants.ACTION_LIST, "otherbucket", "")
canListOther := geoIdentity.CanDo(s3_constants.ACTION_LIST, "otherbucket", "")
assert.False(t, canListOther, "No permission for otherbucket")
assert.False(t, false || canListOther, "otherbucket should NOT be visible (no ownership, no permission)")
})
@ -1020,7 +1020,7 @@ func TestListBucketsIssue7796(t *testing.T) {
// Skip permission check if user is already the owner (optimization)
if !isOwner {
// Check permission
hasPermission := geoserverIdentity.canDo(s3_constants.ACTION_LIST, entry.Name, "")
hasPermission := geoserverIdentity.CanDo(s3_constants.ACTION_LIST, entry.Name, "")
if !hasPermission {
continue
}

weed/s3api/s3api_governance_permissions_test.go (6 changed lines)

@ -181,9 +181,9 @@ func TestCheckGovernanceBypassPermissionIntegrationBehavior(t *testing.T) {
//
// 1. Function calls s3a.iam.authRequest() with the bypass action
// 2. If authRequest returns errCode != s3err.ErrNone, function returns false
// 3. If authRequest succeeds, function checks identity.canDo() with the bypass action
// 4. If canDo() returns true, function returns true
// 5. If bypass permission fails, function checks admin action with identity.canDo()
// 3. If authRequest succeeds, function checks identity.CanDo() with the bypass action
// 4. If CanDo() returns true, function returns true
// 5. If bypass permission fails, function checks admin action with identity.CanDo()
// 6. If admin action succeeds, function returns true and logs admin access
// 7. If all checks fail, function returns false
//

weed/s3api/s3api_key_rotation.go (6 changed lines)

@ -72,7 +72,7 @@ func (s3a *S3ApiServer) rotateSSEKMSKey(entry *filer_pb.Entry, r *http.Request)
// For SSE-KMS, we can potentially do metadata-only rotation
// if the KMS service supports key aliasing and the data encryption key can be re-wrapped
if s3a.canDoMetadataOnlyKMSRotation(srcKeyID, dstKeyID) {
if s3a.CanDoMetadataOnlyKMSRotation(srcKeyID, dstKeyID) {
return s3a.rotateSSEKMSMetadataOnly(entry, srcKeyID, dstKeyID)
}
@ -80,8 +80,8 @@ func (s3a *S3ApiServer) rotateSSEKMSKey(entry *filer_pb.Entry, r *http.Request)
return s3a.rotateSSEKMSChunks(entry, srcKeyID, dstKeyID, r)
}
// canDoMetadataOnlyKMSRotation determines if KMS key rotation can be done metadata-only
func (s3a *S3ApiServer) canDoMetadataOnlyKMSRotation(srcKeyID, dstKeyID string) bool {
// CanDoMetadataOnlyKMSRotation determines if KMS key rotation can be done metadata-only
func (s3a *S3ApiServer) CanDoMetadataOnlyKMSRotation(srcKeyID, dstKeyID string) bool {
// For now, we'll be conservative and always re-encrypt
// In a full implementation, this would check if:
// 1. Both keys are in the same KMS instance

weed/s3api/s3api_object_handlers_acl.go (2 changed lines)

@ -266,7 +266,7 @@ func (s3a *S3ApiServer) PutObjectAclHandler(w http.ResponseWriter, r *http.Reque
}
// 4. Verify the authenticated identity can perform WriteAcp on this specific object
if identity == nil || !identity.canDo(writeAcpAction, bucket, object) {
if identity == nil || !identity.CanDo(writeAcpAction, bucket, object) {
glog.V(3).Infof("PutObjectAclHandler: Identity %v cannot perform WriteAcp on %s/%s", identity, bucket, object)
s3err.WriteErrorResponse(w, r, s3err.ErrAccessDenied)
return

weed/s3api/s3api_object_handlers_list_test.go (16 changed lines)

@ -390,8 +390,8 @@ func TestObjectLevelListPermissions(t *testing.T) {
},
}
// Test cases for canDo method
// Note: canDo concatenates bucket + objectKey, so "test-bucket" + "/allowed-prefix/file.txt" = "test-bucket/allowed-prefix/file.txt"
// Test cases for CanDo method
// Note: CanDo concatenates bucket + objectKey, so "test-bucket" + "/allowed-prefix/file.txt" = "test-bucket/allowed-prefix/file.txt"
testCases := []struct {
name string
action Action
@ -444,7 +444,7 @@ func TestObjectLevelListPermissions(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
result := identity.canDo(tc.action, tc.bucket, tc.object)
result := identity.CanDo(tc.action, tc.bucket, tc.object)
assert.Equal(t, tc.shouldAllow, result, tc.description)
})
}
@ -469,12 +469,12 @@ func TestObjectLevelListPermissions(t *testing.T) {
}
for _, tc := range testCases {
result := identity.canDo("List", "test-bucket", tc.object)
result := identity.CanDo("List", "test-bucket", tc.object)
assert.True(t, result, "Bucket-level permission should allow access to %s", tc.object)
}
// Should deny access to different buckets
result := identity.canDo("List", "other-bucket", "/file.txt")
result := identity.CanDo("List", "other-bucket", "/file.txt")
assert.False(t, result, "Should deny access to objects in different buckets")
})
@ -553,7 +553,7 @@ func TestObjectLevelListPermissions(t *testing.T) {
// After our middleware fix, it should check permission for the prefix
// Simulate: action=ACTION_LIST && object=="" && prefix="/txzl/" → object="/txzl/"
result := identity.canDo("List", "bdaai-shared-bucket", "/txzl/")
result := identity.CanDo("List", "bdaai-shared-bucket", "/txzl/")
// This should be allowed because:
// target = "List:bdaai-shared-bucket/txzl/"
@ -562,11 +562,11 @@ func TestObjectLevelListPermissions(t *testing.T) {
assert.True(t, result, "User with 'List:bdaai-shared-bucket/txzl/*' should be able to list with prefix txzl/")
// Test that they can't list with a different prefix
result = identity.canDo("List", "bdaai-shared-bucket", "/other-prefix/")
result = identity.CanDo("List", "bdaai-shared-bucket", "/other-prefix/")
assert.False(t, result, "User should not be able to list with a different prefix")
// Test that they can't list a different bucket
result = identity.canDo("List", "other-bucket", "/txzl/")
result = identity.CanDo("List", "other-bucket", "/txzl/")
assert.False(t, result, "User should not be able to list a different bucket")
})

weed/s3api/s3api_object_retention.go (4 changed lines)

@ -549,14 +549,14 @@ func (s3a *S3ApiServer) checkGovernanceBypassPermission(request *http.Request, b
}
// Verify that the authenticated identity can perform this action
if identity != nil && identity.canDo(action, bucket, object) {
if identity != nil && identity.CanDo(action, bucket, object) {
return true
}
// Additional check: allow users with Admin action to bypass governance retention
// Use the proper S3 Admin action constant instead of generic isAdmin() method
adminAction := Action(fmt.Sprintf("%s:%s", s3_constants.ACTION_ADMIN, resource))
if identity != nil && identity.canDo(adminAction, bucket, object) {
if identity != nil && identity.CanDo(adminAction, bucket, object) {
glog.V(2).Infof("Admin user %s granted governance bypass permission for %s/%s", identity.Name, bucket, object)
return true
}

weed/s3api/s3api_server.go (12 changed lines)

@ -857,3 +857,15 @@ func loadIAMManagerFromConfig(configPath string, filerAddressProvider func() str
return iamManager, nil
}
// AuthenticateRequest authenticates the request and returns the identity name and object
func (s3a *S3ApiServer) AuthenticateRequest(r *http.Request) (string, interface{}, s3err.ErrorCode) {
if s3a.iam == nil {
return "", nil, s3err.ErrAccessDenied
}
identity, err := s3a.iam.AuthenticateRequest(r)
if identity != nil {
return identity.Name, identity, err
}
return "", nil, err
}
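Since S3ApiServer.AuthenticateRequest now has to line up with the iceberg.S3Authenticator interface, one optional way to pin that contract down (not present in this diff) is a compile-time assertion placed where both packages are already imported, for example in weed/command/s3.go:

// Hypothetical compile-time check: fails the build if *s3api.S3ApiServer
// ever stops satisfying iceberg.S3Authenticator.
var _ iceberg.S3Authenticator = (*s3api.S3ApiServer)(nil)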