Browse Source

Merge branch 'master' into add_error_list_each_entry_func

pull/7485/head
tam-i13 3 weeks ago
committed by GitHub
parent
commit
52f7d29742
No known key found for this signature in database GPG Key ID: B5690EEEBB952194
  1. 9
      weed/s3api/auth_credentials.go
  2. 239
      weed/s3api/policy_conversion.go
  3. 614
      weed/s3api/policy_conversion_test.go
  4. 334
      weed/s3api/s3_action_resolver.go
  5. 84
      weed/s3api/s3_constants/s3_action_strings.go
  6. 316
      weed/s3api/s3_granular_action_security_test.go
  7. 173
      weed/s3api/s3_iam_middleware.go
  8. 6
      weed/s3api/s3_iam_simple_test.go
  9. 48
      weed/s3api/s3_list_parts_action_test.go
  10. 4
      weed/s3api/s3_multipart_iam_test.go
  11. 3
      weed/s3api/s3api_bucket_handlers.go
  12. 138
      weed/s3api/s3api_bucket_policy_engine.go
  13. 9
      weed/s3api/s3api_object_handlers_put.go
  14. 12
      weed/s3api/s3api_server.go
  15. 6
      weed/server/volume_server_handlers.go
  16. 10
      weed/server/volume_server_handlers_read.go
  17. 38
      weed/shell/command_volume_check_disk.go

9
weed/s3api/auth_credentials.go

@ -54,8 +54,8 @@ type IdentityAccessManagement struct {
// IAM Integration for advanced features
iamIntegration *S3IAMIntegration
// Link to S3ApiServer for bucket policy evaluation
s3ApiServer *S3ApiServer
// Bucket policy engine for evaluating bucket policies
policyEngine *BucketPolicyEngine
}
type Identity struct {
@ -511,9 +511,10 @@ func (iam *IdentityAccessManagement) authRequest(r *http.Request, action Action)
// - Explicit DENY in bucket policy → immediate rejection
// - Explicit ALLOW in bucket policy → grant access (bypass IAM checks)
// - No policy or indeterminate → fall through to IAM checks
if iam.s3ApiServer != nil && iam.s3ApiServer.policyEngine != nil && bucket != "" {
if iam.policyEngine != nil && bucket != "" {
principal := buildPrincipalARN(identity)
allowed, evaluated, err := iam.s3ApiServer.policyEngine.EvaluatePolicy(bucket, object, string(action), principal)
// Use context-aware policy evaluation to get the correct S3 action
allowed, evaluated, err := iam.policyEngine.EvaluatePolicyWithContext(bucket, object, string(action), principal, r)
if err != nil {
// SECURITY: Fail-close on policy evaluation errors

239
weed/s3api/policy_conversion.go

@ -0,0 +1,239 @@
package s3api
import (
"fmt"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/iam/policy"
"github.com/seaweedfs/seaweedfs/weed/s3api/policy_engine"
)
// ConvertPolicyDocumentToPolicyEngine converts a policy.PolicyDocument to a
// policy_engine.PolicyDocument using explicit, type-safe field mapping:
//   - []string fields become StringOrStringSlice
//   - Condition maps are converted with value-type validation
//   - Principal fields are converted; only AWS principals are supported
//   - the optional Id has no counterpart in policy_engine and is dropped
//
// A nil input yields (nil, nil). An error is returned when any statement uses
// unsupported constructs or contains malformed data.
func ConvertPolicyDocumentToPolicyEngine(src *policy.PolicyDocument) (*policy_engine.PolicyDocument, error) {
	if src == nil {
		return nil, nil
	}
	// The destination type cannot carry an Id; surface the loss instead of
	// dropping it silently.
	if src.Id != "" {
		glog.Warningf("policy document Id %q is not supported and will be ignored", src.Id)
	}
	statements := make([]policy_engine.PolicyStatement, len(src.Statement))
	for i := range src.Statement {
		converted, err := convertStatement(&src.Statement[i])
		if err != nil {
			return nil, fmt.Errorf("failed to convert statement %d: %w", i, err)
		}
		statements[i] = converted
	}
	return &policy_engine.PolicyDocument{
		Version:   src.Version,
		Statement: statements,
	}, nil
}
// convertStatement converts a single policy.Statement into a
// policy_engine.PolicyStatement.
//
// Statements using NotAction, NotResource, or NotPrincipal are rejected
// outright: those fields invert matching semantics, and silently ignoring
// them could widen access in ways the policy author never intended.
func convertStatement(src *policy.Statement) (policy_engine.PolicyStatement, error) {
	var zero policy_engine.PolicyStatement
	switch {
	case len(src.NotAction) > 0:
		return zero, fmt.Errorf("statement %q: NotAction is not supported (would invert action logic, creating potential security risk)", src.Sid)
	case len(src.NotResource) > 0:
		return zero, fmt.Errorf("statement %q: NotResource is not supported (would invert resource logic, creating potential security risk)", src.Sid)
	case src.NotPrincipal != nil:
		return zero, fmt.Errorf("statement %q: NotPrincipal is not supported (would invert principal logic, creating potential security risk)", src.Sid)
	}
	out := policy_engine.PolicyStatement{
		Sid:    src.Sid,
		Effect: policy_engine.PolicyEffect(src.Effect),
	}
	// Action and Resource: []string -> StringOrStringSlice.
	if len(src.Action) > 0 {
		out.Action = policy_engine.NewStringOrStringSlice(src.Action...)
	}
	if len(src.Resource) > 0 {
		out.Resource = policy_engine.NewStringOrStringSlice(src.Resource...)
	}
	// Principal is a loosely-typed interface{}; delegate the shape handling.
	if src.Principal != nil {
		principal, err := convertPrincipal(src.Principal)
		if err != nil {
			return zero, fmt.Errorf("statement %q: failed to convert principal: %w", src.Sid, err)
		}
		out.Principal = principal
	}
	// Condition blocks are converted with per-value type validation.
	if len(src.Condition) > 0 {
		cond, err := convertCondition(src.Condition)
		if err != nil {
			return zero, fmt.Errorf("statement %q: failed to convert condition: %w", src.Sid, err)
		}
		out.Condition = cond
	}
	return out, nil
}
// convertPrincipal converts a statement Principal value to *StringOrStringSlice.
//
// Supported shapes:
//   - string:                   a single principal (must be non-empty)
//   - []string / []interface{}: a list of principals; nil entries are skipped,
//     empty strings are rejected
//   - map[string]interface{}:   AWS-style {"AWS": ...} only; Service/Federated
//     principals would need special handling and are rejected
//
// A nil input (or a list that reduces to nothing) yields (nil, nil).
func convertPrincipal(principal interface{}) (*policy_engine.StringOrStringSlice, error) {
	if principal == nil {
		return nil, nil
	}
	switch p := principal.(type) {
	case string:
		if p == "" {
			return nil, fmt.Errorf("principal string cannot be empty")
		}
		result := policy_engine.NewStringOrStringSlice(p)
		return &result, nil
	case []string:
		if len(p) == 0 {
			return nil, nil
		}
		for _, s := range p {
			if s == "" {
				return nil, fmt.Errorf("principal string in slice cannot be empty")
			}
		}
		result := policy_engine.NewStringOrStringSlice(p...)
		return &result, nil
	case []interface{}:
		strs := make([]string, 0, len(p))
		for _, v := range p {
			if v == nil {
				// Skip nil entries rather than failing the whole policy.
				continue
			}
			str, err := convertToString(v)
			if err != nil {
				return nil, fmt.Errorf("failed to convert principal array item: %w", err)
			}
			if str == "" {
				return nil, fmt.Errorf("principal string in slice cannot be empty")
			}
			strs = append(strs, str)
		}
		if len(strs) == 0 {
			return nil, nil
		}
		result := policy_engine.NewStringOrStringSlice(strs...)
		return &result, nil
	case map[string]interface{}:
		// Handle AWS-style principal with service/user keys
		// Example: {"AWS": "arn:aws:iam::123456789012:user/Alice"}
		// Only AWS principals are supported for now. Other types like Service or
		// Federated need special handling. The error is returned to the caller,
		// which reports it; logging here as well would double-report the failure.
		awsPrincipals, ok := p["AWS"]
		if !ok || len(p) != 1 {
			return nil, fmt.Errorf("unsupported principal map, only a single 'AWS' key is supported, got keys: %v", getMapKeys(p))
		}
		// Recursively convert the AWS principal value (string, []string, or []interface{}).
		res, err := convertPrincipal(awsPrincipals)
		if err != nil {
			return nil, fmt.Errorf("invalid 'AWS' principal value: %w", err)
		}
		return res, nil
	default:
		return nil, fmt.Errorf("unsupported principal type: %T", p)
	}
}
// convertCondition converts the source condition map
// (operator -> key -> raw value) into policy_engine.PolicyConditions,
// normalizing every value to a StringOrStringSlice.
// An empty input yields (nil, nil).
func convertCondition(src map[string]map[string]interface{}) (policy_engine.PolicyConditions, error) {
	if len(src) == 0 {
		return nil, nil
	}
	out := make(policy_engine.PolicyConditions, len(src))
	for operator, block := range src {
		converted := make(map[string]policy_engine.StringOrStringSlice, len(block))
		for key, raw := range block {
			value, err := convertConditionValue(raw)
			if err != nil {
				return nil, fmt.Errorf("failed to convert condition %s[%s]: %w", operator, key, err)
			}
			converted[key] = value
		}
		out[operator] = converted
	}
	return out, nil
}
// convertConditionValue normalizes a single condition value to a
// StringOrStringSlice. Strings pass through; []string and []interface{} become
// multi-valued (nil entries skipped); scalar primitives such as numbers and
// booleans are stringified.
func convertConditionValue(value interface{}) (policy_engine.StringOrStringSlice, error) {
	switch v := value.(type) {
	case string:
		return policy_engine.NewStringOrStringSlice(v), nil
	case []string:
		return policy_engine.NewStringOrStringSlice(v...), nil
	case []interface{}:
		out := make([]string, 0, len(v))
		for _, item := range v {
			if item == nil {
				continue // nil entries are skipped, mirroring principal handling
			}
			s, err := convertToString(item)
			if err != nil {
				return policy_engine.StringOrStringSlice{}, fmt.Errorf("failed to convert condition array item: %w", err)
			}
			out = append(out, s)
		}
		return policy_engine.NewStringOrStringSlice(out...), nil
	default:
		// Non-string scalars (numbers, booleans, ...) are rendered as strings.
		s, err := convertToString(v)
		if err != nil {
			return policy_engine.StringOrStringSlice{}, err
		}
		return policy_engine.NewStringOrStringSlice(s), nil
	}
}
// convertToString converts a primitive policy value to its string
// representation. Only strings, booleans, and the built-in numeric types are
// supported; anything else (maps, slices, structs, functions, ...) returns an
// error so malformed policy data is rejected rather than silently corrupted.
// The error is wrapped and reported by every caller, so this function does not
// also log it (avoids double-reporting the same failure).
func convertToString(value interface{}) (string, error) {
	switch v := value.(type) {
	case string:
		return v, nil
	case bool,
		int, int8, int16, int32, int64,
		uint, uint8, uint16, uint32, uint64,
		float32, float64:
		// fmt.Sprint renders supported primitives in their canonical %v form.
		return fmt.Sprint(v), nil
	default:
		return "", fmt.Errorf("unsupported type in policy conversion: %T", v)
	}
}
// getMapKeys returns the keys of m as a slice; used to build error messages.
// Iteration order is not deterministic.
func getMapKeys(m map[string]interface{}) []string {
	out := make([]string, 0, len(m))
	for key := range m {
		out = append(out, key)
	}
	return out
}

614
weed/s3api/policy_conversion_test.go

@ -0,0 +1,614 @@
package s3api
import (
"strings"
"testing"
"github.com/seaweedfs/seaweedfs/weed/iam/policy"
)
// TestConvertPolicyDocumentWithMixedTypes verifies that numeric and boolean
// values inside principal and condition arrays are stringified during
// conversion. Length mismatches use Fatalf (not Errorf) because the index
// loops that follow would otherwise panic with an out-of-range error instead
// of failing cleanly.
func TestConvertPolicyDocumentWithMixedTypes(t *testing.T) {
	src := &policy.PolicyDocument{
		Version: "2012-10-17",
		Statement: []policy.Statement{
			{
				Sid:       "TestMixedTypes",
				Effect:    "Allow",
				Action:    []string{"s3:GetObject"},
				Resource:  []string{"arn:aws:s3:::bucket/*"},
				Principal: []interface{}{"user1", 123, true}, // Mixed types
				Condition: map[string]map[string]interface{}{
					"NumericEquals": {
						"s3:max-keys": []interface{}{100, 200, "300"}, // Mixed types
					},
					"StringEquals": {
						"s3:prefix": []interface{}{"test", 123, false}, // Mixed types
					},
				},
			},
		},
	}
	dest, err := ConvertPolicyDocumentToPolicyEngine(src)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if dest == nil {
		t.Fatal("Expected non-nil result")
	}
	if dest.Version != "2012-10-17" {
		t.Errorf("Expected version '2012-10-17', got '%s'", dest.Version)
	}
	if len(dest.Statement) != 1 {
		t.Fatalf("Expected 1 statement, got %d", len(dest.Statement))
	}
	stmt := dest.Statement[0]
	if stmt.Principal == nil {
		t.Fatal("Expected non-nil Principal")
	}
	principals := stmt.Principal.Strings()
	// Fatalf: the index loop below would panic on a short slice.
	if len(principals) != 3 {
		t.Fatalf("Expected 3 principals, got %d", len(principals))
	}
	expectedPrincipals := []string{"user1", "123", "true"}
	for i, expected := range expectedPrincipals {
		if principals[i] != expected {
			t.Errorf("Principal[%d]: expected '%s', got '%s'", i, expected, principals[i])
		}
	}
	if len(stmt.Condition) != 2 {
		t.Errorf("Expected 2 condition blocks, got %d", len(stmt.Condition))
	}
	numericCond, ok := stmt.Condition["NumericEquals"]
	if !ok {
		t.Fatal("Expected NumericEquals condition")
	}
	maxKeys, ok := numericCond["s3:max-keys"]
	if !ok {
		t.Fatal("Expected s3:max-keys in NumericEquals")
	}
	maxKeysStrs := maxKeys.Strings()
	expectedMaxKeys := []string{"100", "200", "300"}
	// Fatalf: the index loop below would panic on a short slice.
	if len(maxKeysStrs) != len(expectedMaxKeys) {
		t.Fatalf("Expected %d max-keys values, got %d", len(expectedMaxKeys), len(maxKeysStrs))
	}
	for i, expected := range expectedMaxKeys {
		if maxKeysStrs[i] != expected {
			t.Errorf("max-keys[%d]: expected '%s', got '%s'", i, expected, maxKeysStrs[i])
		}
	}
	stringCond, ok := stmt.Condition["StringEquals"]
	if !ok {
		t.Fatal("Expected StringEquals condition")
	}
	prefix, ok := stringCond["s3:prefix"]
	if !ok {
		t.Fatal("Expected s3:prefix in StringEquals")
	}
	prefixStrs := prefix.Strings()
	expectedPrefix := []string{"test", "123", "false"}
	// Fatalf: the index loop below would panic on a short slice.
	if len(prefixStrs) != len(expectedPrefix) {
		t.Fatalf("Expected %d prefix values, got %d", len(expectedPrefix), len(prefixStrs))
	}
	for i, expected := range expectedPrefix {
		if prefixStrs[i] != expected {
			t.Errorf("prefix[%d]: expected '%s', got '%s'", i, expected, prefixStrs[i])
		}
	}
}
// TestConvertPrincipalWithMapAndMixedTypes verifies AWS-style principal maps
// whose value array mixes strings, numbers, and booleans.
func TestConvertPrincipalWithMapAndMixedTypes(t *testing.T) {
	principalMap := map[string]interface{}{
		"AWS": []interface{}{
			"arn:aws:iam::123456789012:user/Alice",
			456,  // User ID as number
			true, // Some boolean value
		},
	}
	result, err := convertPrincipal(principalMap)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if result == nil {
		t.Fatal("Expected non-nil result")
	}
	strs := result.Strings()
	// Fatalf (not Errorf): the index loop below would panic on a short slice.
	if len(strs) != 3 {
		t.Fatalf("Expected 3 values, got %d", len(strs))
	}
	expectedValues := []string{
		"arn:aws:iam::123456789012:user/Alice",
		"456",
		"true",
	}
	for i, expected := range expectedValues {
		if strs[i] != expected {
			t.Errorf("Value[%d]: expected '%s', got '%s'", i, expected, strs[i])
		}
	}
}
// TestConvertConditionValueWithMixedTypes verifies that a []interface{}
// condition value mixing strings, ints, bools, and floats is converted to
// strings.
func TestConvertConditionValueWithMixedTypes(t *testing.T) {
	mixedValues := []interface{}{
		"string",
		123,
		true,
		456.78,
	}
	result, err := convertConditionValue(mixedValues)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	strs := result.Strings()
	expectedValues := []string{"string", "123", "true", "456.78"}
	// Fatalf (not Errorf): the index loop below would panic on a short slice.
	if len(strs) != len(expectedValues) {
		t.Fatalf("Expected %d values, got %d", len(expectedValues), len(strs))
	}
	for i, expected := range expectedValues {
		if strs[i] != expected {
			t.Errorf("Value[%d]: expected '%s', got '%s'", i, expected, strs[i])
		}
	}
}
// TestConvertPolicyDocumentNil verifies that a nil document converts to (nil, nil).
func TestConvertPolicyDocumentNil(t *testing.T) {
	got, err := ConvertPolicyDocumentToPolicyEngine(nil)
	if err != nil {
		t.Errorf("Unexpected error for nil input: %v", err)
	}
	if got != nil {
		t.Error("Expected nil result for nil input")
	}
}
// TestConvertPrincipalNil verifies that a nil principal converts to (nil, nil).
func TestConvertPrincipalNil(t *testing.T) {
	got, err := convertPrincipal(nil)
	if err != nil {
		t.Errorf("Unexpected error for nil input: %v", err)
	}
	if got != nil {
		t.Error("Expected nil result for nil input")
	}
}
// TestConvertPrincipalEmptyArray verifies that an empty array yields a nil result.
func TestConvertPrincipalEmptyArray(t *testing.T) {
	got, err := convertPrincipal([]interface{}{})
	if err != nil {
		t.Errorf("Unexpected error for empty array: %v", err)
	}
	if got != nil {
		t.Error("Expected nil result for empty array")
	}
}
// TestConvertPrincipalUnknownType verifies that an unsupported principal type
// is rejected with an error and a nil result.
func TestConvertPrincipalUnknownType(t *testing.T) {
	got, err := convertPrincipal(12345) // Just a number, not valid principal
	if err == nil {
		t.Error("Expected error for unknown type")
	}
	if got != nil {
		t.Error("Expected nil result for unknown type")
	}
}
// TestConvertPrincipalWithNilValues verifies that nil entries in a principal
// array are skipped rather than failing the conversion.
func TestConvertPrincipalWithNilValues(t *testing.T) {
	principalArray := []interface{}{
		"arn:aws:iam::123456789012:user/Alice",
		nil, // Should be skipped
		"arn:aws:iam::123456789012:user/Bob",
		nil, // Should be skipped
	}
	result, err := convertPrincipal(principalArray)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if result == nil {
		t.Fatal("Expected non-nil result")
	}
	strs := result.Strings()
	// Fatalf (not Errorf): the index loop below would panic on a short slice.
	if len(strs) != 2 {
		t.Fatalf("Expected 2 values (nil values skipped), got %d", len(strs))
	}
	expectedValues := []string{
		"arn:aws:iam::123456789012:user/Alice",
		"arn:aws:iam::123456789012:user/Bob",
	}
	for i, expected := range expectedValues {
		if strs[i] != expected {
			t.Errorf("Value[%d]: expected '%s', got '%s'", i, expected, strs[i])
		}
	}
}
// TestConvertConditionValueWithNilValues verifies that nil entries in a
// condition array are skipped rather than failing the conversion.
func TestConvertConditionValueWithNilValues(t *testing.T) {
	mixedValues := []interface{}{
		"string",
		nil, // Should be skipped
		123,
		nil, // Should be skipped
		true,
	}
	result, err := convertConditionValue(mixedValues)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	strs := result.Strings()
	expectedValues := []string{"string", "123", "true"}
	// Fatalf (not Errorf): the index loop below would panic on a short slice.
	if len(strs) != len(expectedValues) {
		t.Fatalf("Expected %d values (nil values skipped), got %d", len(expectedValues), len(strs))
	}
	for i, expected := range expectedValues {
		if strs[i] != expected {
			t.Errorf("Value[%d]: expected '%s', got '%s'", i, expected, strs[i])
		}
	}
}
// TestConvertPrincipalMapWithNilValues verifies that nil entries inside an
// AWS-style principal map's value array are skipped.
func TestConvertPrincipalMapWithNilValues(t *testing.T) {
	principalMap := map[string]interface{}{
		"AWS": []interface{}{
			"arn:aws:iam::123456789012:user/Alice",
			nil, // Should be skipped
			"arn:aws:iam::123456789012:user/Bob",
		},
	}
	result, err := convertPrincipal(principalMap)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if result == nil {
		t.Fatal("Expected non-nil result")
	}
	strs := result.Strings()
	// Fatalf (not Errorf): the index loop below would panic on a short slice.
	if len(strs) != 2 {
		t.Fatalf("Expected 2 values (nil value skipped), got %d", len(strs))
	}
	expectedValues := []string{
		"arn:aws:iam::123456789012:user/Alice",
		"arn:aws:iam::123456789012:user/Bob",
	}
	for i, expected := range expectedValues {
		if strs[i] != expected {
			t.Errorf("Value[%d]: expected '%s', got '%s'", i, expected, strs[i])
		}
	}
}
// TestConvertToStringUnsupportedType verifies that composite and other
// unsupported types (maps, slices, structs, functions) produce an error and
// an empty string.
func TestConvertToStringUnsupportedType(t *testing.T) {
	type customStruct struct {
		Field string
	}
	cases := []struct {
		name     string
		input    interface{}
		expected string
	}{
		{name: "nested map", input: map[string]interface{}{"key": "value"}, expected: ""},
		{name: "nested slice", input: []int{1, 2, 3}, expected: ""},
		{name: "custom struct", input: customStruct{Field: "test"}, expected: ""},
		{name: "function", input: func() {}, expected: ""},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			got, err := convertToString(c.input)
			if err == nil {
				t.Error("Expected error for unsupported type")
			}
			if got != c.expected {
				t.Errorf("Expected '%s', got '%s'", c.expected, got)
			}
		})
	}
}
// TestConvertToStringSupportedTypes verifies that every supported primitive
// type is rendered in its canonical string form.
func TestConvertToStringSupportedTypes(t *testing.T) {
	cases := []struct {
		name     string
		input    interface{}
		expected string
	}{
		{"string", "test", "test"},
		{"bool true", true, "true"},
		{"bool false", false, "false"},
		{"int", 123, "123"},
		{"int8", int8(127), "127"},
		{"int16", int16(32767), "32767"},
		{"int32", int32(2147483647), "2147483647"},
		{"int64", int64(9223372036854775807), "9223372036854775807"},
		{"uint", uint(123), "123"},
		{"uint8", uint8(255), "255"},
		{"uint16", uint16(65535), "65535"},
		{"uint32", uint32(4294967295), "4294967295"},
		{"uint64", uint64(18446744073709551615), "18446744073709551615"},
		{"float32", float32(3.14), "3.14"},
		{"float64", float64(3.14159265359), "3.14159265359"},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			got, err := convertToString(c.input)
			if err != nil {
				t.Errorf("Unexpected error for supported type %s: %v", c.name, err)
			}
			if got != c.expected {
				t.Errorf("Expected '%s', got '%s'", c.expected, got)
			}
		})
	}
}
// TestConvertPrincipalUnsupportedTypes verifies that Service/Federated
// principals and multi-key principal maps are rejected.
func TestConvertPrincipalUnsupportedTypes(t *testing.T) {
	cases := []struct {
		name      string
		principal interface{}
	}{
		{name: "Service principal", principal: map[string]interface{}{"Service": "s3.amazonaws.com"}},
		{name: "Federated principal", principal: map[string]interface{}{"Federated": "arn:aws:iam::123456789012:saml-provider/ExampleProvider"}},
		{name: "Multiple keys", principal: map[string]interface{}{"AWS": "arn:...", "Service": "s3.amazonaws.com"}},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			got, err := convertPrincipal(c.principal)
			if err == nil {
				t.Error("Expected error for unsupported principal type")
			}
			if got != nil {
				t.Error("Expected nil result for unsupported principal type")
			}
		})
	}
}
// TestConvertPrincipalEmptyStrings verifies that empty principal strings are
// rejected in every supported shape (bare string, slices, and AWS map values).
func TestConvertPrincipalEmptyStrings(t *testing.T) {
	cases := []struct {
		name      string
		principal interface{}
		wantError string
	}{
		{
			name:      "Empty string principal",
			principal: "",
			wantError: "principal string cannot be empty",
		},
		{
			name:      "Empty string in array",
			principal: []string{"arn:aws:iam::123456789012:user/Alice", "", "arn:aws:iam::123456789012:user/Bob"},
			wantError: "principal string in slice cannot be empty",
		},
		{
			name:      "Empty string in interface array",
			principal: []interface{}{"arn:aws:iam::123456789012:user/Alice", ""},
			wantError: "principal string in slice cannot be empty",
		},
		{
			name:      "Empty string in AWS map",
			principal: map[string]interface{}{"AWS": ""},
			wantError: "principal string cannot be empty",
		},
		{
			name:      "Empty string in AWS map array",
			principal: map[string]interface{}{"AWS": []string{"arn:aws:iam::123456789012:user/Alice", ""}},
			wantError: "principal string in slice cannot be empty",
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			got, err := convertPrincipal(c.principal)
			switch {
			case err == nil:
				t.Error("Expected error for empty principal string")
			case !strings.Contains(err.Error(), c.wantError):
				t.Errorf("Expected error containing %q, got: %v", c.wantError, err)
			}
			if got != nil {
				t.Error("Expected nil result for empty principal string")
			}
		})
	}
}
// TestConvertStatementWithUnsupportedFields verifies that NotAction,
// NotResource, and NotPrincipal are rejected with a descriptive error —
// ignoring them would invert policy semantics and create security risk.
func TestConvertStatementWithUnsupportedFields(t *testing.T) {
	cases := []struct {
		name      string
		statement *policy.Statement
		wantError string
	}{
		{
			name: "NotAction field",
			statement: &policy.Statement{
				Sid:       "TestNotAction",
				Effect:    "Deny",
				Action:    []string{"s3:GetObject"},
				NotAction: []string{"s3:PutObject", "s3:DeleteObject"},
				Resource:  []string{"arn:aws:s3:::bucket/*"},
			},
			wantError: "NotAction is not supported",
		},
		{
			name: "NotResource field",
			statement: &policy.Statement{
				Sid:         "TestNotResource",
				Effect:      "Allow",
				Action:      []string{"s3:*"},
				Resource:    []string{"arn:aws:s3:::bucket/*"},
				NotResource: []string{"arn:aws:s3:::bucket/secret/*"},
			},
			wantError: "NotResource is not supported",
		},
		{
			name: "NotPrincipal field",
			statement: &policy.Statement{
				Sid:          "TestNotPrincipal",
				Effect:       "Deny",
				Action:       []string{"s3:*"},
				Resource:     []string{"arn:aws:s3:::bucket/*"},
				NotPrincipal: map[string]interface{}{"AWS": "arn:aws:iam::123456789012:user/Admin"},
			},
			wantError: "NotPrincipal is not supported",
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			got, err := convertStatement(c.statement)
			switch {
			case err == nil:
				t.Error("Expected error for unsupported field, got nil")
			case !strings.Contains(err.Error(), c.wantError):
				t.Errorf("Expected error containing %q, got: %v", c.wantError, err)
			}
			// The zero-value statement must come back on error.
			if got.Sid != "" || got.Effect != "" {
				t.Error("Expected zero-value struct on error")
			}
		})
	}
}
// TestConvertStatementSuccess verifies that a plain Allow statement without
// any unsupported fields converts cleanly and preserves Sid and Effect.
func TestConvertStatementSuccess(t *testing.T) {
	stmt := &policy.Statement{
		Sid:      "AllowGetObject",
		Effect:   "Allow",
		Action:   []string{"s3:GetObject"},
		Resource: []string{"arn:aws:s3:::bucket/*"},
		Principal: map[string]interface{}{
			"AWS": "arn:aws:iam::123456789012:user/Alice",
		},
	}
	got, err := convertStatement(stmt)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if got.Sid != stmt.Sid {
		t.Errorf("Expected Sid %q, got %q", stmt.Sid, got.Sid)
	}
	if string(got.Effect) != stmt.Effect {
		t.Errorf("Expected Effect %q, got %q", stmt.Effect, got.Effect)
	}
}
// TestConvertPolicyDocumentWithId verifies that a document carrying an Id is
// still converted successfully (the Id is dropped with a logged warning).
func TestConvertPolicyDocumentWithId(t *testing.T) {
	src := &policy.PolicyDocument{
		Version: "2012-10-17",
		Id:      "MyPolicyId",
		Statement: []policy.Statement{
			{
				Sid:      "AllowGetObject",
				Effect:   "Allow",
				Action:   []string{"s3:GetObject"},
				Resource: []string{"arn:aws:s3:::bucket/*"},
			},
		},
	}
	got, err := ConvertPolicyDocumentToPolicyEngine(src)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if got == nil {
		t.Fatal("Expected non-nil result")
	}
	if got.Version != src.Version {
		t.Errorf("Expected Version %q, got %q", src.Version, got.Version)
	}
	if len(got.Statement) != 1 {
		t.Errorf("Expected 1 statement, got %d", len(got.Statement))
	}
}

334
weed/s3api/s3_action_resolver.go

@ -0,0 +1,334 @@
package s3api
import (
"net/http"
"net/url"
"strings"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
)
// ResolveS3Action determines the specific S3 action string for an HTTP
// request. It is the single implementation shared by the bucket policy engine
// and the IAM integration so both resolve actions identically.
//
// Resolution inspects the request in decreasing order of specificity:
//  1. query parameters that name an operation (tagging, acl, uploadId, ...)
//  2. HTTP method combined with the resource type (object vs. bucket)
//  3. the legacy coarse-grained mapping of baseAction
//
// Parameters:
//   - r: the HTTP request; may be nil, in which case only the fallback
//     mapping is used
//   - baseAction: coarse-grained action constant (e.g. ACTION_WRITE, ACTION_READ)
//   - bucket: bucket name from the request path
//   - object: object key from the path; empty or "/" means a bucket operation
//
// The result is always a valid, non-empty S3 action string.
func ResolveS3Action(r *http.Request, baseAction string, bucket string, object string) string {
	if r == nil || r.URL == nil {
		// Without HTTP context only the coarse mapping is possible; returning it
		// keeps behavior consistent and never yields an empty string.
		return mapBaseActionToS3Format(baseAction)
	}
	// "/" is treated as a bucket-level path, not an object key.
	isObjectRequest := object != "" && object != "/"
	query := r.URL.Query()
	// Priority 1: explicit query-parameter operations override everything else.
	if action := resolveFromQueryParameters(query, r.Method, isObjectRequest); action != "" {
		return action
	}
	// Priority 2: plain method + resource-type resolution; only a non-empty
	// result is used, otherwise fall through to the legacy mapping.
	if isObjectRequest {
		if action := resolveObjectLevelAction(r.Method, baseAction); action != "" {
			return action
		}
	} else if bucket != "" {
		if action := resolveBucketLevelAction(r.Method, baseAction); action != "" {
			return action
		}
	}
	// Priority 3: fallback to the legacy coarse-grained mapping.
	return mapBaseActionToS3Format(baseAction)
}
// bucketQueryActions maps bucket-level query parameters to their corresponding S3 actions by HTTP method.
// It serves as a data-driven dispatch table for resolveFromQueryParameters; the
// parameters listed here are strictly bucket-level, so the table is only
// consulted when the request carries no object key. Methods absent from an
// entry (e.g. DELETE for "versioning") intentionally have no mapping and fall
// through to the generic resolution.
var bucketQueryActions = map[string]map[string]string{
	"policy": {
		http.MethodGet:    s3_constants.S3_ACTION_GET_BUCKET_POLICY,
		http.MethodPut:    s3_constants.S3_ACTION_PUT_BUCKET_POLICY,
		http.MethodDelete: s3_constants.S3_ACTION_DELETE_BUCKET_POLICY,
	},
	"cors": {
		http.MethodGet:    s3_constants.S3_ACTION_GET_BUCKET_CORS,
		http.MethodPut:    s3_constants.S3_ACTION_PUT_BUCKET_CORS,
		http.MethodDelete: s3_constants.S3_ACTION_DELETE_BUCKET_CORS,
	},
	"lifecycle": {
		http.MethodGet:    s3_constants.S3_ACTION_GET_BUCKET_LIFECYCLE,
		http.MethodPut:    s3_constants.S3_ACTION_PUT_BUCKET_LIFECYCLE,
		http.MethodDelete: s3_constants.S3_ACTION_PUT_BUCKET_LIFECYCLE, // DELETE uses same permission as PUT
	},
	"versioning": {
		http.MethodGet: s3_constants.S3_ACTION_GET_BUCKET_VERSIONING,
		http.MethodPut: s3_constants.S3_ACTION_PUT_BUCKET_VERSIONING,
	},
	"notification": {
		http.MethodGet: s3_constants.S3_ACTION_GET_BUCKET_NOTIFICATION,
		http.MethodPut: s3_constants.S3_ACTION_PUT_BUCKET_NOTIFICATION,
	},
	"object-lock": {
		http.MethodGet: s3_constants.S3_ACTION_GET_BUCKET_OBJECT_LOCK,
		http.MethodPut: s3_constants.S3_ACTION_PUT_BUCKET_OBJECT_LOCK,
	},
}
// resolveFromQueryParameters checks query parameters to determine specific S3 actions.
// It returns "" when no query parameter names a specific operation, letting the
// caller fall back to method/resource-based resolution. The checks are ordered
// from most to least specific and that order is load-bearing (e.g. uploadId
// must be examined before uploads); hasObject gates whether ambiguous
// parameters are read as object-level or bucket-level operations.
func resolveFromQueryParameters(query url.Values, method string, hasObject bool) string {
	// Multipart upload operations with uploadId parameter (object-level only)
	// All multipart operations require an object in the path
	if hasObject && query.Has("uploadId") {
		switch method {
		case http.MethodPut:
			// UploadPart needs partNumber too; a bare PUT with uploadId falls through.
			if query.Has("partNumber") {
				return s3_constants.S3_ACTION_UPLOAD_PART
			}
		case http.MethodPost:
			return s3_constants.S3_ACTION_COMPLETE_MULTIPART
		case http.MethodDelete:
			return s3_constants.S3_ACTION_ABORT_MULTIPART
		case http.MethodGet:
			return s3_constants.S3_ACTION_LIST_PARTS
		}
	}
	// Multipart upload operations
	// CreateMultipartUpload: POST /bucket/object?uploads (object-level)
	// ListMultipartUploads: GET /bucket?uploads (bucket-level)
	if query.Has("uploads") {
		if method == http.MethodPost && hasObject {
			return s3_constants.S3_ACTION_CREATE_MULTIPART
		} else if method == http.MethodGet && !hasObject {
			return s3_constants.S3_ACTION_LIST_MULTIPART_UPLOADS
		}
	}
	// ACL operations: object vs. bucket variant is chosen by hasObject.
	if query.Has("acl") {
		switch method {
		case http.MethodGet, http.MethodHead:
			if hasObject {
				return s3_constants.S3_ACTION_GET_OBJECT_ACL
			}
			return s3_constants.S3_ACTION_GET_BUCKET_ACL
		case http.MethodPut:
			if hasObject {
				return s3_constants.S3_ACTION_PUT_OBJECT_ACL
			}
			return s3_constants.S3_ACTION_PUT_BUCKET_ACL
		}
	}
	// Tagging operations: object vs. bucket variant is chosen by hasObject.
	if query.Has("tagging") {
		switch method {
		case http.MethodGet:
			if hasObject {
				return s3_constants.S3_ACTION_GET_OBJECT_TAGGING
			}
			return s3_constants.S3_ACTION_GET_BUCKET_TAGGING
		case http.MethodPut:
			if hasObject {
				return s3_constants.S3_ACTION_PUT_OBJECT_TAGGING
			}
			return s3_constants.S3_ACTION_PUT_BUCKET_TAGGING
		case http.MethodDelete:
			if hasObject {
				return s3_constants.S3_ACTION_DELETE_OBJECT_TAGGING
			}
			return s3_constants.S3_ACTION_DELETE_BUCKET_TAGGING
		}
	}
	// Versioning operations - distinguish between versionId (specific version) and versions (list versions)
	// versionId: Used to access/delete a specific version of an object (e.g. GET /bucket/key?versionId=xyz)
	if query.Has("versionId") {
		if hasObject {
			switch method {
			case http.MethodGet, http.MethodHead:
				return s3_constants.S3_ACTION_GET_OBJECT_VERSION
			case http.MethodDelete:
				return s3_constants.S3_ACTION_DELETE_OBJECT_VERSION
			}
		}
	}
	// versions: Used to list all versions of objects in a bucket (e.g. GET /bucket?versions)
	if query.Has("versions") {
		if method == http.MethodGet && !hasObject {
			return s3_constants.S3_ACTION_LIST_BUCKET_VERSIONS
		}
	}
	// Check bucket-level query parameters using data-driven approach
	// These are strictly bucket-level operations, so only apply when !hasObject
	if !hasObject {
		for param, actions := range bucketQueryActions {
			if query.Has(param) {
				if action, ok := actions[method]; ok {
					return action
				}
			}
		}
	}
	// Location (GET only, bucket-level)
	if query.Has("location") && method == http.MethodGet && !hasObject {
		return s3_constants.S3_ACTION_GET_BUCKET_LOCATION
	}
	// Object retention and legal hold operations (object-level only)
	if hasObject {
		if query.Has("retention") {
			switch method {
			case http.MethodGet:
				return s3_constants.S3_ACTION_GET_OBJECT_RETENTION
			case http.MethodPut:
				return s3_constants.S3_ACTION_PUT_OBJECT_RETENTION
			}
		}
		if query.Has("legal-hold") {
			switch method {
			case http.MethodGet:
				return s3_constants.S3_ACTION_GET_OBJECT_LEGAL_HOLD
			case http.MethodPut:
				return s3_constants.S3_ACTION_PUT_OBJECT_LEGAL_HOLD
			}
		}
	}
	// Batch delete - POST request with delete query parameter (bucket-level operation)
	// Example: POST /bucket?delete (not POST /bucket/object?delete)
	if query.Has("delete") && method == http.MethodPost && !hasObject {
		return s3_constants.S3_ACTION_DELETE_OBJECT
	}
	// No query parameter named a specific operation.
	return ""
}
// resolveObjectLevelAction maps an HTTP method plus a coarse base action to the
// specific object-level S3 action string. It returns "" when no mapping applies
// so the caller can fall back to a coarser resolution.
func resolveObjectLevelAction(method string, baseAction string) string {
	isRead := baseAction == s3_constants.ACTION_READ
	isWrite := baseAction == s3_constants.ACTION_WRITE

	switch {
	case (method == http.MethodGet || method == http.MethodHead) && isRead:
		return s3_constants.S3_ACTION_GET_OBJECT
	case method == http.MethodPut && isWrite:
		// Note: CopyObject operations also use s3:PutObject permission (same as MinIO/AWS).
		// Copy requires s3:PutObject on destination and s3:GetObject on source.
		return s3_constants.S3_ACTION_PUT_OBJECT
	case method == http.MethodDelete && isWrite:
		// CRITICAL: Map DELETE method to s3:DeleteObject.
		// This fixes the architectural limitation where ACTION_WRITE was mapped to s3:PutObject.
		return s3_constants.S3_ACTION_DELETE_OBJECT
	case method == http.MethodPost && isWrite:
		// POST without query params is typically multipart or form upload.
		return s3_constants.S3_ACTION_PUT_OBJECT
	}
	return ""
}
// resolveBucketLevelAction maps an HTTP method plus a coarse base action to the
// specific bucket-level S3 action string. It returns "" when no mapping applies
// so the caller can fall back to a coarser resolution.
func resolveBucketLevelAction(method string, baseAction string) string {
	if method == http.MethodGet || method == http.MethodHead {
		// Reading a bucket is a listing operation.
		if baseAction == s3_constants.ACTION_LIST || baseAction == s3_constants.ACTION_READ {
			return s3_constants.S3_ACTION_LIST_BUCKET
		}
		return ""
	}
	if method == http.MethodPut && baseAction == s3_constants.ACTION_WRITE {
		return s3_constants.S3_ACTION_CREATE_BUCKET
	}
	if method == http.MethodDelete && baseAction == s3_constants.ACTION_DELETE_BUCKET {
		return s3_constants.S3_ACTION_DELETE_BUCKET
	}
	// POST to a bucket is typically a form upload, which writes an object.
	if method == http.MethodPost && baseAction == s3_constants.ACTION_WRITE {
		return s3_constants.S3_ACTION_PUT_OBJECT
	}
	return ""
}
// coarseToS3Action translates coarse-grained base actions to their most common
// AWS-style S3 equivalent. The s3_constants values ARE the string values
// (e.g. ACTION_READ == "Read"), so they serve directly as lookup keys.
var coarseToS3Action = map[string]string{
	s3_constants.ACTION_READ:                          s3_constants.S3_ACTION_GET_OBJECT,
	s3_constants.ACTION_WRITE:                         s3_constants.S3_ACTION_PUT_OBJECT,
	s3_constants.ACTION_LIST:                          s3_constants.S3_ACTION_LIST_BUCKET,
	s3_constants.ACTION_TAGGING:                       s3_constants.S3_ACTION_PUT_OBJECT_TAGGING,
	s3_constants.ACTION_ADMIN:                         s3_constants.S3_ACTION_ALL,
	s3_constants.ACTION_READ_ACP:                      s3_constants.S3_ACTION_GET_OBJECT_ACL,
	s3_constants.ACTION_WRITE_ACP:                     s3_constants.S3_ACTION_PUT_OBJECT_ACL,
	s3_constants.ACTION_DELETE_BUCKET:                 s3_constants.S3_ACTION_DELETE_BUCKET,
	s3_constants.ACTION_BYPASS_GOVERNANCE_RETENTION:   s3_constants.S3_ACTION_BYPASS_GOVERNANCE,
	s3_constants.ACTION_GET_OBJECT_RETENTION:          s3_constants.S3_ACTION_GET_OBJECT_RETENTION,
	s3_constants.ACTION_PUT_OBJECT_RETENTION:          s3_constants.S3_ACTION_PUT_OBJECT_RETENTION,
	s3_constants.ACTION_GET_OBJECT_LEGAL_HOLD:         s3_constants.S3_ACTION_GET_OBJECT_LEGAL_HOLD,
	s3_constants.ACTION_PUT_OBJECT_LEGAL_HOLD:         s3_constants.S3_ACTION_PUT_OBJECT_LEGAL_HOLD,
	s3_constants.ACTION_GET_BUCKET_OBJECT_LOCK_CONFIG: s3_constants.S3_ACTION_GET_BUCKET_OBJECT_LOCK,
	s3_constants.ACTION_PUT_BUCKET_OBJECT_LOCK_CONFIG: s3_constants.S3_ACTION_PUT_BUCKET_OBJECT_LOCK,
}

// mapBaseActionToS3Format converts a coarse-grained base action into AWS S3
// action format. This is the fallback used when no more specific resolution
// was found for the request.
func mapBaseActionToS3Format(baseAction string) string {
	// Actions already expressed in AWS form pass through untouched.
	if strings.HasPrefix(baseAction, "s3:") {
		return baseAction
	}
	if s3Action, ok := coarseToS3Action[baseAction]; ok {
		return s3Action
	}
	// For unknown actions, prefix with s3: to maintain format consistency.
	return "s3:" + baseAction
}

84
weed/s3api/s3_constants/s3_action_strings.go

@ -0,0 +1,84 @@
package s3_constants
// S3 action strings for bucket policy evaluation.
// These match the official AWS S3 action format ("s3:<OperationName>") used in
// IAM and bucket policy documents.
const (
	// Object operations
	S3_ACTION_GET_OBJECT            = "s3:GetObject"
	S3_ACTION_PUT_OBJECT            = "s3:PutObject"
	S3_ACTION_DELETE_OBJECT         = "s3:DeleteObject"
	S3_ACTION_DELETE_OBJECT_VERSION = "s3:DeleteObjectVersion"
	S3_ACTION_GET_OBJECT_VERSION    = "s3:GetObjectVersion"

	// Object ACL operations
	S3_ACTION_GET_OBJECT_ACL = "s3:GetObjectAcl"
	S3_ACTION_PUT_OBJECT_ACL = "s3:PutObjectAcl"

	// Object tagging operations
	S3_ACTION_GET_OBJECT_TAGGING    = "s3:GetObjectTagging"
	S3_ACTION_PUT_OBJECT_TAGGING    = "s3:PutObjectTagging"
	S3_ACTION_DELETE_OBJECT_TAGGING = "s3:DeleteObjectTagging"

	// Object retention and legal hold
	S3_ACTION_GET_OBJECT_RETENTION  = "s3:GetObjectRetention"
	S3_ACTION_PUT_OBJECT_RETENTION  = "s3:PutObjectRetention"
	S3_ACTION_GET_OBJECT_LEGAL_HOLD = "s3:GetObjectLegalHold"
	S3_ACTION_PUT_OBJECT_LEGAL_HOLD = "s3:PutObjectLegalHold"
	S3_ACTION_BYPASS_GOVERNANCE     = "s3:BypassGovernanceRetention"

	// Multipart upload operations
	S3_ACTION_CREATE_MULTIPART   = "s3:CreateMultipartUpload"
	S3_ACTION_UPLOAD_PART        = "s3:UploadPart"
	S3_ACTION_COMPLETE_MULTIPART = "s3:CompleteMultipartUpload"
	S3_ACTION_ABORT_MULTIPART    = "s3:AbortMultipartUpload"
	// AWS names the "list parts of an in-progress upload" action
	// "s3:ListMultipartUploadParts" (not "s3:ListParts").
	S3_ACTION_LIST_PARTS = "s3:ListMultipartUploadParts"

	// Bucket operations
	S3_ACTION_CREATE_BUCKET        = "s3:CreateBucket"
	S3_ACTION_DELETE_BUCKET        = "s3:DeleteBucket"
	S3_ACTION_LIST_BUCKET          = "s3:ListBucket"
	S3_ACTION_LIST_BUCKET_VERSIONS = "s3:ListBucketVersions"
	// AWS names the "list in-progress uploads in a bucket" action
	// "s3:ListBucketMultipartUploads".
	S3_ACTION_LIST_MULTIPART_UPLOADS = "s3:ListBucketMultipartUploads"

	// Bucket ACL operations
	S3_ACTION_GET_BUCKET_ACL = "s3:GetBucketAcl"
	S3_ACTION_PUT_BUCKET_ACL = "s3:PutBucketAcl"

	// Bucket policy operations
	S3_ACTION_GET_BUCKET_POLICY    = "s3:GetBucketPolicy"
	S3_ACTION_PUT_BUCKET_POLICY    = "s3:PutBucketPolicy"
	S3_ACTION_DELETE_BUCKET_POLICY = "s3:DeleteBucketPolicy"

	// Bucket tagging operations
	S3_ACTION_GET_BUCKET_TAGGING    = "s3:GetBucketTagging"
	S3_ACTION_PUT_BUCKET_TAGGING    = "s3:PutBucketTagging"
	S3_ACTION_DELETE_BUCKET_TAGGING = "s3:DeleteBucketTagging"

	// Bucket CORS operations
	S3_ACTION_GET_BUCKET_CORS    = "s3:GetBucketCors"
	S3_ACTION_PUT_BUCKET_CORS    = "s3:PutBucketCors"
	S3_ACTION_DELETE_BUCKET_CORS = "s3:DeleteBucketCors"

	// Bucket lifecycle operations
	// Note: Both PUT and DELETE lifecycle operations use s3:PutLifecycleConfiguration
	S3_ACTION_GET_BUCKET_LIFECYCLE = "s3:GetLifecycleConfiguration"
	S3_ACTION_PUT_BUCKET_LIFECYCLE = "s3:PutLifecycleConfiguration"

	// Bucket versioning operations
	S3_ACTION_GET_BUCKET_VERSIONING = "s3:GetBucketVersioning"
	S3_ACTION_PUT_BUCKET_VERSIONING = "s3:PutBucketVersioning"

	// Bucket location
	S3_ACTION_GET_BUCKET_LOCATION = "s3:GetBucketLocation"

	// Bucket notification
	S3_ACTION_GET_BUCKET_NOTIFICATION = "s3:GetBucketNotification"
	S3_ACTION_PUT_BUCKET_NOTIFICATION = "s3:PutBucketNotification"

	// Bucket object lock operations
	S3_ACTION_GET_BUCKET_OBJECT_LOCK = "s3:GetBucketObjectLockConfiguration"
	S3_ACTION_PUT_BUCKET_OBJECT_LOCK = "s3:PutBucketObjectLockConfiguration"

	// Wildcard for all S3 actions
	S3_ACTION_ALL = "s3:*"
)

316
weed/s3api/s3_granular_action_security_test.go

@ -3,12 +3,49 @@ package s3api
import (
"net/http"
"net/url"
"strings"
"testing"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
"github.com/stretchr/testify/assert"
)
// createTestRequestWithQueryParams builds a test HTTP request for the given
// method/path with the supplied query parameters attached, and derives the
// bucket and object key from the path. Shared helper to keep test setup short.
func createTestRequestWithQueryParams(method, path string, queryParams map[string]string) (*http.Request, string, string, error) {
	parsed, err := url.Parse(path)
	if err != nil {
		return nil, "", "", err
	}

	// Merge the supplied parameters into any that came with the path.
	values := parsed.Query()
	for name, value := range queryParams {
		values.Add(name, value)
	}
	parsed.RawQuery = values.Encode()

	req, err := http.NewRequest(method, parsed.String(), nil)
	if err != nil {
		return nil, "", "", err
	}

	// Bucket is the first path segment; object is "/" plus everything after it.
	segments := strings.SplitN(strings.TrimPrefix(parsed.Path, "/"), "/", 2)
	bucket := segments[0]
	object := ""
	if len(segments) == 2 {
		object = "/" + segments[1]
	}
	return req, bucket, object, nil
}
}
// TestGranularActionMappingSecurity demonstrates how the new granular action mapping
// fixes critical security issues that existed with the previous coarse mapping
func TestGranularActionMappingSecurity(t *testing.T) {
@ -83,10 +120,10 @@ func TestGranularActionMappingSecurity(t *testing.T) {
bucket: "inventory-bucket",
objectKey: "",
queryParams: map[string]string{"uploads": ""},
description: "Listing multipart uploads should map to s3:ListMultipartUploads",
description: "Listing multipart uploads should map to s3:ListBucketMultipartUploads",
problemWithOldMapping: "Old mapping would use generic s3:ListBucket for all bucket operations, " +
"preventing fine-grained control over who can see ongoing multipart operations",
granularActionResult: "s3:ListMultipartUploads",
granularActionResult: "s3:ListBucketMultipartUploads",
},
{
name: "delete_object_tagging_precision",
@ -116,8 +153,8 @@ func TestGranularActionMappingSecurity(t *testing.T) {
}
req.URL.RawQuery = query.Encode()
// Test the new granular action determination
result := determineGranularS3Action(req, s3_constants.ACTION_WRITE, tt.bucket, tt.objectKey)
// Test the granular action determination
result := ResolveS3Action(req, string(s3_constants.ACTION_WRITE), tt.bucket, tt.objectKey)
assert.Equal(t, tt.granularActionResult, result,
"Security Fix Test: %s\n"+
@ -191,7 +228,7 @@ func TestBackwardCompatibilityFallback(t *testing.T) {
URL: &url.URL{Path: "/" + tt.bucket + "/" + tt.objectKey},
}
result := determineGranularS3Action(req, tt.fallbackAction, tt.bucket, tt.objectKey)
result := ResolveS3Action(req, string(tt.fallbackAction), tt.bucket, tt.objectKey)
assert.Equal(t, tt.expectedResult, result,
"Backward Compatibility Test: %s\nDescription: %s\nExpected: %s, Got: %s",
@ -292,16 +329,281 @@ func TestPolicyEnforcementScenarios(t *testing.T) {
}
req.URL.RawQuery = query.Encode()
result := determineGranularS3Action(req, s3_constants.ACTION_WRITE, scenario.bucket, scenario.objectKey)
result := ResolveS3Action(req, string(s3_constants.ACTION_WRITE), scenario.bucket, scenario.objectKey)
assert.Equal(t, scenario.expectedAction, result,
"Policy Enforcement Scenario: %s\nExpected Action: %s, Got: %s",
scenario.name, scenario.expectedAction, result)
t.Logf("🔒 SECURITY SCENARIO: %s", scenario.name)
t.Logf("SECURITY SCENARIO: %s", scenario.name)
t.Logf(" Expected Action: %s", result)
t.Logf(" Security Benefit: %s", scenario.securityBenefit)
t.Logf(" Policy Example:\n%s", scenario.policyExample)
})
}
}
// TestDeleteObjectPolicyEnforcement demonstrates that the architectural limitation
// has been fixed. Previously, DeleteObject operations were mapped to s3:PutObject,
// preventing fine-grained policies from working.
func TestDeleteObjectPolicyEnforcement(t *testing.T) {
	type resolutionCase struct {
		name             string
		method           string
		bucket           string
		objectKey        string
		baseAction       Action
		expectedS3Action string
		policyScenario   string
	}

	cases := []resolutionCase{
		{
			name:             "delete_object_maps_to_correct_action",
			method:           http.MethodDelete,
			bucket:           "test-bucket",
			objectKey:        "test-object.txt",
			baseAction:       s3_constants.ACTION_WRITE,
			expectedS3Action: "s3:DeleteObject",
			policyScenario:   "Policy that denies s3:DeleteObject but allows s3:PutObject should now work correctly",
		},
		{
			name:             "put_object_maps_to_correct_action",
			method:           http.MethodPut,
			bucket:           "test-bucket",
			objectKey:        "test-object.txt",
			baseAction:       s3_constants.ACTION_WRITE,
			expectedS3Action: "s3:PutObject",
			policyScenario:   "Policy that allows s3:PutObject but denies s3:DeleteObject should allow uploads",
		},
		{
			name:             "batch_delete_maps_to_delete_action",
			method:           http.MethodPost,
			bucket:           "test-bucket",
			objectKey:        "",
			baseAction:       s3_constants.ACTION_WRITE,
			expectedS3Action: "s3:DeleteObject",
			policyScenario:   "Batch delete operations should also map to s3:DeleteObject",
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			// Build the request under test.
			req := &http.Request{
				Method: tc.method,
				URL:    &url.URL{Path: "/" + tc.bucket + "/" + tc.objectKey},
				Header: http.Header{},
			}

			// Batch delete is signalled by a POST carrying the "delete" query parameter.
			if tc.method == http.MethodPost && tc.expectedS3Action == "s3:DeleteObject" {
				q := req.URL.Query()
				q.Set("delete", "")
				req.URL.RawQuery = q.Encode()
			}

			got := ResolveS3Action(req, string(tc.baseAction), tc.bucket, tc.objectKey)

			assert.Equal(t, tc.expectedS3Action, got,
				"Action Resolution Test: %s\n"+
					"HTTP Method: %s\n"+
					"Base Action: %s\n"+
					"Policy Scenario: %s\n"+
					"Expected: %s, Got: %s",
				tc.name, tc.method, tc.baseAction, tc.policyScenario, tc.expectedS3Action, got)

			t.Logf("ARCHITECTURAL FIX VERIFIED: %s", tc.name)
			t.Logf(" Method: %s -> S3 Action: %s", tc.method, got)
			t.Logf(" Policy Scenario: %s", tc.policyScenario)
		})
	}
}
// TestFineGrainedPolicyExample demonstrates a real-world use case that now works.
// This test verifies the exact scenario described in the original TODO comment:
// a bucket policy that allows uploads (s3:PutObject) while denying deletions
// (s3:DeleteObject) requires the resolver to produce distinct actions for the
// two operations, which it now does.
func TestFineGrainedPolicyExample(t *testing.T) {
	// Example policy: Allow PutObject but Deny DeleteObject.
	// This is a common pattern for "append-only" buckets or write-once scenarios.
	// The policy text is illustrative only; this test checks action resolution,
	// not policy evaluation.
	policyExample := `{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "AllowObjectUploads",
"Effect": "Allow",
"Action": "s3:PutObject",
"Resource": "arn:aws:s3:::test-bucket/*"
},
{
"Sid": "DenyObjectDeletion",
"Effect": "Deny",
"Action": "s3:DeleteObject",
"Resource": "arn:aws:s3:::test-bucket/*"
}
]
}`
	testCases := []struct {
		operation       string            // human-readable operation label (also the subtest name)
		method          string            // HTTP method of the simulated request
		objectKey       string            // object key within test-bucket ("" for bucket-level requests)
		queryParams     map[string]string // query parameters attached to the request
		baseAction      Action            // coarse action the handler would pass in
		expectedAction  string            // granular S3 action the resolver must produce
		shouldBeAllowed bool              // whether the example policy above would permit the operation
		rationale       string            // explanation logged alongside the result
	}{
		{
			operation:       "PUT object",
			method:          http.MethodPut,
			objectKey:       "document.txt",
			queryParams:     map[string]string{},
			baseAction:      s3_constants.ACTION_WRITE,
			expectedAction:  "s3:PutObject",
			shouldBeAllowed: true,
			rationale:       "Policy explicitly allows s3:PutObject - upload should succeed",
		},
		{
			operation:       "DELETE object",
			method:          http.MethodDelete,
			objectKey:       "document.txt",
			queryParams:     map[string]string{},
			baseAction:      s3_constants.ACTION_WRITE,
			expectedAction:  "s3:DeleteObject",
			shouldBeAllowed: false,
			rationale:       "Policy explicitly denies s3:DeleteObject - deletion should be blocked",
		},
		{
			operation:       "Batch DELETE",
			method:          http.MethodPost,
			objectKey:       "",
			queryParams:     map[string]string{"delete": ""},
			baseAction:      s3_constants.ACTION_WRITE,
			expectedAction:  "s3:DeleteObject",
			shouldBeAllowed: false,
			rationale:       "Policy explicitly denies s3:DeleteObject - batch deletion should be blocked",
		},
	}
	t.Logf("\nTesting Fine-Grained Policy:")
	t.Logf("%s\n", policyExample)
	for _, tc := range testCases {
		t.Run(tc.operation, func(t *testing.T) {
			// Create HTTP request for the scenario.
			req := &http.Request{
				Method: tc.method,
				URL:    &url.URL{Path: "/test-bucket/" + tc.objectKey},
				Header: http.Header{},
			}
			// Add query parameters (e.g. "delete" for batch delete).
			query := req.URL.Query()
			for key, value := range tc.queryParams {
				query.Set(key, value)
			}
			req.URL.RawQuery = query.Encode()
			// Determine the S3 action.
			actualAction := ResolveS3Action(req, string(tc.baseAction), "test-bucket", tc.objectKey)
			// Verify the action mapping is correct.
			assert.Equal(t, tc.expectedAction, actualAction,
				"Operation: %s\nExpected Action: %s\nGot: %s",
				tc.operation, tc.expectedAction, actualAction)
			// Log the result with the would-be policy outcome.
			allowStatus := "[DENIED]"
			if tc.shouldBeAllowed {
				allowStatus = "[ALLOWED]"
			}
			t.Logf("%s %s -> %s", allowStatus, tc.operation, actualAction)
			t.Logf(" Rationale: %s", tc.rationale)
		})
	}
	t.Logf("\nARCHITECTURAL LIMITATION RESOLVED!")
	t.Logf(" Fine-grained policies like 'allow PUT but deny DELETE' now work correctly")
	t.Logf(" The policy engine can distinguish between s3:PutObject and s3:DeleteObject")
}
// TestCoarseActionResolution verifies that ResolveS3Action correctly maps the
// coarse-grained ACTION_WRITE constant to specific S3 actions based on HTTP
// context (method plus query parameters). This demonstrates the fix for the
// architectural limitation where ACTION_WRITE was always mapped to s3:PutObject,
// preventing fine-grained policies from working.
func TestCoarseActionResolution(t *testing.T) {
	type coarseCase struct {
		name             string
		method           string
		path             string
		queryParams      map[string]string
		coarseAction     Action
		expectedS3Action string
		policyScenario   string
	}

	cases := []coarseCase{
		{
			name:             "PUT_with_ACTION_WRITE_resolves_to_PutObject",
			method:           http.MethodPut,
			path:             "/test-bucket/test-file.txt",
			queryParams:      map[string]string{},
			coarseAction:     s3_constants.ACTION_WRITE,
			expectedS3Action: "s3:PutObject",
			policyScenario:   "Policy allowing s3:PutObject should match PUT requests",
		},
		{
			name:             "DELETE_with_ACTION_WRITE_resolves_to_DeleteObject",
			method:           http.MethodDelete,
			path:             "/test-bucket/test-file.txt",
			queryParams:      map[string]string{},
			coarseAction:     s3_constants.ACTION_WRITE,
			expectedS3Action: "s3:DeleteObject",
			policyScenario:   "Policy denying s3:DeleteObject should block DELETE requests",
		},
		{
			name:             "batch_DELETE_with_ACTION_WRITE_resolves_to_DeleteObject",
			method:           http.MethodPost,
			path:             "/test-bucket",
			queryParams:      map[string]string{"delete": ""},
			coarseAction:     s3_constants.ACTION_WRITE,
			expectedS3Action: "s3:DeleteObject",
			policyScenario:   "Policy denying s3:DeleteObject should block batch DELETE",
		},
		{
			name:             "POST_multipart_with_ACTION_WRITE_resolves_to_CreateMultipartUpload",
			method:           http.MethodPost,
			path:             "/test-bucket/large-file.mp4",
			queryParams:      map[string]string{"uploads": ""},
			coarseAction:     s3_constants.ACTION_WRITE,
			expectedS3Action: "s3:CreateMultipartUpload",
			policyScenario:   "Policy allowing s3:PutObject but denying s3:CreateMultipartUpload can now work",
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			// Build the request and derive bucket/object from the path.
			req, bucket, object, err := createTestRequestWithQueryParams(tc.method, tc.path, tc.queryParams)
			assert.NoError(t, err)

			// Resolve the coarse action against the HTTP context.
			resolved := ResolveS3Action(req, string(tc.coarseAction), bucket, object)

			assert.Equal(t, tc.expectedS3Action, resolved,
				"Coarse action %s with method %s should resolve to %s",
				tc.coarseAction, tc.method, tc.expectedS3Action)

			t.Logf("SUCCESS: %s", tc.name)
			t.Logf(" Input: %s %s + ACTION_WRITE", tc.method, tc.path)
			t.Logf(" Output: %s", resolved)
			t.Logf(" Policy impact: %s", tc.policyScenario)
		})
	}

	t.Log("\n=== ARCHITECTURAL LIMITATION RESOLVED ===")
	t.Log("Handlers can use coarse ACTION_WRITE constant, and the context-aware")
	t.Log("resolver will map it to the correct specific S3 action (PutObject,")
	t.Log("DeleteObject, CreateMultipartUpload, etc.) based on HTTP method and")
	t.Log("query parameters. This enables fine-grained bucket policies like:")
	t.Log(" - Allow s3:PutObject but Deny s3:DeleteObject (append-only)")
	t.Log(" - Allow regular uploads but Deny multipart (size limits)")
}

173
weed/s3api/s3_iam_middleware.go

@ -184,7 +184,7 @@ func (s3iam *S3IAMIntegration) AuthorizeAction(ctx context.Context, identity *IA
requestContext := extractRequestContext(r)
// Determine the specific S3 action based on the HTTP request details
specificAction := determineGranularS3Action(r, action, bucket, objectKey)
specificAction := ResolveS3Action(r, string(action), bucket, objectKey)
// Create action request
actionRequest := &integration.ActionRequest{
@ -246,176 +246,11 @@ func buildS3ResourceArn(bucket string, objectKey string) string {
}
// Remove leading slash from object key if present
if strings.HasPrefix(objectKey, "/") {
objectKey = objectKey[1:]
}
objectKey = strings.TrimPrefix(objectKey, "/")
return "arn:aws:s3:::" + bucket + "/" + objectKey
}
// determineGranularS3Action determines the specific S3 IAM action based on HTTP request details.
// It inspects the HTTP method and query parameters to produce an operation-specific action
// string (e.g. "s3:GetObjectTagging") for accurate IAM policy enforcement, falling back to
// mapLegacyActionToIAM(fallbackAction) when no granular mapping applies.
//
// NOTE(review): within each method branch the query-parameter checks are ordered; when a
// request carries more than one of these parameters, the first match wins (e.g. under GET,
// "versions" is checked before "uploadId").
func determineGranularS3Action(r *http.Request, fallbackAction Action, bucket string, objectKey string) string {
	method := r.Method
	query := r.URL.Query()

	// Check if there are specific query parameters indicating granular operations.
	// If there are, always use granular mapping regardless of method-action alignment.
	hasGranularIndicators := hasSpecificQueryParameters(query)

	// Only check for method-action mismatch when there are NO granular indicators.
	// This provides fallback behavior for cases where the HTTP method doesn't align
	// with the intended action.
	if !hasGranularIndicators && isMethodActionMismatch(method, fallbackAction) {
		return mapLegacyActionToIAM(fallbackAction)
	}

	// Handle object-level operations when method and action are aligned.
	if objectKey != "" && objectKey != "/" {
		switch method {
		case "GET", "HEAD":
			// Object read operations - check for specific query parameters
			if _, hasAcl := query["acl"]; hasAcl {
				return "s3:GetObjectAcl"
			}
			if _, hasTagging := query["tagging"]; hasTagging {
				return "s3:GetObjectTagging"
			}
			if _, hasRetention := query["retention"]; hasRetention {
				return "s3:GetObjectRetention"
			}
			if _, hasLegalHold := query["legal-hold"]; hasLegalHold {
				return "s3:GetObjectLegalHold"
			}
			if _, hasVersions := query["versions"]; hasVersions {
				return "s3:GetObjectVersion"
			}
			// uploadId on GET means "list the parts of an in-progress multipart upload"
			if _, hasUploadId := query["uploadId"]; hasUploadId {
				return "s3:ListParts"
			}
			// Default object read
			return "s3:GetObject"

		case "PUT", "POST":
			// Object write operations - check for specific query parameters
			if _, hasAcl := query["acl"]; hasAcl {
				return "s3:PutObjectAcl"
			}
			if _, hasTagging := query["tagging"]; hasTagging {
				return "s3:PutObjectTagging"
			}
			if _, hasRetention := query["retention"]; hasRetention {
				return "s3:PutObjectRetention"
			}
			if _, hasLegalHold := query["legal-hold"]; hasLegalHold {
				return "s3:PutObjectLegalHold"
			}
			// Check for multipart upload operations
			if _, hasUploads := query["uploads"]; hasUploads {
				return "s3:CreateMultipartUpload"
			}
			if _, hasUploadId := query["uploadId"]; hasUploadId {
				// uploadId + partNumber uploads a part; uploadId alone completes the upload
				if _, hasPartNumber := query["partNumber"]; hasPartNumber {
					return "s3:UploadPart"
				}
				return "s3:CompleteMultipartUpload" // Complete multipart upload
			}
			// Default object write
			return "s3:PutObject"

		case "DELETE":
			// Object delete operations
			if _, hasTagging := query["tagging"]; hasTagging {
				return "s3:DeleteObjectTagging"
			}
			if _, hasUploadId := query["uploadId"]; hasUploadId {
				return "s3:AbortMultipartUpload"
			}
			// Default object delete
			return "s3:DeleteObject"
		}
	}

	// Handle bucket-level operations (no object key in the request).
	if bucket != "" {
		switch method {
		case "GET", "HEAD":
			// Bucket read operations - check for specific query parameters
			if _, hasAcl := query["acl"]; hasAcl {
				return "s3:GetBucketAcl"
			}
			if _, hasPolicy := query["policy"]; hasPolicy {
				return "s3:GetBucketPolicy"
			}
			if _, hasTagging := query["tagging"]; hasTagging {
				return "s3:GetBucketTagging"
			}
			if _, hasCors := query["cors"]; hasCors {
				return "s3:GetBucketCors"
			}
			if _, hasVersioning := query["versioning"]; hasVersioning {
				return "s3:GetBucketVersioning"
			}
			if _, hasNotification := query["notification"]; hasNotification {
				return "s3:GetBucketNotification"
			}
			if _, hasObjectLock := query["object-lock"]; hasObjectLock {
				return "s3:GetBucketObjectLockConfiguration"
			}
			if _, hasUploads := query["uploads"]; hasUploads {
				return "s3:ListMultipartUploads"
			}
			if _, hasVersions := query["versions"]; hasVersions {
				return "s3:ListBucketVersions"
			}
			// Default bucket read/list
			return "s3:ListBucket"

		case "PUT":
			// Bucket write operations - check for specific query parameters
			if _, hasAcl := query["acl"]; hasAcl {
				return "s3:PutBucketAcl"
			}
			if _, hasPolicy := query["policy"]; hasPolicy {
				return "s3:PutBucketPolicy"
			}
			if _, hasTagging := query["tagging"]; hasTagging {
				return "s3:PutBucketTagging"
			}
			if _, hasCors := query["cors"]; hasCors {
				return "s3:PutBucketCors"
			}
			if _, hasVersioning := query["versioning"]; hasVersioning {
				return "s3:PutBucketVersioning"
			}
			if _, hasNotification := query["notification"]; hasNotification {
				return "s3:PutBucketNotification"
			}
			if _, hasObjectLock := query["object-lock"]; hasObjectLock {
				return "s3:PutBucketObjectLockConfiguration"
			}
			// Default bucket creation
			return "s3:CreateBucket"

		case "DELETE":
			// Bucket delete operations - check for specific query parameters
			if _, hasPolicy := query["policy"]; hasPolicy {
				return "s3:DeleteBucketPolicy"
			}
			if _, hasTagging := query["tagging"]; hasTagging {
				return "s3:DeleteBucketTagging"
			}
			if _, hasCors := query["cors"]; hasCors {
				return "s3:DeleteBucketCors"
			}
			// Default bucket delete
			return "s3:DeleteBucket"
		}
	}

	// Fallback to legacy mapping for specific known actions
	return mapLegacyActionToIAM(fallbackAction)
}
// hasSpecificQueryParameters checks if the request has query parameters that indicate specific granular operations
func hasSpecificQueryParameters(query url.Values) bool {
// Check for object-level operation indicators
@ -525,9 +360,9 @@ func mapLegacyActionToIAM(legacyAction Action) string {
case s3_constants.ACTION_ABORT_MULTIPART:
return "s3:AbortMultipartUpload"
case s3_constants.ACTION_LIST_MULTIPART_UPLOADS:
return "s3:ListMultipartUploads"
return s3_constants.S3_ACTION_LIST_MULTIPART_UPLOADS
case s3_constants.ACTION_LIST_PARTS:
return "s3:ListParts"
return s3_constants.S3_ACTION_LIST_PARTS
default:
// If it's already a properly formatted S3 action, return as-is

6
weed/s3api/s3_iam_simple_test.go

@ -294,7 +294,7 @@ func TestDetermineGranularS3Action(t *testing.T) {
objectKey: "",
queryParams: map[string]string{"uploads": ""},
fallbackAction: s3_constants.ACTION_LIST,
expected: "s3:ListMultipartUploads",
expected: "s3:ListBucketMultipartUploads",
description: "List multipart uploads in bucket",
},
@ -336,8 +336,8 @@ func TestDetermineGranularS3Action(t *testing.T) {
}
req.URL.RawQuery = query.Encode()
// Test the granular action determination
result := determineGranularS3Action(req, tt.fallbackAction, tt.bucket, tt.objectKey)
// Test the action determination
result := ResolveS3Action(req, string(tt.fallbackAction), tt.bucket, tt.objectKey)
assert.Equal(t, tt.expected, result,
"Test %s failed: %s. Expected %s but got %s",

48
weed/s3api/s3_list_parts_action_test.go

@ -39,8 +39,8 @@ func TestListPartsActionMapping(t *testing.T) {
objectKey: "test-object.txt",
queryParams: map[string]string{"uploadId": "test-upload-id"},
fallbackAction: s3_constants.ACTION_READ,
expectedAction: "s3:ListParts",
description: "GET request with uploadId should map to s3:ListParts (this was the missing mapping)",
expectedAction: "s3:ListMultipartUploadParts",
description: "GET request with uploadId should map to s3:ListMultipartUploadParts (this was the missing mapping)",
},
{
name: "get_object_with_uploadId_and_other_params",
@ -53,18 +53,18 @@ func TestListPartsActionMapping(t *testing.T) {
"part-number-marker": "50",
},
fallbackAction: s3_constants.ACTION_READ,
expectedAction: "s3:ListParts",
description: "GET request with uploadId plus other multipart params should map to s3:ListParts",
expectedAction: "s3:ListMultipartUploadParts",
description: "GET request with uploadId plus other multipart params should map to s3:ListMultipartUploadParts",
},
{
name: "get_object_versions",
name: "get_object_with_versionId",
method: "GET",
bucket: "test-bucket",
objectKey: "test-object.txt",
queryParams: map[string]string{"versions": ""},
queryParams: map[string]string{"versionId": "version-123"},
fallbackAction: s3_constants.ACTION_READ,
expectedAction: "s3:GetObjectVersion",
description: "GET request with versions should still map to s3:GetObjectVersion (precedence check)",
description: "GET request with versionId should map to s3:GetObjectVersion",
},
{
name: "get_object_acl_without_uploadId",
@ -103,8 +103,8 @@ func TestListPartsActionMapping(t *testing.T) {
}
req.URL.RawQuery = query.Encode()
// Call the granular action determination function
action := determineGranularS3Action(req, tc.fallbackAction, tc.bucket, tc.objectKey)
// Call the action resolver directly
action := ResolveS3Action(req, string(tc.fallbackAction), tc.bucket, tc.objectKey)
// Verify the action mapping
assert.Equal(t, tc.expectedAction, action,
@ -127,17 +127,17 @@ func TestListPartsActionMappingSecurityScenarios(t *testing.T) {
query1 := req1.URL.Query()
query1.Set("uploadId", "active-upload-123")
req1.URL.RawQuery = query1.Encode()
action1 := determineGranularS3Action(req1, s3_constants.ACTION_READ, "secure-bucket", "confidential-document.pdf")
action1 := ResolveS3Action(req1, string(s3_constants.ACTION_READ), "secure-bucket", "confidential-document.pdf")
// Test request 2: Get object without uploadId
req2 := &http.Request{
Method: "GET",
URL: &url.URL{Path: "/secure-bucket/confidential-document.pdf"},
}
action2 := determineGranularS3Action(req2, s3_constants.ACTION_READ, "secure-bucket", "confidential-document.pdf")
action2 := ResolveS3Action(req2, string(s3_constants.ACTION_READ), "secure-bucket", "confidential-document.pdf")
// These should be different actions, allowing different permissions
assert.Equal(t, "s3:ListParts", action1, "Listing multipart parts should require s3:ListParts permission")
assert.Equal(t, "s3:ListMultipartUploadParts", action1, "Listing multipart parts should require s3:ListMultipartUploadParts permission")
assert.Equal(t, "s3:GetObject", action2, "Reading object content should require s3:GetObject permission")
assert.NotEqual(t, action1, action2, "ListParts and GetObject should be separate permissions for security")
})
@ -155,8 +155,8 @@ func TestListPartsActionMappingSecurityScenarios(t *testing.T) {
{
description: "List multipart upload parts",
queryParams: map[string]string{"uploadId": "upload-abc123"},
expectedAction: "s3:ListParts",
securityNote: "FIXED: Now correctly maps to s3:ListParts instead of s3:GetObject",
expectedAction: "s3:ListMultipartUploadParts",
securityNote: "FIXED: Now correctly maps to s3:ListMultipartUploadParts instead of s3:GetObject",
},
{
description: "Get actual object content",
@ -167,7 +167,7 @@ func TestListPartsActionMappingSecurityScenarios(t *testing.T) {
{
description: "Get object with complex upload ID",
queryParams: map[string]string{"uploadId": "complex-upload-id-with-hyphens-123-abc-def"},
expectedAction: "s3:ListParts",
expectedAction: "s3:ListMultipartUploadParts",
securityNote: "FIXED: Complex upload IDs now correctly detected",
},
}
@ -184,7 +184,7 @@ func TestListPartsActionMappingSecurityScenarios(t *testing.T) {
}
req.URL.RawQuery = query.Encode()
action := determineGranularS3Action(req, s3_constants.ACTION_READ, "test-bucket", "test-object")
action := ResolveS3Action(req, string(s3_constants.ACTION_READ), "test-bucket", "test-object")
assert.Equal(t, tc.expectedAction, action,
"%s - %s", tc.description, tc.securityNote)
@ -205,7 +205,7 @@ func TestListPartsActionRealWorldScenarios(t *testing.T) {
query1 := req1.URL.Query()
query1.Set("uploads", "")
req1.URL.RawQuery = query1.Encode()
action1 := determineGranularS3Action(req1, s3_constants.ACTION_WRITE, "data", "large-dataset.csv")
action1 := ResolveS3Action(req1, string(s3_constants.ACTION_WRITE), "data", "large-dataset.csv")
// Step 2: List existing parts (GET with uploadId query) - THIS WAS THE MISSING MAPPING
req2 := &http.Request{
@ -215,7 +215,7 @@ func TestListPartsActionRealWorldScenarios(t *testing.T) {
query2 := req2.URL.Query()
query2.Set("uploadId", "dataset-upload-20240827-001")
req2.URL.RawQuery = query2.Encode()
action2 := determineGranularS3Action(req2, s3_constants.ACTION_READ, "data", "large-dataset.csv")
action2 := ResolveS3Action(req2, string(s3_constants.ACTION_READ), "data", "large-dataset.csv")
// Step 3: Upload a part (PUT with uploadId and partNumber)
req3 := &http.Request{
@ -226,7 +226,7 @@ func TestListPartsActionRealWorldScenarios(t *testing.T) {
query3.Set("uploadId", "dataset-upload-20240827-001")
query3.Set("partNumber", "5")
req3.URL.RawQuery = query3.Encode()
action3 := determineGranularS3Action(req3, s3_constants.ACTION_WRITE, "data", "large-dataset.csv")
action3 := ResolveS3Action(req3, string(s3_constants.ACTION_WRITE), "data", "large-dataset.csv")
// Step 4: Complete multipart upload (POST with uploadId)
req4 := &http.Request{
@ -236,11 +236,11 @@ func TestListPartsActionRealWorldScenarios(t *testing.T) {
query4 := req4.URL.Query()
query4.Set("uploadId", "dataset-upload-20240827-001")
req4.URL.RawQuery = query4.Encode()
action4 := determineGranularS3Action(req4, s3_constants.ACTION_WRITE, "data", "large-dataset.csv")
action4 := ResolveS3Action(req4, string(s3_constants.ACTION_WRITE), "data", "large-dataset.csv")
// Verify each step has the correct action mapping
assert.Equal(t, "s3:CreateMultipartUpload", action1, "Step 1: Initiate upload")
assert.Equal(t, "s3:ListParts", action2, "Step 2: List parts (FIXED by this PR)")
assert.Equal(t, "s3:ListMultipartUploadParts", action2, "Step 2: List parts (FIXED by this PR)")
assert.Equal(t, "s3:UploadPart", action3, "Step 3: Upload part")
assert.Equal(t, "s3:CompleteMultipartUpload", action4, "Step 4: Complete upload")
@ -277,10 +277,10 @@ func TestListPartsActionRealWorldScenarios(t *testing.T) {
query.Set("uploadId", uploadId)
req.URL.RawQuery = query.Encode()
action := determineGranularS3Action(req, s3_constants.ACTION_READ, "test-bucket", "test-file.bin")
action := ResolveS3Action(req, string(s3_constants.ACTION_READ), "test-bucket", "test-file.bin")
assert.Equal(t, "s3:ListParts", action,
"Upload ID format %s should be correctly detected and mapped to s3:ListParts", uploadId)
assert.Equal(t, "s3:ListMultipartUploadParts", action,
"Upload ID format %s should be correctly detected and mapped to s3:ListMultipartUploadParts", uploadId)
}
})
}

4
weed/s3api/s3_multipart_iam_test.go

@ -546,8 +546,8 @@ func setupTestRolesForMultipart(ctx context.Context, manager *integration.IAMMan
"s3:UploadPart",
"s3:CompleteMultipartUpload",
"s3:AbortMultipartUpload",
"s3:ListMultipartUploads",
"s3:ListParts",
"s3:ListBucketMultipartUploads",
"s3:ListMultipartUploadParts",
},
Resource: []string{
"arn:aws:s3:::*",

3
weed/s3api/s3api_bucket_handlers.go

@ -610,7 +610,8 @@ func (s3a *S3ApiServer) AuthWithPublicRead(handler http.HandlerFunc, action Acti
// Check bucket policy for anonymous access using the policy engine
principal := "*" // Anonymous principal
allowed, evaluated, err := s3a.policyEngine.EvaluatePolicy(bucket, object, string(action), principal)
// Use context-aware policy evaluation to get the correct S3 action
allowed, evaluated, err := s3a.policyEngine.EvaluatePolicyWithContext(bucket, object, string(action), principal, r)
if err != nil {
// SECURITY: Fail-close on policy evaluation errors
// If we can't evaluate the policy, deny access rather than falling through to IAM

138
weed/s3api/s3api_bucket_policy_engine.go

@ -3,13 +3,12 @@ package s3api
import (
"encoding/json"
"fmt"
"strings"
"net/http"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/iam/policy"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/s3api/policy_engine"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
)
// BucketPolicyEngine wraps the policy_engine to provide bucket policy evaluation
@ -49,11 +48,8 @@ func (bpe *BucketPolicyEngine) LoadBucketPolicy(bucket string, entry *filer_pb.E
// LoadBucketPolicyFromCache loads a bucket policy from a cached BucketConfig
//
// NOTE: This function uses JSON marshaling/unmarshaling to convert between
// policy.PolicyDocument and policy_engine.PolicyDocument. This is inefficient
// but necessary because the two types are defined in different packages and
// have subtle differences. A future improvement would be to unify these types
// or create a direct conversion function for better performance and type safety.
// This function uses a type-safe conversion function to convert between
// policy.PolicyDocument and policy_engine.PolicyDocument with explicit field mapping and error handling.
func (bpe *BucketPolicyEngine) LoadBucketPolicyFromCache(bucket string, policyDoc *policy.PolicyDocument) error {
if policyDoc == nil {
// No policy for this bucket - remove it if it exists
@ -61,10 +57,16 @@ func (bpe *BucketPolicyEngine) LoadBucketPolicyFromCache(bucket string, policyDo
return nil
}
// Convert policy.PolicyDocument to policy_engine.PolicyDocument
// We use JSON marshaling as an intermediate format since both types
// follow the same AWS S3 policy structure
policyJSON, err := json.Marshal(policyDoc)
// Convert policy.PolicyDocument to policy_engine.PolicyDocument without a JSON round-trip
// This removes the prior intermediate marshal/unmarshal and adds type safety
enginePolicyDoc, err := ConvertPolicyDocumentToPolicyEngine(policyDoc)
if err != nil {
glog.Errorf("Failed to convert bucket policy for %s: %v", bucket, err)
return fmt.Errorf("failed to convert bucket policy: %w", err)
}
// Marshal the converted policy to JSON for storage in the engine
policyJSON, err := json.Marshal(enginePolicyDoc)
if err != nil {
glog.Errorf("Failed to marshal bucket policy for %s: %v", bucket, err)
return err
@ -99,8 +101,8 @@ func (bpe *BucketPolicyEngine) EvaluatePolicy(bucket, object, action, principal
return false, false, fmt.Errorf("action cannot be empty")
}
// Convert action to S3 action format
s3Action := convertActionToS3Format(action)
// Convert action to S3 action format using base mapping (no HTTP context available)
s3Action := mapBaseActionToS3Format(action)
// Build resource ARN
resource := buildResourceARN(bucket, object)
@ -132,72 +134,52 @@ func (bpe *BucketPolicyEngine) EvaluatePolicy(bucket, object, action, principal
}
}
// convertActionToS3Format converts internal action strings to S3 action format
//
// KNOWN LIMITATION: The current Action type uses coarse-grained constants
// (ACTION_READ, ACTION_WRITE, etc.) that map to specific S3 actions, but these
// are used for multiple operations. For example, ACTION_WRITE is used for both
// PutObject and DeleteObject, but this function maps it to only s3:PutObject.
// This means bucket policies requiring fine-grained permissions (e.g., allowing
// s3:DeleteObject but not s3:PutObject) will not work correctly.
//
// TODO: Refactor to use specific S3 action strings throughout the S3 API handlers
// instead of coarse-grained Action constants. This is a major architectural change
// that should be done in a separate PR.
//
// This function explicitly maps all known actions to prevent security issues from
// overly permissive default behavior.
func convertActionToS3Format(action string) string {
// Handle multipart actions that already have s3: prefix
if strings.HasPrefix(action, "s3:") {
return action
}
// Explicit mapping for all known actions
switch action {
// Basic operations
case s3_constants.ACTION_READ:
return "s3:GetObject"
case s3_constants.ACTION_WRITE:
return "s3:PutObject"
case s3_constants.ACTION_LIST:
return "s3:ListBucket"
case s3_constants.ACTION_TAGGING:
return "s3:PutObjectTagging"
case s3_constants.ACTION_ADMIN:
return "s3:*"
// ACL operations
case s3_constants.ACTION_READ_ACP:
return "s3:GetObjectAcl"
case s3_constants.ACTION_WRITE_ACP:
return "s3:PutObjectAcl"
// Bucket operations
case s3_constants.ACTION_DELETE_BUCKET:
return "s3:DeleteBucket"
// Object Lock operations
case s3_constants.ACTION_BYPASS_GOVERNANCE_RETENTION:
return "s3:BypassGovernanceRetention"
case s3_constants.ACTION_GET_OBJECT_RETENTION:
return "s3:GetObjectRetention"
case s3_constants.ACTION_PUT_OBJECT_RETENTION:
return "s3:PutObjectRetention"
case s3_constants.ACTION_GET_OBJECT_LEGAL_HOLD:
return "s3:GetObjectLegalHold"
case s3_constants.ACTION_PUT_OBJECT_LEGAL_HOLD:
return "s3:PutObjectLegalHold"
case s3_constants.ACTION_GET_BUCKET_OBJECT_LOCK_CONFIG:
return "s3:GetBucketObjectLockConfiguration"
case s3_constants.ACTION_PUT_BUCKET_OBJECT_LOCK_CONFIG:
return "s3:PutBucketObjectLockConfiguration"
// EvaluatePolicyWithContext evaluates whether an action is allowed by bucket policy using HTTP request context.
// Unlike EvaluatePolicy, it uses the HTTP request to resolve the coarse internal action
// (e.g. ACTION_READ) into the precise S3 action (e.g. s3:ListMultipartUploadParts).
//
// Returns:
//   - allowed:   whether the policy grants the action (meaningful only when evaluated is true)
//   - evaluated: false when no policy exists for the bucket (indeterminate), so callers fall through to IAM
//   - err:       non-nil on invalid arguments or an unexpected engine result (fail closed)
func (bpe *BucketPolicyEngine) EvaluatePolicyWithContext(bucket, object, action, principal string, r *http.Request) (allowed bool, evaluated bool, err error) {
	// Validate required parameters
	if bucket == "" {
		return false, false, fmt.Errorf("bucket cannot be empty")
	}
	if action == "" {
		return false, false, fmt.Errorf("action cannot be empty")
	}

	// Convert action to S3 action format using request context.
	// ResolveS3Action handles nil request internally (falls back to mapBaseActionToS3Format).
	s3Action := ResolveS3Action(r, action, bucket, object)

	// Build resource ARN
	resource := buildResourceARN(bucket, object)

	glog.V(4).Infof("EvaluatePolicyWithContext: bucket=%s, resource=%s, action=%s (from %s), principal=%s",
		bucket, resource, s3Action, action, principal)

	// Evaluate using the policy engine
	args := &policy_engine.PolicyEvaluationArgs{
		Action:    s3Action,
		Resource:  resource,
		Principal: principal,
	}

	result := bpe.engine.EvaluatePolicy(bucket, args)

	switch result {
	case policy_engine.PolicyResultAllow:
		glog.V(3).Infof("EvaluatePolicyWithContext: ALLOW - bucket=%s, action=%s, principal=%s", bucket, s3Action, principal)
		return true, true, nil
	case policy_engine.PolicyResultDeny:
		glog.V(3).Infof("EvaluatePolicyWithContext: DENY - bucket=%s, action=%s, principal=%s", bucket, s3Action, principal)
		return false, true, nil
	case policy_engine.PolicyResultIndeterminate:
		// No policy exists for this bucket
		glog.V(4).Infof("EvaluatePolicyWithContext: INDETERMINATE (no policy) - bucket=%s", bucket)
		return false, false, nil
	default:
		// Fail closed: an unrecognized result must never be treated as a grant.
		return false, false, fmt.Errorf("unknown policy result: %v", result)
	}
}
// NOTE: The convertActionToS3Format wrapper has been removed for simplicity.
// EvaluatePolicy and EvaluatePolicyWithContext now call ResolveS3Action or
// mapBaseActionToS3Format directly, making the control flow more explicit.

9
weed/s3api/s3api_object_handlers_put.go

@ -135,7 +135,7 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request)
versioningEnabled := (versioningState == s3_constants.VersioningEnabled)
versioningConfigured := (versioningState != "")
glog.V(0).Infof("PutObjectHandler: bucket=%s, object=%s, versioningState='%s', versioningEnabled=%v, versioningConfigured=%v", bucket, object, versioningState, versioningEnabled, versioningConfigured)
glog.V(2).Infof("PutObjectHandler: bucket=%s, object=%s, versioningState='%s', versioningEnabled=%v, versioningConfigured=%v", bucket, object, versioningState, versioningEnabled, versioningConfigured)
// Validate object lock headers before processing
if err := s3a.validateObjectLockHeaders(r, versioningEnabled); err != nil {
@ -155,7 +155,8 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request)
}
}
if versioningState == s3_constants.VersioningEnabled {
switch versioningState {
case s3_constants.VersioningEnabled:
// Handle enabled versioning - create new versions with real version IDs
glog.V(0).Infof("PutObjectHandler: ENABLED versioning detected for %s/%s, calling putVersionedObject", bucket, object)
versionId, etag, errCode := s3a.putVersionedObject(r, bucket, object, dataReader, objectContentType)
@ -177,7 +178,7 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request)
// Set ETag in response
setEtag(w, etag)
} else if versioningState == s3_constants.VersioningSuspended {
case s3_constants.VersioningSuspended:
// Handle suspended versioning - overwrite with "null" version ID but preserve existing versions
etag, errCode := s3a.putSuspendedVersioningObject(r, bucket, object, dataReader, objectContentType)
if errCode != s3err.ErrNone {
@ -190,7 +191,7 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request)
// Set ETag in response
setEtag(w, etag)
} else {
default:
// Handle regular PUT (never configured versioning)
uploadUrl := s3a.toFilerUrl(bucket, object)
if objectContentType == "" {

12
weed/s3api/s3api_server.go

@ -86,9 +86,10 @@ func NewS3ApiServerWithStore(router *mux.Router, option *S3ApiServerOption, expl
option.AllowedOrigins = domains
}
var iam *IdentityAccessManagement
iam := NewIdentityAccessManagementWithStore(option, explicitStore)
iam = NewIdentityAccessManagementWithStore(option, explicitStore)
// Initialize bucket policy engine first
policyEngine := NewBucketPolicyEngine()
s3ApiServer = &S3ApiServer{
option: option,
@ -98,11 +99,12 @@ func NewS3ApiServerWithStore(router *mux.Router, option *S3ApiServerOption, expl
cb: NewCircuitBreaker(option),
credentialManager: iam.credentialManager,
bucketConfigCache: NewBucketConfigCache(60 * time.Minute), // Increased TTL since cache is now event-driven
policyEngine: NewBucketPolicyEngine(), // Initialize bucket policy engine
policyEngine: policyEngine, // Initialize bucket policy engine
}
// Link IAM back to server for bucket policy evaluation
iam.s3ApiServer = s3ApiServer
// Pass policy engine to IAM for bucket policy evaluation
// This avoids circular dependency by not passing the entire S3ApiServer
iam.policyEngine = policyEngine
// Initialize advanced IAM system if config is provided
if option.IamConfig != "" {

6
weed/server/volume_server_handlers.go

@ -75,6 +75,10 @@ func (vs *VolumeServer) checkDownloadLimit(w http.ResponseWriter, r *http.Reques
// - true: Request was handled (either proxied successfully or failed with error response)
// - false: No proxy available (volume has no replicas or request already proxied)
func (vs *VolumeServer) tryProxyToReplica(w http.ResponseWriter, r *http.Request) bool {
if r.URL.Query().Get(reqIsProxied) == "true" {
return false // already proxied
}
vid, _, _, _, _ := parseURLPath(r.URL.Path)
volumeId, err := needle.NewVolumeId(vid)
if err != nil {
@ -84,7 +88,7 @@ func (vs *VolumeServer) tryProxyToReplica(w http.ResponseWriter, r *http.Request
}
volume := vs.store.GetVolume(volumeId)
if volume != nil && volume.ReplicaPlacement != nil && volume.ReplicaPlacement.HasReplication() && r.URL.Query().Get(reqIsProxied) != "true" {
if volume != nil && volume.ReplicaPlacement != nil && volume.ReplicaPlacement.HasReplication() {
vs.proxyReqToTargetServer(w, r)
return true // handled by proxy
}

10
weed/server/volume_server_handlers_read.go

@ -7,6 +7,7 @@ import (
"errors"
"fmt"
"io"
"math/rand/v2"
"mime"
"net/http"
"net/url"
@ -59,6 +60,11 @@ func (vs *VolumeServer) proxyReqToTargetServer(w http.ResponseWriter, r *http.Re
NotFound(w)
return
}
if len(lookupResult.Locations) >= 2 {
rand.Shuffle(len(lookupResult.Locations), func(i, j int) {
lookupResult.Locations[i], lookupResult.Locations[j] = lookupResult.Locations[j], lookupResult.Locations[i]
})
}
var tragetUrl *url.URL
location := fmt.Sprintf("%s:%d", vs.store.Ip, vs.store.Port)
for _, loc := range lookupResult.Locations {
@ -79,7 +85,9 @@ func (vs *VolumeServer) proxyReqToTargetServer(w http.ResponseWriter, r *http.Re
// proxy client request to target server
r.URL.Host = tragetUrl.Host
r.URL.Scheme = tragetUrl.Scheme
r.URL.Query().Add(reqIsProxied, "true")
query := r.URL.Query()
query.Set(reqIsProxied, "true")
r.URL.RawQuery = query.Encode()
request, err := http.NewRequest(http.MethodGet, r.URL.String(), nil)
if err != nil {
glog.V(0).Infof("failed to instance http request of url %s: %v", r.URL.String(), err)

38
weed/shell/command_volume_check_disk.go

@ -66,10 +66,11 @@ func (c *commandVolumeCheckDisk) Do(args []string, commandEnv *CommandEnv, write
fsckCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
slowMode := fsckCommand.Bool("slow", false, "slow mode checks all replicas even file counts are the same")
verbose := fsckCommand.Bool("v", false, "verbose mode")
volumeId := fsckCommand.Uint("volumeId", 0, "the volume id")
volumeId := fsckCommand.Uint("volumeId", 0, "the volume ID (0 for all)")
applyChanges := fsckCommand.Bool("apply", false, "apply the fix")
// TODO: remove this alias
applyChangesAlias := fsckCommand.Bool("force", false, "apply the fix (alias for -apply)")
forceReadonly := fsckCommand.Bool("force-readonly", false, "apply the fix even on readonly volumes")
syncDeletions := fsckCommand.Bool("syncDeleted", false, "sync of deletions the fix")
nonRepairThreshold := fsckCommand.Float64("nonRepairThreshold", 0.3, "repair when missing keys is not more than this limit")
if err = fsckCommand.Parse(args); err != nil {
@ -100,13 +101,37 @@ func (c *commandVolumeCheckDisk) Do(args []string, commandEnv *CommandEnv, write
if err != nil {
return err
}
// collect volume replicas, optionally filtered by volume ID
volumeReplicas, _ := collectVolumeReplicaLocations(topologyInfo)
if vid := uint32(*volumeId); vid > 0 {
if replicas, ok := volumeReplicas[vid]; ok {
volumeReplicas = map[uint32][]*VolumeReplica{
vid: replicas,
}
} else {
return fmt.Errorf("volume %d not found", vid)
}
}
vcd.write("Pass #1 (writeable volumes)\n")
if err := vcd.checkWriteableVolumes(volumeReplicas); err != nil {
return err
}
if *forceReadonly {
vcd.write("Pass #2 (read-only volumes)\n")
if err := vcd.checkReadOnlyVolumes(volumeReplicas); err != nil {
return err
}
}
return nil
}
// checkWriteableVolumes fixes volume replicas which are not read-only.
func (vcd *volumeCheckDisk) checkWriteableVolumes(volumeReplicas map[uint32][]*VolumeReplica) error {
// pick one pair of volume replicas at a time
for _, replicas := range volumeReplicas {
if *volumeId > 0 && replicas[0].info.Id != uint32(*volumeId) {
continue
}
// filter readonly replica
var writableReplicas []*VolumeReplica
for _, replica := range replicas {
@ -148,6 +173,11 @@ func (c *commandVolumeCheckDisk) Do(args []string, commandEnv *CommandEnv, write
return nil
}
// checkReadOnlyVolumes fixes read-only volume replicas.
//
// Not implemented yet: this stub always returns an error so callers (gated
// behind the -force-readonly flag in Do) fail loudly instead of silently
// skipping read-only volumes. The volumeReplicas parameter is accepted now so
// the signature mirrors checkWriteableVolumes and stays stable once the real
// implementation lands; progress is tracked in the linked issue.
func (vcd *volumeCheckDisk) checkReadOnlyVolumes(volumeReplicas map[uint32][]*VolumeReplica) error {
	return fmt.Errorf("not yet implemented (https://github.com/seaweedfs/seaweedfs/issues/7442)")
}
// isLocked reports whether the shared command environment is currently locked,
// delegating directly to commandEnv.isLocked.
func (vcd *volumeCheckDisk) isLocked() bool {
	return vcd.commandEnv.isLocked()
}

Loading…
Cancel
Save