Browse Source
IAM Policy Management via gRPC (#8109)
IAM Policy Management via gRPC (#8109)
* Add IAM gRPC service definition - Add GetConfiguration/PutConfiguration for config management - Add CreateUser/GetUser/UpdateUser/DeleteUser/ListUsers for user management - Add CreateAccessKey/DeleteAccessKey/GetUserByAccessKey for access key management - Methods mirror existing IAM HTTP API functionality * Add IAM gRPC handlers on filer server - Implement IamGrpcServer with CredentialManager integration - Handle configuration get/put operations - Handle user CRUD operations - Handle access key create/delete operations - All methods delegate to CredentialManager for actual storage * Wire IAM gRPC service to filer server - Add CredentialManager field to FilerOption and FilerServer - Import credential store implementations in filer command - Initialize CredentialManager from credential.toml if available - Register IAM gRPC service on filer gRPC server - Enable credential management via gRPC alongside existing filer services * Regenerate IAM protobuf with gRPC service methods * iam_pb: add Policy Management to protobuf definitions * credential: implement PolicyManager in credential stores * filer: implement IAM Policy Management RPCs * shell: add s3.policy command * test: add integration test for s3.policy * test: fix compilation errors in policy_test * pb * fmt * test * weed shell: add -policies flag to s3.configure This allows linking/unlinking IAM policies to/from identities directly from the s3.configure command. * test: verify s3.configure policy linking and fix port allocation - Added test case for linking policies to users via s3.configure - Implemented findAvailablePortPair to ensure HTTP and gRPC ports are both available, avoiding conflicts with randomized port assignments. 
- Updated assertion to match jsonpb output (policyNames) * credential: add StoreTypeGrpc constant * credential: add IAM gRPC store boilerplate * credential: implement identity methods in gRPC store * credential: implement policy methods in gRPC store * admin: use gRPC credential store for AdminServer This ensures that all IAM and policy changes made through the Admin UI are persisted via the Filer's IAM gRPC service instead of direct file manipulation. * shell: s3.configure use granular IAM gRPC APIs instead of full config patching * shell: s3.configure use granular IAM gRPC APIs * shell: replace deprecated ioutil with os in s3.policy * filer: use gRPC FailedPrecondition for unconfigured credential manager * test: improve s3.policy integration tests and fix error checks * ci: add s3 policy shell integration tests to github workflow * filer: fix LoadCredentialConfiguration error handling * credential/grpc: propagate unmarshal errors in GetPolicies * filer/grpc: improve error handling and validation * shell: use gRPC status codes in s3.configure * credential: document PutPolicy as create-or-replace * credential/postgres: reuse CreatePolicy in PutPolicy to deduplicate logic * shell: add timeout context and strictly enforce flags in s3.policy * iam: standardize policy content field naming in gRPC and proto * shell: extract slice helper functions in s3.configure * filer: map credential store errors to gRPC status codes * filer: add input validation for UpdateUser and CreateAccessKey * iam: improve validation in policy and config handlers * filer: ensure IAM service registration by defaulting credential manager * credential: add GetStoreName method to manager * test: verify policy deletion in integration test
pull/8043/merge
committed by
GitHub
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
38 changed files with 4028 additions and 916 deletions
-
35.github/workflows/s3-policy-tests.yml
-
296test/s3/policy/policy_test.go
-
4weed/admin/dash/admin_server.go
-
24weed/command/filer.go
-
29weed/credential/credential_manager.go
-
8weed/credential/credential_store.go
-
5weed/credential/filer_etc/filer_etc_policy.go
-
8weed/credential/filer_multiple/filer_multiple_store.go
-
120weed/credential/grpc/grpc_identity.go
-
69weed/credential/grpc/grpc_policy.go
-
72weed/credential/grpc/grpc_store.go
-
13weed/credential/memory/memory_policy.go
-
5weed/credential/postgres/postgres_policy.go
-
7weed/pb/filer_pb/filer.pb.go
-
3weed/pb/filer_pb/filer_grpc.pb.go
-
146weed/pb/iam.proto
-
1836weed/pb/iam_pb/iam.pb.go
-
555weed/pb/iam_pb/iam_grpc.pb.go
-
2weed/pb/master_pb/master.pb.go
-
2weed/pb/master_pb/master_grpc.pb.go
-
7weed/pb/mount_pb/mount.pb.go
-
3weed/pb/mount_pb/mount_grpc.pb.go
-
9weed/pb/mq_agent_pb/mq_agent.pb.go
-
3weed/pb/mq_agent_pb/mq_agent_grpc.pb.go
-
9weed/pb/mq_pb/mq_broker.pb.go
-
3weed/pb/mq_pb/mq_broker_grpc.pb.go
-
7weed/pb/remote_pb/remote.pb.go
-
7weed/pb/s3_pb/s3.pb.go
-
3weed/pb/s3_pb/s3_grpc.pb.go
-
7weed/pb/schema_pb/mq_schema.pb.go
-
9weed/pb/volume_server_pb/volume_server.pb.go
-
782weed/pb/volume_server_pb/volume_server_grpc.pb.go
-
7weed/pb/worker_pb/worker.pb.go
-
3weed/pb/worker_pb/worker_grpc.pb.go
-
6weed/server/filer_server.go
-
351weed/server/filer_server_handlers_iam_grpc.go
-
339weed/shell/command_s3_configure.go
-
150weed/shell/command_s3_policy.go
@ -0,0 +1,296 @@ |
|||
package policy |
|||
|
|||
import ( |
|||
"context" |
|||
"fmt" |
|||
"net" |
|||
"net/http" |
|||
"os" |
|||
"os/exec" |
|||
"path/filepath" |
|||
"strconv" |
|||
"strings" |
|||
"sync" |
|||
"testing" |
|||
"time" |
|||
|
|||
"github.com/seaweedfs/seaweedfs/weed/command" |
|||
"github.com/seaweedfs/seaweedfs/weed/glog" |
|||
flag "github.com/seaweedfs/seaweedfs/weed/util/fla9" |
|||
"github.com/stretchr/testify/require" |
|||
) |
|||
|
|||
// TestCluster manages the weed mini instance for integration testing.
type TestCluster struct {
	dataDir   string             // scratch directory holding master/volume/filer data and config files
	ctx       context.Context    // lifetime context for the in-process cluster
	cancel    context.CancelFunc // cancels ctx when the cluster is stopped
	isRunning bool               // set once the S3 endpoint has answered a request
	wg        sync.WaitGroup     // tracks the goroutine that runs the "mini" command

	// Ports are allocated in (http, http+10000 grpc) pairs by findAvailablePortPair.
	masterPort int
	volumePort int
	filerPort  int
	s3Port     int

	s3Endpoint string // "http://127.0.0.1:<s3Port>"
}
|||
|
|||
// TestS3PolicyShellRevised exercises the s3.policy and s3.configure shell
// commands end-to-end against an in-process "weed mini" cluster.
//
// NOTE(review): commands are executed via an external "weed" binary found on
// PATH, so this test assumes that binary is installed — confirm in CI setup.
// Covered flows: policy put/list/get/delete, policy linking via s3.configure,
// granular action add/delete, access key add/delete, and user deletion.
func TestS3PolicyShellRevised(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}
	cluster, err := startMiniCluster(t)
	require.NoError(t, err)
	defer cluster.Stop()

	// A permissive allow-all policy; written to a temp file because
	// `s3.policy -put` reads the document from -file.
	policyContent := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"*","Resource":"*"}]}`
	tmpPolicyFile, err := os.CreateTemp("", "test_policy_*.json")
	if err != nil {
		t.Fatalf("Failed to create temp policy file: %v", err)
	}
	defer os.Remove(tmpPolicyFile.Name())
	_, err = tmpPolicyFile.WriteString(policyContent)
	require.NoError(t, err)
	require.NoError(t, tmpPolicyFile.Close())

	weedCmd := "weed"
	masterAddr := fmt.Sprintf("127.0.0.1:%d", cluster.masterPort)
	filerAddr := fmt.Sprintf("127.0.0.1:%d", cluster.filerPort)

	// Put
	execShell(t, weedCmd, masterAddr, filerAddr, fmt.Sprintf("s3.policy -put -name=testpolicy -file=%s", tmpPolicyFile.Name()))

	// List: the stored policy must appear by name.
	out := execShell(t, weedCmd, masterAddr, filerAddr, "s3.policy -list")
	if !contains(out, "Name: testpolicy") {
		t.Errorf("List failed: %s", out)
	}

	// Get: the printed document should contain the Statement key.
	out = execShell(t, weedCmd, masterAddr, filerAddr, "s3.policy -get -name=testpolicy")
	if !contains(out, "Statement") {
		t.Errorf("Get failed: %s", out)
	}

	// Delete
	execShell(t, weedCmd, masterAddr, filerAddr, "s3.policy -delete -name=testpolicy")

	// Verify the deleted policy no longer shows up in the list.
	out = execShell(t, weedCmd, masterAddr, filerAddr, "s3.policy -list")
	if contains(out, "Name: testpolicy") {
		t.Errorf("delete failed, policy 'testpolicy' should not be in the list: %s", out)
	}
	// Verify s3.configure linking policies.
	// NOTE(review): "testpolicy" was deleted just above, so this presumably
	// only records the name on the identity — confirm s3.configure does not
	// validate policy existence.
	execShell(t, weedCmd, masterAddr, filerAddr, "s3.configure -user=test -actions=Read -policies=testpolicy -apply")
	out = execShell(t, weedCmd, masterAddr, filerAddr, "s3.configure")
	if !contains(out, "\"policyNames\": [\n \"testpolicy\"\n ]") {
		// relaxed check: tolerate different jsonpb indentation, as long as
		// both the field name and the policy name are present.
		if !contains(out, "\"testpolicy\"") || !contains(out, "policyNames") {
			t.Errorf("s3.configure failed to link policy: %s", out)
		}
	}

	// 1. Update User: Add Write action
	execShell(t, weedCmd, masterAddr, filerAddr, "s3.configure -user=test -actions=Write -apply")
	out = execShell(t, weedCmd, masterAddr, filerAddr, "s3.configure")
	if !contains(out, "Write") {
		t.Errorf("s3.configure failed to add Write action: %s", out)
	}

	// 2. Granular Delete: Delete Read action (Write must survive)
	execShell(t, weedCmd, masterAddr, filerAddr, "s3.configure -user=test -actions=Read -delete -apply")
	out = execShell(t, weedCmd, masterAddr, filerAddr, "s3.configure")
	if contains(out, "\"Read\"") { // Quote to avoid matching partial words if any
		t.Errorf("s3.configure failed to delete Read action: %s", out)
	}
	if !contains(out, "Write") {
		t.Errorf("s3.configure deleted Write action unnecessarily: %s", out)
	}

	// 3. Access Key Management: add then delete a named key.
	execShell(t, weedCmd, masterAddr, filerAddr, "s3.configure -user=test -access_key=testkey -secret_key=testsecret -apply")
	out = execShell(t, weedCmd, masterAddr, filerAddr, "s3.configure")
	if !contains(out, "testkey") {
		t.Errorf("s3.configure failed to add access key: %s", out)
	}

	execShell(t, weedCmd, masterAddr, filerAddr, "s3.configure -user=test -access_key=testkey -delete -apply")
	out = execShell(t, weedCmd, masterAddr, filerAddr, "s3.configure")
	if contains(out, "testkey") {
		t.Errorf("s3.configure failed to delete access key: %s", out)
	}

	// 4. Delete User: the identity must be gone from the configuration dump.
	execShell(t, weedCmd, masterAddr, filerAddr, "s3.configure -user=test -delete -apply")
	out = execShell(t, weedCmd, masterAddr, filerAddr, "s3.configure")
	if contains(out, "\"Name\": \"test\"") {
		t.Errorf("s3.configure failed to delete user: %s", out)
	}
}
|||
|
|||
// execShell runs a single command line through `weed shell -master=... -filer=...`,
// feeding shellCmd on stdin. It fails the test if the process exits non-zero
// and returns the combined stdout+stderr for assertions.
func execShell(t *testing.T, weedCmd, master, filer, shellCmd string) string {
	args := []string{"shell", "-master=" + master, "-filer=" + filer}
	t.Logf("Running: %s %v <<< %s", weedCmd, args, shellCmd)

	proc := exec.Command(weedCmd, args...)
	proc.Stdin = strings.NewReader(shellCmd + "\n")

	out, runErr := proc.CombinedOutput()
	if runErr != nil {
		t.Fatalf("Failed to run %s: %v\nOutput: %s", shellCmd, runErr, string(out))
	}
	return string(out)
}
|||
|
|||
// --- Test setup helpers ---
|
|||
|
|||
// findAvailablePort asks the OS for a free loopback TCP port by binding to
// port 0 and reading back the port it was assigned.
func findAvailablePort() (int, error) {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return 0, err
	}
	port := l.Addr().(*net.TCPAddr).Port
	l.Close()
	return port, nil
}
|||
|
|||
// findAvailablePortPair finds an available http port P such that P and P+10000 (grpc) are both available
|
|||
func findAvailablePortPair() (int, int, error) { |
|||
for i := 0; i < 100; i++ { |
|||
httpPort, err := findAvailablePort() |
|||
if err != nil { |
|||
return 0, 0, err |
|||
} |
|||
grpcPort := httpPort + 10000 |
|||
|
|||
// check if grpc port is available
|
|||
listener, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", grpcPort)) |
|||
if err == nil { |
|||
listener.Close() |
|||
return httpPort, grpcPort, nil |
|||
} |
|||
} |
|||
return 0, 0, fmt.Errorf("failed to find available port pair") |
|||
} |
|||
|
|||
// startMiniCluster launches an in-process "weed mini" cluster (master, volume,
// filer, S3) in a background goroutine and waits until the S3 endpoint
// responds. It returns a TestCluster handle for port lookup and shutdown.
//
// NOTE(review): this mutates process-global state (os.Chdir, os.Args and the
// shared command flag set), so tests using it must not run in parallel.
func startMiniCluster(t *testing.T) (*TestCluster, error) {
	// Allocate (http, grpc) port pairs for every component to avoid clashes
	// with other randomized test processes.
	masterPort, masterGrpcPort, err := findAvailablePortPair()
	require.NoError(t, err)
	volumePort, volumeGrpcPort, err := findAvailablePortPair()
	require.NoError(t, err)
	filerPort, filerGrpcPort, err := findAvailablePortPair()
	require.NoError(t, err)
	s3Port, s3GrpcPort, err := findAvailablePortPair()
	require.NoError(t, err)

	testDir := t.TempDir()

	ctx, cancel := context.WithCancel(context.Background())
	s3Endpoint := fmt.Sprintf("http://127.0.0.1:%d", s3Port)
	cluster := &TestCluster{
		dataDir:    testDir,
		ctx:        ctx,
		cancel:     cancel,
		masterPort: masterPort,
		volumePort: volumePort,
		filerPort:  filerPort,
		s3Port:     s3Port,
		s3Endpoint: s3Endpoint,
	}

	// Disable authentication for tests: an empty security.toml in the working
	// directory takes precedence over any installed one.
	securityToml := filepath.Join(testDir, "security.toml")
	err = os.WriteFile(securityToml, []byte("# Empty security config\n"), 0644)
	require.NoError(t, err)

	// Configure the in-memory credential store so IAM policy tests have a
	// working backend.
	credentialToml := filepath.Join(testDir, "credential.toml")
	credentialConfig := `
[credential.memory]
enabled = true
`
	err = os.WriteFile(credentialToml, []byte(credentialConfig), 0644)
	require.NoError(t, err)

	cluster.wg.Add(1)
	go func() {
		defer cluster.wg.Done()
		// chdir into the test dir so the config TOML files above are picked
		// up; restore the previous cwd and args when the command returns.
		oldDir, _ := os.Getwd()
		oldArgs := os.Args
		defer func() {
			os.Chdir(oldDir)
			os.Args = oldArgs
		}()
		os.Chdir(testDir)
		os.Args = []string{
			"weed",
			"-dir=" + testDir,
			"-master.port=" + strconv.Itoa(masterPort),
			"-master.port.grpc=" + strconv.Itoa(masterGrpcPort),
			"-volume.port=" + strconv.Itoa(volumePort),
			"-volume.port.grpc=" + strconv.Itoa(volumeGrpcPort),
			"-filer.port=" + strconv.Itoa(filerPort),
			"-filer.port.grpc=" + strconv.Itoa(filerGrpcPort),
			"-s3.port=" + strconv.Itoa(s3Port),
			"-s3.port.grpc=" + strconv.Itoa(s3GrpcPort),
			"-webdav.port=0",
			"-admin.ui=false",
			"-master.volumeSizeLimitMB=32",
			"-ip=127.0.0.1",
			"-master.peers=none",
		}
		glog.MaxSize = 1024 * 1024
		// Locate and run the "mini" command the same way main() would.
		for _, cmd := range command.Commands {
			if cmd.Name() == "mini" && cmd.Run != nil {
				cmd.Flag.Parse(os.Args[1:])
				cmd.Run(cmd, cmd.Flag.Args())
				return
			}
		}
	}()

	// Wait for S3; the S3 gateway is the last component to come up, so a
	// responding endpoint implies the whole cluster is serving.
	err = waitForS3Ready(cluster.s3Endpoint, 60*time.Second)
	if err != nil {
		cancel()
		return nil, err
	}
	cluster.isRunning = true
	return cluster, nil
}
|||
|
|||
// waitForS3Ready polls the S3 endpoint until it answers any HTTP request or
// the timeout elapses. Any response (regardless of status code) counts as
// ready; only transport-level errors keep it waiting.
func waitForS3Ready(endpoint string, timeout time.Duration) error {
	client := &http.Client{Timeout: 1 * time.Second}
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		resp, err := client.Get(endpoint)
		if err == nil {
			resp.Body.Close()
			return nil
		}
		time.Sleep(200 * time.Millisecond)
	}
	// Include the endpoint so a CI timeout log identifies which port failed.
	return fmt.Errorf("timeout waiting for S3 at %s", endpoint)
}
|||
|
|||
func (c *TestCluster) Stop() { |
|||
if c.cancel != nil { |
|||
c.cancel() |
|||
} |
|||
if c.isRunning { |
|||
time.Sleep(500 * time.Millisecond) |
|||
} |
|||
// Simplified stop
|
|||
for _, cmd := range command.Commands { |
|||
if cmd.Name() == "mini" { |
|||
cmd.Flag.VisitAll(func(f *flag.Flag) { |
|||
f.Value.Set(f.DefValue) |
|||
}) |
|||
break |
|||
} |
|||
} |
|||
} |
|||
|
|||
// contains reports whether substr occurs anywhere within s.
func contains(s, substr string) bool {
	return strings.Index(s, substr) >= 0
}
|||
@ -0,0 +1,120 @@ |
|||
package grpc |
|||
|
|||
import ( |
|||
"context" |
|||
|
|||
"github.com/seaweedfs/seaweedfs/weed/pb/iam_pb" |
|||
) |
|||
|
|||
func (store *IamGrpcStore) LoadConfiguration(ctx context.Context) (*iam_pb.S3ApiConfiguration, error) { |
|||
var config *iam_pb.S3ApiConfiguration |
|||
err := store.withIamClient(func(client iam_pb.SeaweedIdentityAccessManagementClient) error { |
|||
resp, err := client.GetConfiguration(ctx, &iam_pb.GetConfigurationRequest{}) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
config = resp.Configuration |
|||
return nil |
|||
}) |
|||
return config, err |
|||
} |
|||
|
|||
func (store *IamGrpcStore) SaveConfiguration(ctx context.Context, config *iam_pb.S3ApiConfiguration) error { |
|||
return store.withIamClient(func(client iam_pb.SeaweedIdentityAccessManagementClient) error { |
|||
_, err := client.PutConfiguration(ctx, &iam_pb.PutConfigurationRequest{ |
|||
Configuration: config, |
|||
}) |
|||
return err |
|||
}) |
|||
} |
|||
|
|||
func (store *IamGrpcStore) CreateUser(ctx context.Context, identity *iam_pb.Identity) error { |
|||
return store.withIamClient(func(client iam_pb.SeaweedIdentityAccessManagementClient) error { |
|||
_, err := client.CreateUser(ctx, &iam_pb.CreateUserRequest{ |
|||
Identity: identity, |
|||
}) |
|||
return err |
|||
}) |
|||
} |
|||
|
|||
func (store *IamGrpcStore) GetUser(ctx context.Context, username string) (*iam_pb.Identity, error) { |
|||
var identity *iam_pb.Identity |
|||
err := store.withIamClient(func(client iam_pb.SeaweedIdentityAccessManagementClient) error { |
|||
resp, err := client.GetUser(ctx, &iam_pb.GetUserRequest{ |
|||
Username: username, |
|||
}) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
identity = resp.Identity |
|||
return nil |
|||
}) |
|||
return identity, err |
|||
} |
|||
|
|||
func (store *IamGrpcStore) UpdateUser(ctx context.Context, username string, identity *iam_pb.Identity) error { |
|||
return store.withIamClient(func(client iam_pb.SeaweedIdentityAccessManagementClient) error { |
|||
_, err := client.UpdateUser(ctx, &iam_pb.UpdateUserRequest{ |
|||
Username: username, |
|||
Identity: identity, |
|||
}) |
|||
return err |
|||
}) |
|||
} |
|||
|
|||
func (store *IamGrpcStore) DeleteUser(ctx context.Context, username string) error { |
|||
return store.withIamClient(func(client iam_pb.SeaweedIdentityAccessManagementClient) error { |
|||
_, err := client.DeleteUser(ctx, &iam_pb.DeleteUserRequest{ |
|||
Username: username, |
|||
}) |
|||
return err |
|||
}) |
|||
} |
|||
|
|||
func (store *IamGrpcStore) ListUsers(ctx context.Context) ([]string, error) { |
|||
var usernames []string |
|||
err := store.withIamClient(func(client iam_pb.SeaweedIdentityAccessManagementClient) error { |
|||
resp, err := client.ListUsers(ctx, &iam_pb.ListUsersRequest{}) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
usernames = resp.Usernames |
|||
return nil |
|||
}) |
|||
return usernames, err |
|||
} |
|||
|
|||
func (store *IamGrpcStore) GetUserByAccessKey(ctx context.Context, accessKey string) (*iam_pb.Identity, error) { |
|||
var identity *iam_pb.Identity |
|||
err := store.withIamClient(func(client iam_pb.SeaweedIdentityAccessManagementClient) error { |
|||
resp, err := client.GetUserByAccessKey(ctx, &iam_pb.GetUserByAccessKeyRequest{ |
|||
AccessKey: accessKey, |
|||
}) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
identity = resp.Identity |
|||
return nil |
|||
}) |
|||
return identity, err |
|||
} |
|||
|
|||
func (store *IamGrpcStore) CreateAccessKey(ctx context.Context, username string, credential *iam_pb.Credential) error { |
|||
return store.withIamClient(func(client iam_pb.SeaweedIdentityAccessManagementClient) error { |
|||
_, err := client.CreateAccessKey(ctx, &iam_pb.CreateAccessKeyRequest{ |
|||
Username: username, |
|||
Credential: credential, |
|||
}) |
|||
return err |
|||
}) |
|||
} |
|||
|
|||
func (store *IamGrpcStore) DeleteAccessKey(ctx context.Context, username string, accessKey string) error { |
|||
return store.withIamClient(func(client iam_pb.SeaweedIdentityAccessManagementClient) error { |
|||
_, err := client.DeleteAccessKey(ctx, &iam_pb.DeleteAccessKeyRequest{ |
|||
Username: username, |
|||
AccessKey: accessKey, |
|||
}) |
|||
return err |
|||
}) |
|||
} |
|||
@ -0,0 +1,69 @@ |
|||
package grpc |
|||
|
|||
import ( |
|||
"context" |
|||
"encoding/json" |
|||
"fmt" |
|||
|
|||
"github.com/seaweedfs/seaweedfs/weed/pb/iam_pb" |
|||
"github.com/seaweedfs/seaweedfs/weed/s3api/policy_engine" |
|||
) |
|||
|
|||
func (store *IamGrpcStore) GetPolicies(ctx context.Context) (map[string]policy_engine.PolicyDocument, error) { |
|||
policies := make(map[string]policy_engine.PolicyDocument) |
|||
err := store.withIamClient(func(client iam_pb.SeaweedIdentityAccessManagementClient) error { |
|||
resp, err := client.ListPolicies(ctx, &iam_pb.ListPoliciesRequest{}) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
for _, p := range resp.Policies { |
|||
var doc policy_engine.PolicyDocument |
|||
if err := json.Unmarshal([]byte(p.Content), &doc); err != nil { |
|||
return fmt.Errorf("failed to unmarshal policy %s: %v", p.Name, err) |
|||
} |
|||
policies[p.Name] = doc |
|||
} |
|||
return nil |
|||
}) |
|||
return policies, err |
|||
} |
|||
|
|||
func (store *IamGrpcStore) PutPolicy(ctx context.Context, name string, document policy_engine.PolicyDocument) error { |
|||
content, err := json.Marshal(document) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
return store.withIamClient(func(client iam_pb.SeaweedIdentityAccessManagementClient) error { |
|||
_, err := client.PutPolicy(ctx, &iam_pb.PutPolicyRequest{ |
|||
Name: name, |
|||
Content: string(content), |
|||
}) |
|||
return err |
|||
}) |
|||
} |
|||
|
|||
func (store *IamGrpcStore) DeletePolicy(ctx context.Context, name string) error { |
|||
return store.withIamClient(func(client iam_pb.SeaweedIdentityAccessManagementClient) error { |
|||
_, err := client.DeletePolicy(ctx, &iam_pb.DeletePolicyRequest{ |
|||
Name: name, |
|||
}) |
|||
return err |
|||
}) |
|||
} |
|||
|
|||
func (store *IamGrpcStore) GetPolicy(ctx context.Context, name string) (*policy_engine.PolicyDocument, error) { |
|||
var doc policy_engine.PolicyDocument |
|||
err := store.withIamClient(func(client iam_pb.SeaweedIdentityAccessManagementClient) error { |
|||
resp, err := client.GetPolicy(ctx, &iam_pb.GetPolicyRequest{ |
|||
Name: name, |
|||
}) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
return json.Unmarshal([]byte(resp.Content), &doc) |
|||
}) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
return &doc, nil |
|||
} |
|||
@ -0,0 +1,72 @@ |
|||
package grpc |
|||
|
|||
import ( |
|||
"fmt" |
|||
"sync" |
|||
|
|||
"github.com/seaweedfs/seaweedfs/weed/credential" |
|||
"github.com/seaweedfs/seaweedfs/weed/pb" |
|||
"github.com/seaweedfs/seaweedfs/weed/pb/iam_pb" |
|||
"github.com/seaweedfs/seaweedfs/weed/util" |
|||
"google.golang.org/grpc" |
|||
) |
|||
|
|||
// Register this store with the credential framework so it can be selected
// by name (StoreTypeGrpc) when the credential manager is initialized.
func init() {
	credential.Stores = append(credential.Stores, &IamGrpcStore{})
}
|||
|
|||
// IamGrpcStore implements CredentialStore using SeaweedFS IAM gRPC service.
// All reads and writes are delegated over gRPC to the currently active filer.
type IamGrpcStore struct {
	filerAddressFunc func() pb.ServerAddress // Function to get current active filer
	grpcDialOption   grpc.DialOption         // dial option used when connecting to the filer
	mu               sync.RWMutex            // Protects filerAddressFunc and grpcDialOption
}
|||
|
|||
// GetName returns the store type identifier used to select this store in
// credential configuration.
func (store *IamGrpcStore) GetName() credential.CredentialStoreTypeName {
	return credential.StoreTypeGrpc
}
|||
|
|||
func (store *IamGrpcStore) Initialize(configuration util.Configuration, prefix string) error { |
|||
if configuration != nil { |
|||
filerAddr := configuration.GetString(prefix + "filer") |
|||
if filerAddr != "" { |
|||
store.mu.Lock() |
|||
store.filerAddressFunc = func() pb.ServerAddress { |
|||
return pb.ServerAddress(filerAddr) |
|||
} |
|||
store.mu.Unlock() |
|||
} |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
// SetFilerAddressFunc installs the callback used to discover the current
// active filer, together with the gRPC dial option for connecting to it.
// Safe for concurrent use with withIamClient (guarded by store.mu).
func (store *IamGrpcStore) SetFilerAddressFunc(getFiler func() pb.ServerAddress, grpcDialOption grpc.DialOption) {
	store.mu.Lock()
	defer store.mu.Unlock()
	store.filerAddressFunc = getFiler
	store.grpcDialOption = grpcDialOption
}
|||
|
|||
// withIamClient dials the current filer's gRPC endpoint and invokes fn with
// an IAM service client bound to that connection. It fails fast if no filer
// address source has been installed yet, or if the source returns an empty
// address.
//
// The address function and dial option are read under RLock and copied to
// locals so the lock is not held across the gRPC call itself.
func (store *IamGrpcStore) withIamClient(fn func(client iam_pb.SeaweedIdentityAccessManagementClient) error) error {
	store.mu.RLock()
	if store.filerAddressFunc == nil {
		store.mu.RUnlock()
		return fmt.Errorf("iam_grpc: filer not yet available")
	}

	filerAddress := store.filerAddressFunc()
	dialOption := store.grpcDialOption
	store.mu.RUnlock()

	if filerAddress == "" {
		return fmt.Errorf("iam_grpc: no filer discovered yet")
	}

	// NOTE(review): the leading (false, 0) arguments presumably select
	// non-streaming mode with no signature — confirm against pb.WithGrpcClient.
	return pb.WithGrpcClient(false, 0, func(conn *grpc.ClientConn) error {
		client := iam_pb.NewSeaweedIdentityAccessManagementClient(conn)
		return fn(client)
	}, filerAddress.ToGrpcAddress(), false, dialOption)
}
|||
|
|||
// Shutdown implements the credential store lifecycle hook. This store holds
// no persistent connections (withIamClient dials per call), so there is
// nothing to release.
func (store *IamGrpcStore) Shutdown() {
}
|||
1836
weed/pb/iam_pb/iam.pb.go
File diff suppressed because it is too large
View File
File diff suppressed because it is too large
View File
782
weed/pb/volume_server_pb/volume_server_grpc.pb.go
File diff suppressed because it is too large
View File
File diff suppressed because it is too large
View File
@ -0,0 +1,351 @@ |
|||
package weed_server |
|||
|
|||
import ( |
|||
"context" |
|||
"encoding/json" |
|||
|
|||
"github.com/seaweedfs/seaweedfs/weed/credential" |
|||
"github.com/seaweedfs/seaweedfs/weed/glog" |
|||
"github.com/seaweedfs/seaweedfs/weed/pb/iam_pb" |
|||
"github.com/seaweedfs/seaweedfs/weed/s3api/policy_engine" |
|||
"google.golang.org/grpc/codes" |
|||
"google.golang.org/grpc/status" |
|||
) |
|||
|
|||
// IamGrpcServer implements the IAM gRPC service on the filer.
// Every RPC delegates to the credential manager; when the manager is nil,
// handlers respond with FailedPrecondition.
type IamGrpcServer struct {
	iam_pb.UnimplementedSeaweedIdentityAccessManagementServer
	credentialManager *credential.CredentialManager
}
|||
|
|||
// NewIamGrpcServer creates a new IAM gRPC server backed by the given
// credential manager. A nil manager is tolerated; RPCs will then return
// FailedPrecondition.
func NewIamGrpcServer(credentialManager *credential.CredentialManager) *IamGrpcServer {
	return &IamGrpcServer{
		credentialManager: credentialManager,
	}
}
|||
|
|||
//////////////////////////////////////////////////
|
|||
// Configuration Management
|
|||
|
|||
func (s *IamGrpcServer) GetConfiguration(ctx context.Context, req *iam_pb.GetConfigurationRequest) (*iam_pb.GetConfigurationResponse, error) { |
|||
glog.V(4).Infof("GetConfiguration") |
|||
|
|||
if s.credentialManager == nil { |
|||
return nil, status.Errorf(codes.FailedPrecondition, "credential manager is not configured") |
|||
} |
|||
|
|||
config, err := s.credentialManager.LoadConfiguration(ctx) |
|||
if err != nil { |
|||
glog.Errorf("Failed to load configuration: %v", err) |
|||
return nil, err |
|||
} |
|||
|
|||
return &iam_pb.GetConfigurationResponse{ |
|||
Configuration: config, |
|||
}, nil |
|||
} |
|||
|
|||
func (s *IamGrpcServer) PutConfiguration(ctx context.Context, req *iam_pb.PutConfigurationRequest) (*iam_pb.PutConfigurationResponse, error) { |
|||
glog.V(4).Infof("PutConfiguration") |
|||
|
|||
if s.credentialManager == nil { |
|||
return nil, status.Errorf(codes.FailedPrecondition, "credential manager is not configured") |
|||
} |
|||
|
|||
if req.Configuration == nil { |
|||
return nil, status.Errorf(codes.InvalidArgument, "configuration is nil") |
|||
} |
|||
|
|||
err := s.credentialManager.SaveConfiguration(ctx, req.Configuration) |
|||
if err != nil { |
|||
glog.Errorf("Failed to save configuration: %v", err) |
|||
return nil, err |
|||
} |
|||
|
|||
return &iam_pb.PutConfigurationResponse{}, nil |
|||
} |
|||
|
|||
//////////////////////////////////////////////////
|
|||
// User Management
|
|||
|
|||
func (s *IamGrpcServer) CreateUser(ctx context.Context, req *iam_pb.CreateUserRequest) (*iam_pb.CreateUserResponse, error) { |
|||
if req == nil || req.Identity == nil { |
|||
return nil, status.Errorf(codes.InvalidArgument, "identity is required") |
|||
} |
|||
glog.V(4).Infof("CreateUser: %s", req.Identity.Name) |
|||
|
|||
if s.credentialManager == nil { |
|||
return nil, status.Errorf(codes.FailedPrecondition, "credential manager is not configured") |
|||
} |
|||
|
|||
err := s.credentialManager.CreateUser(ctx, req.Identity) |
|||
if err != nil { |
|||
if err == credential.ErrUserAlreadyExists { |
|||
return nil, status.Errorf(codes.AlreadyExists, "user %s already exists", req.Identity.Name) |
|||
} |
|||
glog.Errorf("Failed to create user %s: %v", req.Identity.Name, err) |
|||
return nil, status.Errorf(codes.Internal, "failed to create user: %v", err) |
|||
} |
|||
|
|||
return &iam_pb.CreateUserResponse{}, nil |
|||
} |
|||
|
|||
func (s *IamGrpcServer) GetUser(ctx context.Context, req *iam_pb.GetUserRequest) (*iam_pb.GetUserResponse, error) { |
|||
glog.V(4).Infof("GetUser: %s", req.Username) |
|||
|
|||
if s.credentialManager == nil { |
|||
return nil, status.Errorf(codes.FailedPrecondition, "credential manager is not configured") |
|||
} |
|||
|
|||
identity, err := s.credentialManager.GetUser(ctx, req.Username) |
|||
if err != nil { |
|||
if err == credential.ErrUserNotFound { |
|||
return nil, status.Errorf(codes.NotFound, "user %s not found", req.Username) |
|||
} |
|||
glog.Errorf("Failed to get user %s: %v", req.Username, err) |
|||
return nil, status.Errorf(codes.Internal, "failed to get user: %v", err) |
|||
} |
|||
|
|||
return &iam_pb.GetUserResponse{ |
|||
Identity: identity, |
|||
}, nil |
|||
} |
|||
|
|||
func (s *IamGrpcServer) UpdateUser(ctx context.Context, req *iam_pb.UpdateUserRequest) (*iam_pb.UpdateUserResponse, error) { |
|||
glog.V(4).Infof("UpdateUser: %s", req.Username) |
|||
if req == nil || req.Identity == nil { |
|||
return nil, status.Errorf(codes.InvalidArgument, "identity is required") |
|||
} |
|||
|
|||
if s.credentialManager == nil { |
|||
return nil, status.Errorf(codes.FailedPrecondition, "credential manager is not configured") |
|||
} |
|||
|
|||
err := s.credentialManager.UpdateUser(ctx, req.Username, req.Identity) |
|||
if err != nil { |
|||
if err == credential.ErrUserNotFound { |
|||
return nil, status.Errorf(codes.NotFound, "user %s not found", req.Username) |
|||
} |
|||
glog.Errorf("Failed to update user %s: %v", req.Username, err) |
|||
return nil, status.Errorf(codes.Internal, "failed to update user: %v", err) |
|||
} |
|||
|
|||
return &iam_pb.UpdateUserResponse{}, nil |
|||
} |
|||
|
|||
func (s *IamGrpcServer) DeleteUser(ctx context.Context, req *iam_pb.DeleteUserRequest) (*iam_pb.DeleteUserResponse, error) { |
|||
glog.V(4).Infof("DeleteUser: %s", req.Username) |
|||
|
|||
if s.credentialManager == nil { |
|||
return nil, status.Errorf(codes.FailedPrecondition, "credential manager is not configured") |
|||
} |
|||
|
|||
err := s.credentialManager.DeleteUser(ctx, req.Username) |
|||
if err != nil { |
|||
if err == credential.ErrUserNotFound { |
|||
// Deleting a non-existent user is generally considered a success or Not Found depending on semantics
|
|||
// In S3 API, usually idempotent. But for Admin API, often 404.
|
|||
// Here we return NotFound to let client decide, but traditionally delete is idempotent.
|
|||
// However, if we want strict status codes:
|
|||
return nil, status.Errorf(codes.NotFound, "user %s not found", req.Username) |
|||
} |
|||
glog.Errorf("Failed to delete user %s: %v", req.Username, err) |
|||
return nil, status.Errorf(codes.Internal, "failed to delete user: %v", err) |
|||
} |
|||
|
|||
return &iam_pb.DeleteUserResponse{}, nil |
|||
} |
|||
|
|||
func (s *IamGrpcServer) ListUsers(ctx context.Context, req *iam_pb.ListUsersRequest) (*iam_pb.ListUsersResponse, error) { |
|||
glog.V(4).Infof("ListUsers") |
|||
|
|||
if s.credentialManager == nil { |
|||
return nil, status.Errorf(codes.FailedPrecondition, "credential manager is not configured") |
|||
} |
|||
|
|||
usernames, err := s.credentialManager.ListUsers(ctx) |
|||
if err != nil { |
|||
glog.Errorf("Failed to list users: %v", err) |
|||
return nil, err |
|||
} |
|||
|
|||
return &iam_pb.ListUsersResponse{ |
|||
Usernames: usernames, |
|||
}, nil |
|||
} |
|||
|
|||
//////////////////////////////////////////////////
|
|||
// Access Key Management
|
|||
|
|||
func (s *IamGrpcServer) CreateAccessKey(ctx context.Context, req *iam_pb.CreateAccessKeyRequest) (*iam_pb.CreateAccessKeyResponse, error) { |
|||
if req == nil || req.Credential == nil { |
|||
return nil, status.Errorf(codes.InvalidArgument, "credential is required") |
|||
} |
|||
glog.V(4).Infof("CreateAccessKey for user: %s", req.Username) |
|||
|
|||
if s.credentialManager == nil { |
|||
return nil, status.Errorf(codes.FailedPrecondition, "credential manager is not configured") |
|||
} |
|||
|
|||
err := s.credentialManager.CreateAccessKey(ctx, req.Username, req.Credential) |
|||
if err != nil { |
|||
if err == credential.ErrUserNotFound { |
|||
return nil, status.Errorf(codes.NotFound, "user %s not found", req.Username) |
|||
} |
|||
glog.Errorf("Failed to create access key for user %s: %v", req.Username, err) |
|||
return nil, status.Errorf(codes.Internal, "failed to create access key: %v", err) |
|||
} |
|||
|
|||
return &iam_pb.CreateAccessKeyResponse{}, nil |
|||
} |
|||
|
|||
func (s *IamGrpcServer) DeleteAccessKey(ctx context.Context, req *iam_pb.DeleteAccessKeyRequest) (*iam_pb.DeleteAccessKeyResponse, error) { |
|||
glog.V(4).Infof("DeleteAccessKey: %s for user: %s", req.AccessKey, req.Username) |
|||
|
|||
if s.credentialManager == nil { |
|||
return nil, status.Errorf(codes.FailedPrecondition, "credential manager is not configured") |
|||
} |
|||
|
|||
err := s.credentialManager.DeleteAccessKey(ctx, req.Username, req.AccessKey) |
|||
if err != nil { |
|||
if err == credential.ErrUserNotFound { |
|||
return nil, status.Errorf(codes.NotFound, "user %s not found", req.Username) |
|||
} |
|||
if err == credential.ErrAccessKeyNotFound { |
|||
return nil, status.Errorf(codes.NotFound, "access key %s not found", req.AccessKey) |
|||
} |
|||
glog.Errorf("Failed to delete access key %s for user %s: %v", req.AccessKey, req.Username, err) |
|||
return nil, status.Errorf(codes.Internal, "failed to delete access key: %v", err) |
|||
} |
|||
|
|||
return &iam_pb.DeleteAccessKeyResponse{}, nil |
|||
} |
|||
|
|||
func (s *IamGrpcServer) GetUserByAccessKey(ctx context.Context, req *iam_pb.GetUserByAccessKeyRequest) (*iam_pb.GetUserByAccessKeyResponse, error) { |
|||
glog.V(4).Infof("GetUserByAccessKey: %s", req.AccessKey) |
|||
|
|||
if s.credentialManager == nil { |
|||
return nil, status.Errorf(codes.FailedPrecondition, "credential manager is not configured") |
|||
} |
|||
|
|||
identity, err := s.credentialManager.GetUserByAccessKey(ctx, req.AccessKey) |
|||
if err != nil { |
|||
if err == credential.ErrAccessKeyNotFound { |
|||
return nil, status.Errorf(codes.NotFound, "access key %s not found", req.AccessKey) |
|||
} |
|||
glog.Errorf("Failed to get user by access key %s: %v", req.AccessKey, err) |
|||
return nil, status.Errorf(codes.Internal, "failed to get user: %v", err) |
|||
} |
|||
|
|||
return &iam_pb.GetUserByAccessKeyResponse{ |
|||
Identity: identity, |
|||
}, nil |
|||
} |
|||
|
|||
//////////////////////////////////////////////////
|
|||
// Policy Management
|
|||
|
|||
func (s *IamGrpcServer) PutPolicy(ctx context.Context, req *iam_pb.PutPolicyRequest) (*iam_pb.PutPolicyResponse, error) { |
|||
glog.V(4).Infof("PutPolicy: %s", req.Name) |
|||
|
|||
if s.credentialManager == nil { |
|||
return nil, status.Errorf(codes.FailedPrecondition, "credential manager is not configured") |
|||
} |
|||
|
|||
if req.Name == "" { |
|||
return nil, status.Errorf(codes.InvalidArgument, "policy name is required") |
|||
} |
|||
if req.Content == "" { |
|||
return nil, status.Errorf(codes.InvalidArgument, "policy content is required") |
|||
} |
|||
|
|||
var policy policy_engine.PolicyDocument |
|||
if err := json.Unmarshal([]byte(req.Content), &policy); err != nil { |
|||
glog.Errorf("Failed to unmarshal policy %s: %v", req.Name, err) |
|||
return nil, err |
|||
} |
|||
|
|||
err := s.credentialManager.PutPolicy(ctx, req.Name, policy) |
|||
if err != nil { |
|||
glog.Errorf("Failed to put policy %s: %v", req.Name, err) |
|||
return nil, err |
|||
} |
|||
|
|||
return &iam_pb.PutPolicyResponse{}, nil |
|||
} |
|||
|
|||
func (s *IamGrpcServer) GetPolicy(ctx context.Context, req *iam_pb.GetPolicyRequest) (*iam_pb.GetPolicyResponse, error) { |
|||
glog.V(4).Infof("GetPolicy: %s", req.Name) |
|||
|
|||
if s.credentialManager == nil { |
|||
return nil, status.Errorf(codes.FailedPrecondition, "credential manager is not configured") |
|||
} |
|||
|
|||
policy, err := s.credentialManager.GetPolicy(ctx, req.Name) |
|||
if err != nil { |
|||
glog.Errorf("Failed to get policy %s: %v", req.Name, err) |
|||
return nil, err |
|||
} |
|||
|
|||
if policy == nil { |
|||
return nil, status.Errorf(codes.NotFound, "policy %s not found", req.Name) |
|||
} |
|||
|
|||
jsonBytes, err := json.Marshal(policy) |
|||
if err != nil { |
|||
glog.Errorf("Failed to marshal policy %s: %v", req.Name, err) |
|||
return nil, err |
|||
} |
|||
|
|||
return &iam_pb.GetPolicyResponse{ |
|||
Name: req.Name, |
|||
Content: string(jsonBytes), |
|||
}, nil |
|||
} |
|||
|
|||
func (s *IamGrpcServer) ListPolicies(ctx context.Context, req *iam_pb.ListPoliciesRequest) (*iam_pb.ListPoliciesResponse, error) { |
|||
glog.V(4).Infof("ListPolicies") |
|||
|
|||
if s.credentialManager == nil { |
|||
return nil, status.Errorf(codes.FailedPrecondition, "credential manager is not configured") |
|||
} |
|||
|
|||
policiesData, err := s.credentialManager.GetPolicies(ctx) |
|||
if err != nil { |
|||
glog.Errorf("Failed to list policies: %v", err) |
|||
return nil, err |
|||
} |
|||
|
|||
var policies []*iam_pb.Policy |
|||
for name, policy := range policiesData { |
|||
jsonBytes, err := json.Marshal(policy) |
|||
if err != nil { |
|||
return nil, status.Errorf(codes.Internal, "failed to marshal policy %s: %v", name, err) |
|||
} |
|||
policies = append(policies, &iam_pb.Policy{ |
|||
Name: name, |
|||
Content: string(jsonBytes), |
|||
}) |
|||
} |
|||
|
|||
return &iam_pb.ListPoliciesResponse{ |
|||
Policies: policies, |
|||
}, nil |
|||
} |
|||
|
|||
func (s *IamGrpcServer) DeletePolicy(ctx context.Context, req *iam_pb.DeletePolicyRequest) (*iam_pb.DeletePolicyResponse, error) { |
|||
glog.V(4).Infof("DeletePolicy: %s", req.Name) |
|||
|
|||
if s.credentialManager == nil { |
|||
return nil, status.Errorf(codes.FailedPrecondition, "credential manager is not configured") |
|||
} |
|||
|
|||
err := s.credentialManager.DeletePolicy(ctx, req.Name) |
|||
if err != nil { |
|||
glog.Errorf("Failed to delete policy %s: %v", req.Name, err) |
|||
return nil, err |
|||
} |
|||
|
|||
return &iam_pb.DeletePolicyResponse{}, nil |
|||
} |
|||
@ -0,0 +1,150 @@ |
|||
package shell |
|||
|
|||
import ( |
|||
"context" |
|||
"encoding/json" |
|||
"flag" |
|||
"fmt" |
|||
"io" |
|||
"os" |
|||
"time" |
|||
|
|||
"github.com/seaweedfs/seaweedfs/weed/pb" |
|||
"github.com/seaweedfs/seaweedfs/weed/pb/iam_pb" |
|||
"github.com/seaweedfs/seaweedfs/weed/s3api/policy_engine" |
|||
"google.golang.org/grpc" |
|||
) |
|||
|
|||
// init registers the s3.policy command with the shell's global command list.
func init() {
	Commands = append(Commands, &commandS3Policy{})
}
|||
|
|||
type commandS3Policy struct { |
|||
} |
|||
|
|||
func (c *commandS3Policy) Name() string { |
|||
return "s3.policy" |
|||
} |
|||
|
|||
func (c *commandS3Policy) Help() string { |
|||
return `manage s3 policies |
|||
|
|||
# create or update a policy |
|||
s3.policy -put -name=mypolicy -file=policy.json |
|||
|
|||
# list all policies |
|||
s3.policy -list |
|||
|
|||
# get a policy |
|||
s3.policy -get -name=mypolicy |
|||
|
|||
# delete a policy |
|||
s3.policy -delete -name=mypolicy |
|||
` |
|||
} |
|||
|
|||
func (c *commandS3Policy) HasTag(CommandTag) bool { |
|||
return false |
|||
} |
|||
|
|||
func (c *commandS3Policy) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { |
|||
|
|||
s3PolicyCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) |
|||
put := s3PolicyCommand.Bool("put", false, "create or update a policy") |
|||
get := s3PolicyCommand.Bool("get", false, "get a policy") |
|||
list := s3PolicyCommand.Bool("list", false, "list all policies") |
|||
del := s3PolicyCommand.Bool("delete", false, "delete a policy") |
|||
name := s3PolicyCommand.String("name", "", "policy name") |
|||
file := s3PolicyCommand.String("file", "", "policy file (json)") |
|||
|
|||
if err = s3PolicyCommand.Parse(args); err != nil { |
|||
return err |
|||
} |
|||
|
|||
actionCount := 0 |
|||
for _, v := range []bool{*put, *get, *list, *del} { |
|||
if v { |
|||
actionCount++ |
|||
} |
|||
} |
|||
if actionCount == 0 { |
|||
return fmt.Errorf("one of -put, -get, -list, -delete must be specified") |
|||
} |
|||
if actionCount > 1 { |
|||
return fmt.Errorf("only one of -put, -get, -list, -delete can be specified") |
|||
} |
|||
|
|||
return pb.WithGrpcClient(false, 0, func(conn *grpc.ClientConn) error { |
|||
client := iam_pb.NewSeaweedIdentityAccessManagementClient(conn) |
|||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) |
|||
defer cancel() |
|||
|
|||
if *put { |
|||
if *name == "" { |
|||
return fmt.Errorf("-name is required") |
|||
} |
|||
if *file == "" { |
|||
return fmt.Errorf("-file is required") |
|||
} |
|||
data, err := os.ReadFile(*file) |
|||
if err != nil { |
|||
return fmt.Errorf("failed to read policy file: %v", err) |
|||
} |
|||
|
|||
// Validate JSON
|
|||
var policy policy_engine.PolicyDocument |
|||
if err := json.Unmarshal(data, &policy); err != nil { |
|||
return fmt.Errorf("invalid policy json: %v", err) |
|||
} |
|||
|
|||
_, err = client.PutPolicy(ctx, &iam_pb.PutPolicyRequest{ |
|||
Name: *name, |
|||
Content: string(data), |
|||
}) |
|||
return err |
|||
} |
|||
|
|||
if *get { |
|||
if *name == "" { |
|||
return fmt.Errorf("-name is required") |
|||
} |
|||
resp, err := client.GetPolicy(ctx, &iam_pb.GetPolicyRequest{ |
|||
Name: *name, |
|||
}) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
if resp.Content == "" { |
|||
return fmt.Errorf("policy not found") |
|||
} |
|||
fmt.Fprintf(writer, "%s\n", resp.Content) |
|||
return nil |
|||
} |
|||
|
|||
if *list { |
|||
resp, err := client.ListPolicies(ctx, &iam_pb.ListPoliciesRequest{}) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
for _, policy := range resp.Policies { |
|||
fmt.Fprintf(writer, "Name: %s\n", policy.Name) |
|||
fmt.Fprintf(writer, "Content: %s\n", policy.Content) |
|||
fmt.Fprintf(writer, "---\n") |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
if *del { |
|||
if *name == "" { |
|||
return fmt.Errorf("-name is required") |
|||
} |
|||
_, err := client.DeletePolicy(ctx, &iam_pb.DeletePolicyRequest{ |
|||
Name: *name, |
|||
}) |
|||
return err |
|||
} |
|||
|
|||
return nil |
|||
}, commandEnv.option.FilerAddress.ToGrpcAddress(), false, commandEnv.option.GrpcDialOption) |
|||
|
|||
} |
|||
Write
Preview
Loading…
Cancel
Save
Reference in new issue