From d7c30fdb2bc41dbd1eba45bddc4b4424aa1a1bcb Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 11 Jan 2026 12:31:46 -0800 Subject: [PATCH 01/17] fix: admin does not show all master servers #7999 (#8002) --- weed/server/master_grpc_server_raft.go | 43 ++++++++++++++++++++------ 1 file changed, 34 insertions(+), 9 deletions(-) diff --git a/weed/server/master_grpc_server_raft.go b/weed/server/master_grpc_server_raft.go index 4aab7fe6e..aa7c6ff28 100644 --- a/weed/server/master_grpc_server_raft.go +++ b/weed/server/master_grpc_server_raft.go @@ -14,22 +14,47 @@ func (ms *MasterServer) RaftListClusterServers(ctx context.Context, req *master_ resp := &master_pb.RaftListClusterServersResponse{} ms.Topo.RaftServerAccessLock.RLock() - if ms.Topo.HashicorpRaft == nil { + if ms.Topo.HashicorpRaft == nil && ms.Topo.RaftServer == nil { ms.Topo.RaftServerAccessLock.RUnlock() return resp, nil } - servers := ms.Topo.HashicorpRaft.GetConfiguration().Configuration().Servers - _, leaderId := ms.Topo.HashicorpRaft.LeaderWithID() - ms.Topo.RaftServerAccessLock.RUnlock() + if ms.Topo.HashicorpRaft != nil { + servers := ms.Topo.HashicorpRaft.GetConfiguration().Configuration().Servers + _, leaderId := ms.Topo.HashicorpRaft.LeaderWithID() + ms.Topo.RaftServerAccessLock.RUnlock() - for _, server := range servers { + for _, server := range servers { + resp.ClusterServers = append(resp.ClusterServers, &master_pb.RaftListClusterServersResponse_ClusterServers{ + Id: string(server.ID), + Address: string(server.Address), + Suffrage: server.Suffrage.String(), + IsLeader: server.ID == leaderId, + }) + } + } else if ms.Topo.RaftServer != nil { + peers := ms.Topo.RaftServer.Peers() + leader := ms.Topo.RaftServer.Leader() + currentServerName := ms.Topo.RaftServer.Name() + ms.Topo.RaftServerAccessLock.RUnlock() + + // Add the current server itself (Peers() only returns other peers) resp.ClusterServers = append(resp.ClusterServers, &master_pb.RaftListClusterServersResponse_ClusterServers{ - Id: 
string(server.ID), - Address: string(server.Address), - Suffrage: server.Suffrage.String(), - IsLeader: server.ID == leaderId, + Id: currentServerName, + Address: string(ms.option.Master), + Suffrage: "Voter", + IsLeader: currentServerName == leader, }) + + // Add all other peers + for _, peer := range peers { + resp.ClusterServers = append(resp.ClusterServers, &master_pb.RaftListClusterServersResponse_ClusterServers{ + Id: peer.Name, + Address: peer.ConnectionString, + Suffrage: "Voter", + IsLeader: peer.Name == leader, + }) + } } return resp, nil } From 06391701eda120fdc614ae8605e743db3c7f8fe6 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 12 Jan 2026 10:45:24 -0800 Subject: [PATCH 02/17] Add AssumeRole and AssumeRoleWithLDAPIdentity STS actions (#8003) * test: add integration tests for AssumeRole and AssumeRoleWithLDAPIdentity STS actions - Add s3_sts_assume_role_test.go with comprehensive tests for AssumeRole: * Parameter validation (missing RoleArn, RoleSessionName, invalid duration) * AWS SigV4 authentication with valid/invalid credentials * Temporary credential generation and usage - Add s3_sts_ldap_test.go with tests for AssumeRoleWithLDAPIdentity: * Parameter validation (missing LDAP credentials, RoleArn) * LDAP authentication scenarios (valid/invalid credentials) * Integration with LDAP server (when configured) - Update Makefile with new test targets: * test-sts: run all STS tests * test-sts-assume-role: run AssumeRole tests only * test-sts-ldap: run LDAP STS tests only * test-sts-suite: run tests with full service lifecycle - Enhance setup_all_tests.sh: * Add OpenLDAP container setup for LDAP testing * Create test LDAP users (testuser, ldapadmin) * Set LDAP environment variables for tests * Update cleanup to remove LDAP container - Fix setup_keycloak.sh: * Enable verbose error logging for realm creation * Improve error diagnostics Tests use fail-fast approach (t.Fatal) when server not configured, ensuring clear feedback when infrastructure is 
missing. * feat: implement AssumeRole and AssumeRoleWithLDAPIdentity STS actions Implement two new STS actions to match MinIO's STS feature set: **AssumeRole Implementation:** - Add handleAssumeRole with full AWS SigV4 authentication - Integrate with existing IAM infrastructure via verifyV4Signature - Validate required parameters (RoleArn, RoleSessionName) - Validate DurationSeconds (900-43200 seconds range) - Generate temporary credentials with expiration - Return AWS-compatible XML response **AssumeRoleWithLDAPIdentity Implementation:** - Add handleAssumeRoleWithLDAPIdentity handler (stub) - Validate LDAP-specific parameters (LDAPUsername, LDAPPassword) - Validate common STS parameters (RoleArn, RoleSessionName, DurationSeconds) - Return proper error messages for missing LDAP provider - Ready for LDAP provider integration **Routing Fixes:** - Add explicit routes for AssumeRole and AssumeRoleWithLDAPIdentity - Prevent IAM handler from intercepting authenticated STS requests - Ensure proper request routing priority **Handler Infrastructure:** - Add IAM field to STSHandlers for SigV4 verification - Update NewSTSHandlers to accept IAM reference - Add STS-specific error codes and response types - Implement writeSTSErrorResponse for AWS-compatible errors The AssumeRole action is fully functional and tested. AssumeRoleWithLDAPIdentity requires LDAP provider implementation. * fix: update IAM matcher to exclude STS actions from interception Update the IAM handler matcher to check for STS actions (AssumeRole, AssumeRoleWithWebIdentity, AssumeRoleWithLDAPIdentity) and exclude them from IAM handler processing. This allows STS requests to be handled by the STS fallback handler even when they include AWS SigV4 authentication. The matcher now parses the form data to check the Action parameter and returns false for STS actions, ensuring they are routed to the correct handler. Note: This is a work-in-progress fix. 
Tests are still showing some routing issues that need further investigation. * fix: address PR review security issues for STS handlers This commit addresses all critical security issues from PR review: Security Fixes: - Use crypto/rand for cryptographically secure credential generation instead of time.Now().UnixNano() (fixes predictable credentials) - Add sts:AssumeRole permission check via VerifyActionPermission to prevent unauthorized role assumption - Generate proper session tokens using crypto/rand instead of placeholder strings Code Quality Improvements: - Refactor DurationSeconds parsing into reusable parseDurationSeconds() helper function used by all three STS handlers - Create generateSecureCredentials() helper for consistent and secure temporary credential generation - Fix iamMatcher to check query string as fallback when Action not found in form data LDAP Provider Implementation: - Add go-ldap/ldap/v3 dependency - Create LDAPProvider implementing IdentityProvider interface with full LDAP authentication support (connect, bind, search, groups) - Update ProviderFactory to create real LDAP providers - Wire LDAP provider into AssumeRoleWithLDAPIdentity handler Test Infrastructure: - Add LDAP user creation verification step in setup_all_tests.sh * fix: address PR feedback (Round 2) - config validation & provider improvements - Implement `validateLDAPConfig` in `ProviderFactory` - Improve `LDAPProvider.Initialize`: - Support `connectionTimeout` parsing (string/int/float) from config map - Warn if `BindDN` is present but `BindPassword` is empty - Improve `LDAPProvider.GetUserInfo`: - Add fallback to `searchUserGroups` if `memberOf` returns no groups (consistent with Authenticate) * fix: address PR feedback (Round 3) - LDAP connection improvements & build fix - Improve `LDAPProvider` connection handling: - Use `net.Dialer` with configured timeout for connection establishment - Enforce TLS 1.2+ (`MinVersion: tls.VersionTLS12`) for both LDAPS and StartTLS - Fix 
build error in `s3api_sts.go` (format verb for ErrorCode) * fix: address PR feedback (Round 4) - LDAP hardening, Authz check & Routing fix - LDAP Provider Hardening: - Prevent re-initialization - Enforce single user match in `GetUserInfo` (was explicit only in Authenticate) - Ensure connection closure if StartTLS fails - STS Handlers: - Add robust provider detection using type assertion - **Security**: Implement authorization check (`VerifyActionPermission`) after LDAP authentication - Routing: - Update tests to reflect that STS actions are handled by STS handler, not generic IAM * fix: address PR feedback (Round 5) - JWT tokens, ARN formatting, PrincipalArn CRITICAL FIXES: - Replace standalone credential generation with STS service JWT tokens - handleAssumeRole now generates proper JWT session tokens - handleAssumeRoleWithLDAPIdentity now generates proper JWT session tokens - Session tokens can be validated across distributed instances - Fix ARN formatting in responses - Extract role name from ARN using utils.ExtractRoleNameFromArn() - Prevents malformed ARNs like "arn:aws:sts::assumed-role/arn:aws:iam::..." 
- Add configurable AccountId for federated users - Add AccountId field to STSConfig (defaults to "111122223333") - PrincipalArn now uses configured account ID instead of hardcoded "aws" - Enables proper trust policy validation IMPROVEMENTS: - Sanitize LDAP authentication error messages (don't leak internal details) - Remove duplicate comment in provider detection - Add utils import for ARN parsing utilities * feat: implement LDAP connection pooling to prevent resource exhaustion PERFORMANCE IMPROVEMENT: - Add connection pool to LDAPProvider (default size: 10 connections) - Reuse LDAP connections across authentication requests - Prevent file descriptor exhaustion under high load IMPLEMENTATION: - connectionPool struct with channel-based connection management - getConnection(): retrieves from pool or creates new connection - returnConnection(): returns healthy connections to pool - createConnection(): establishes new LDAP connection with TLS support - Close(): cleanup method to close all pooled connections - Connection health checking (IsClosing()) before reuse BENEFITS: - Reduced connection overhead (no TCP handshake per request) - Better resource utilization under load - Prevents "too many open files" errors - Non-blocking pool operations (creates new conn if pool empty) * fix: correct TokenGenerator access in STS handlers CRITICAL FIX: - Make TokenGenerator public in STSService (was private tokenGenerator) - Update all references from Config.TokenGenerator to TokenGenerator - Remove TokenGenerator from STSConfig (it belongs in STSService) This fixes the "NotImplemented" errors in distributed and Keycloak tests. The issue was that Round 5 changes tried to access Config.TokenGenerator which didn't exist - TokenGenerator is a field in STSService, not STSConfig. The TokenGenerator is properly initialized in STSService.Initialize() and is now accessible for JWT token generation in AssumeRole handlers. 
* fix: update tests to use public TokenGenerator field Following the change to make TokenGenerator public in STSService, this commit updates the test files to reference the correct public field name. This resolves compilation errors in the IAM STS test suite. * fix: update distributed tests to use valid Keycloak users Updated s3_iam_distributed_test.go to use 'admin-user' and 'read-user' which exist in the standard Keycloak setup provided by setup_keycloak.sh. This resolves 'unknown test user' errors in distributed integration tests. * fix: ensure iam_config.json exists in setup target for CI The GitHub Actions workflow calls 'make setup' which was not creating iam_config.json, causing the server to start without IAM integration enabled (iamIntegration = nil), resulting in NotImplemented errors. Now 'make setup' copies iam_config.local.json to iam_config.json if it doesn't exist, ensuring IAM is properly configured in CI. * fix(iam/ldap): fix connection pool race and rebind corruption - Add atomic 'closed' flag to connection pool to prevent racing on Close() - Rebind authenticated user connections back to service account before returning to pool - Close connections on error instead of returning potentially corrupted state to pool * fix(iam/ldap): populate standard TokenClaims fields in ValidateToken - Set Subject, Issuer, Audience, IssuedAt, and ExpiresAt to satisfy the interface - Use time.Time for timestamps as required by TokenClaims struct - Default to 1 hour TTL for LDAP tokens * fix(s3api): include account ID in STS AssumedRoleUser ARN - Consistent with AWS, include the account ID in the assumed-role ARN - Use the configured account ID from STS service if available, otherwise default to '111122223333' - Apply to both AssumeRole and AssumeRoleWithLDAPIdentity handlers - Also update .gitignore to ignore IAM test environment files * refactor(s3api): extract shared STS credential generation logic - Move common logic for session claims and credential generation to 
prepareSTSCredentials - Update handleAssumeRole and handleAssumeRoleWithLDAPIdentity to use the helper - Remove stale comments referencing outdated line numbers * feat(iam/ldap): make pool size configurable and add audience support - Add PoolSize to LDAPConfig (default 10) - Add Audience to LDAPConfig to align with OIDC validation - Update initialization and ValidateToken to use new fields * update tests * debug * chore(iam): cleanup debug prints and fix test config port * refactor(iam): use mapstructure for LDAP config parsing * feat(sts): implement strict trust policy validation for AssumeRole * test(iam): refactor STS tests to use AWS SDK signer * test(s3api): implement ValidateTrustPolicyForPrincipal in MockIAMIntegration * fix(s3api): ensure IAM matcher checks query string on ParseForm error * fix(sts): use crypto/rand for secure credentials and extract constants * fix(iam): fix ldap connection leaks and add insecure warning * chore(iam): improved error wrapping and test parameterization * feat(sts): add support for LDAPProviderName parameter * Update weed/iam/ldap/ldap_provider.go Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * Update weed/s3api/s3api_sts.go Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * fix(sts): use STSErrSTSNotReady when LDAP provider is missing * fix(sts): encapsulate TokenGenerator in STSService and add getter --------- Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .gitignore | 1 + go.mod | 2 + go.sum | 4 + test/s3/iam/Makefile | 27 +- test/s3/iam/iam_config.json | 114 +++- test/s3/iam/iam_config.local.json | 6 +- test/s3/iam/s3_iam_distributed_test.go | 8 +- test/s3/iam/s3_sts_assume_role_test.go | 357 ++++++++++++ test/s3/iam/s3_sts_ldap_test.go | 291 ++++++++++ test/s3/iam/setup_all_tests.sh | 82 +++ test/s3/iam/setup_keycloak.sh | 2 +- weed/iam/integration/advanced_policy_test.go | 4 +- weed/iam/integration/iam_integration_test.go | 6 +- 
weed/iam/integration/iam_manager.go | 18 +- weed/iam/integration/iam_manager_trust.go | 43 ++ weed/iam/ldap/ldap_provider.go | 571 +++++++++++++++++++ weed/iam/sts/cross_instance_token_test.go | 24 +- weed/iam/sts/distributed_sts_test.go | 24 +- weed/iam/sts/provider_factory.go | 15 +- weed/iam/sts/sts_service.go | 14 +- weed/s3api/auth_credentials_trust.go | 15 + weed/s3api/auth_signature_v4_sts_test.go | 12 +- weed/s3api/s3_end_to_end_test.go | 8 +- weed/s3api/s3_iam_middleware.go | 9 + weed/s3api/s3_jwt_auth_test.go | 10 +- weed/s3api/s3_multipart_iam_test.go | 4 +- weed/s3api/s3_presigned_url_iam_test.go | 6 +- weed/s3api/s3api_server.go | 40 +- weed/s3api/s3api_server_routing_test.go | 4 +- weed/s3api/s3api_sts.go | 429 +++++++++++++- 30 files changed, 2027 insertions(+), 123 deletions(-) create mode 100644 test/s3/iam/s3_sts_assume_role_test.go create mode 100644 test/s3/iam/s3_sts_ldap_test.go create mode 100644 weed/iam/integration/iam_manager_trust.go create mode 100644 weed/iam/ldap/ldap_provider.go create mode 100644 weed/s3api/auth_credentials_trust.go diff --git a/.gitignore b/.gitignore index 10bc81f63..f8e614b17 100644 --- a/.gitignore +++ b/.gitignore @@ -137,3 +137,4 @@ test/s3/remote_cache/primary-server.pid # ID and PID files *.id *.pid +test/s3/iam/.test_env diff --git a/go.mod b/go.mod index 6cce5cff3..f90f3fb0d 100644 --- a/go.mod +++ b/go.mod @@ -183,6 +183,8 @@ require ( github.com/cockroachdb/redact v1.1.5 // indirect github.com/cockroachdb/version v0.0.0-20250314144055-3860cd14adf2 // indirect github.com/dave/dst v0.27.2 // indirect + github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 // indirect + github.com/go-ldap/ldap/v3 v3.4.12 // indirect github.com/goccy/go-yaml v1.18.0 // indirect github.com/golang/geo v0.0.0-20210211234256-740aa86cb551 // indirect github.com/google/go-cmp v0.7.0 // indirect diff --git a/go.sum b/go.sum index 90276eb64..3fdbd2de6 100644 --- a/go.sum +++ b/go.sum @@ -936,6 +936,8 @@ 
github.com/gin-contrib/sse v1.1.0 h1:n0w2GMuUpWDVp7qSpvze6fAu9iRxJY4Hmj6AmBOU05w github.com/gin-contrib/sse v1.1.0/go.mod h1:hxRZ5gVpWMT7Z0B0gSNYqqsSCNIJMjzvm6fqCz9vjwM= github.com/gin-gonic/gin v1.11.0 h1:OW/6PLjyusp2PPXtyxKHU0RbX6I/l28FTdDlae5ueWk= github.com/gin-gonic/gin v1.11.0/go.mod h1:+iq/FyxlGzII0KHiBGjuNn4UNENUlKbGlNmc+W50Dls= +github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 h1:BP4M0CvQ4S3TGls2FvczZtj5Re/2ZzkV9VwqPHH/3Bo= +github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= github.com/go-chi/chi/v5 v5.2.2 h1:CMwsvRVTbXVytCk1Wd72Zy1LAsAh9GxMmSNWLHCG618= github.com/go-chi/chi/v5 v5.2.2/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= github.com/go-darwin/apfs v0.0.0-20211011131704-f84b94dbf348 h1:JnrjqG5iR07/8k7NqrLNilRsl3s1EPRQEGvbPyOce68= @@ -957,6 +959,8 @@ github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk= +github.com/go-ldap/ldap/v3 v3.4.12 h1:1b81mv7MagXZ7+1r7cLTWmyuTqVqdwbtJSjC0DAp9s4= +github.com/go-ldap/ldap/v3 v3.4.12/go.mod h1:+SPAGcTtOfmGsCb3h1RFiq4xpp4N636G75OEace8lNo= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= diff --git a/test/s3/iam/Makefile b/test/s3/iam/Makefile index 5113b6b57..aad6d4fbd 100644 --- a/test/s3/iam/Makefile +++ b/test/s3/iam/Makefile @@ -57,6 +57,10 @@ setup: ## Setup test environment @echo "Setting up test environment..." 
@mkdir -p test-volume-data/filerldb2 @mkdir -p test-volume-data/m9333 + @if [ ! -f iam_config.json ]; then \ + echo "Creating iam_config.json from iam_config.local.json..."; \ + cp iam_config.local.json iam_config.json; \ + fi start-services: ## Start SeaweedFS services for testing @echo "Starting SeaweedFS services using weed mini..." @@ -125,6 +129,10 @@ clean: stop-services ## Clean up test environment @rm -rf test-volume-data @rm -f weed-*.log @rm -f *.test + @rm -f iam_config.json + @rm -f .test_env + @docker rm -f keycloak-iam-test >/dev/null 2>&1 || true + @docker rm -f openldap-iam-test >/dev/null 2>&1 || true @echo "Cleanup complete" logs: ## Show service logs @@ -176,6 +184,20 @@ test-context: ## Test only contextual policy enforcement test-presigned: ## Test only presigned URL integration go test -v -run TestS3IAMPresignedURLIntegration ./... +test-sts: ## Run all STS tests + go test -v -run "TestSTS" ./... + +test-sts-assume-role: ## Run AssumeRole STS tests + go test -v -run "TestSTSAssumeRole" ./... + +test-sts-ldap: ## Run LDAP STS tests + go test -v -run "TestSTSLDAP" ./... + +test-sts-suite: start-services ## Run all STS tests with full environment setup/teardown + @echo "Running STS test suite..." + -go test -v -run "TestSTS" ./... + @$(MAKE) stop-services + # Performance testing benchmark: setup start-services wait-for-services ## Run performance benchmarks @echo "🏁 Running IAM performance benchmarks..." 
@@ -240,7 +262,7 @@ docker-build: ## Build custom SeaweedFS image for Docker tests # All PHONY targets .PHONY: test test-quick run-tests setup start-services stop-services wait-for-services clean logs status debug -.PHONY: test-auth test-policy test-expiration test-multipart test-bucket-policy test-context test-presigned +.PHONY: test-auth test-policy test-expiration test-multipart test-bucket-policy test-context test-presigned test-sts test-sts-assume-role test-sts-ldap .PHONY: benchmark ci watch install-deps docker-test docker-up docker-down docker-logs docker-build .PHONY: test-distributed test-performance test-stress test-versioning-stress test-keycloak-full test-all-previously-skipped setup-all-tests help-advanced @@ -275,6 +297,9 @@ test-all-previously-skipped: ## Run all previously skipped tests @echo "🎯 Running all previously skipped tests..." @./run_all_tests.sh +.PHONY: cleanup +cleanup: clean + setup-all-tests: ## Setup environment for all tests (including Keycloak) @echo "🚀 Setting up complete test environment..." 
@./setup_all_tests.sh diff --git a/test/s3/iam/iam_config.json b/test/s3/iam/iam_config.json index 7a903b047..ed1f0df47 100644 --- a/test/s3/iam/iam_config.json +++ b/test/s3/iam/iam_config.json @@ -1,7 +1,7 @@ { "sts": { "tokenDuration": "1h", - "maxSessionLength": "12h", + "maxSessionLength": "12h", "issuer": "seaweedfs-sts", "signingKey": "dGVzdC1zaWduaW5nLWtleS0zMi1jaGFyYWN0ZXJzLWxvbmc=" }, @@ -24,7 +24,11 @@ "clientSecret": "seaweedfs-s3-secret", "jwksUri": "http://localhost:8080/realms/seaweedfs-test/protocol/openid-connect/certs", "userInfoUri": "http://localhost:8080/realms/seaweedfs-test/protocol/openid-connect/userinfo", - "scopes": ["openid", "profile", "email"], + "scopes": [ + "openid", + "profile", + "email" + ], "claimsMapping": { "username": "preferred_username", "email": "email", @@ -38,13 +42,13 @@ "role": "arn:aws:iam::role/KeycloakAdminRole" }, { - "claim": "roles", + "claim": "roles", "value": "s3-read-only", "role": "arn:aws:iam::role/KeycloakReadOnlyRole" }, { "claim": "roles", - "value": "s3-write-only", + "value": "s3-write-only", "role": "arn:aws:iam::role/KeycloakWriteOnlyRole" }, { @@ -73,15 +77,19 @@ "Principal": { "Federated": "test-oidc" }, - "Action": ["sts:AssumeRoleWithWebIdentity"] + "Action": [ + "sts:AssumeRoleWithWebIdentity" + ] } ] }, - "attachedPolicies": ["S3AdminPolicy"], + "attachedPolicies": [ + "S3AdminPolicy" + ], "description": "Admin role for testing" }, { - "roleName": "TestReadOnlyRole", + "roleName": "TestReadOnlyRole", "roleArn": "arn:aws:iam::role/TestReadOnlyRole", "trustPolicy": { "Version": "2012-10-17", @@ -91,15 +99,19 @@ "Principal": { "Federated": "test-oidc" }, - "Action": ["sts:AssumeRoleWithWebIdentity"] + "Action": [ + "sts:AssumeRoleWithWebIdentity" + ] } ] }, - "attachedPolicies": ["S3ReadOnlyPolicy"], + "attachedPolicies": [ + "S3ReadOnlyPolicy" + ], "description": "Read-only role for testing" }, { - "roleName": "TestWriteOnlyRole", + "roleName": "TestWriteOnlyRole", "roleArn": 
"arn:aws:iam::role/TestWriteOnlyRole", "trustPolicy": { "Version": "2012-10-17", @@ -109,11 +121,15 @@ "Principal": { "Federated": "test-oidc" }, - "Action": ["sts:AssumeRoleWithWebIdentity"] + "Action": [ + "sts:AssumeRoleWithWebIdentity" + ] } ] }, - "attachedPolicies": ["S3WriteOnlyPolicy"], + "attachedPolicies": [ + "S3WriteOnlyPolicy" + ], "description": "Write-only role for testing" }, { @@ -127,11 +143,15 @@ "Principal": { "Federated": "keycloak" }, - "Action": ["sts:AssumeRoleWithWebIdentity"] + "Action": [ + "sts:AssumeRoleWithWebIdentity" + ] } ] }, - "attachedPolicies": ["S3AdminPolicy"], + "attachedPolicies": [ + "S3AdminPolicy" + ], "description": "Admin role for Keycloak users" }, { @@ -145,11 +165,15 @@ "Principal": { "Federated": "keycloak" }, - "Action": ["sts:AssumeRoleWithWebIdentity"] + "Action": [ + "sts:AssumeRoleWithWebIdentity" + ] } ] }, - "attachedPolicies": ["S3ReadOnlyPolicy"], + "attachedPolicies": [ + "S3ReadOnlyPolicy" + ], "description": "Read-only role for Keycloak users" }, { @@ -163,11 +187,15 @@ "Principal": { "Federated": "keycloak" }, - "Action": ["sts:AssumeRoleWithWebIdentity"] + "Action": [ + "sts:AssumeRoleWithWebIdentity" + ] } ] }, - "attachedPolicies": ["S3WriteOnlyPolicy"], + "attachedPolicies": [ + "S3WriteOnlyPolicy" + ], "description": "Write-only role for Keycloak users" }, { @@ -181,11 +209,15 @@ "Principal": { "Federated": "keycloak" }, - "Action": ["sts:AssumeRoleWithWebIdentity"] + "Action": [ + "sts:AssumeRoleWithWebIdentity" + ] } ] }, - "attachedPolicies": ["S3ReadWritePolicy"], + "attachedPolicies": [ + "S3ReadWritePolicy" + ], "description": "Read-write role for Keycloak users" } ], @@ -197,13 +229,21 @@ "Statement": [ { "Effect": "Allow", - "Action": ["s3:*"], - "Resource": ["*"] + "Action": [ + "s3:*" + ], + "Resource": [ + "*" + ] }, { "Effect": "Allow", - "Action": ["sts:ValidateSession"], - "Resource": ["*"] + "Action": [ + "sts:ValidateSession" + ], + "Resource": [ + "*" + ] } ] } @@ -211,7 +251,7 @@ 
{ "name": "S3ReadOnlyPolicy", "document": { - "Version": "2012-10-17", + "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", @@ -226,8 +266,12 @@ }, { "Effect": "Allow", - "Action": ["sts:ValidateSession"], - "Resource": ["*"] + "Action": [ + "sts:ValidateSession" + ], + "Resource": [ + "*" + ] } ] } @@ -260,8 +304,12 @@ }, { "Effect": "Allow", - "Action": ["sts:ValidateSession"], - "Resource": ["*"] + "Action": [ + "sts:ValidateSession" + ], + "Resource": [ + "*" + ] } ] } @@ -283,8 +331,12 @@ }, { "Effect": "Allow", - "Action": ["sts:ValidateSession"], - "Resource": ["*"] + "Action": [ + "sts:ValidateSession" + ], + "Resource": [ + "*" + ] } ] } diff --git a/test/s3/iam/iam_config.local.json b/test/s3/iam/iam_config.local.json index 30522771b..ed1f0df47 100644 --- a/test/s3/iam/iam_config.local.json +++ b/test/s3/iam/iam_config.local.json @@ -19,11 +19,11 @@ "type": "oidc", "enabled": true, "config": { - "issuer": "http://localhost:8090/realms/seaweedfs-test", + "issuer": "http://localhost:8080/realms/seaweedfs-test", "clientId": "seaweedfs-s3", "clientSecret": "seaweedfs-s3-secret", - "jwksUri": "http://localhost:8090/realms/seaweedfs-test/protocol/openid-connect/certs", - "userInfoUri": "http://localhost:8090/realms/seaweedfs-test/protocol/openid-connect/userinfo", + "jwksUri": "http://localhost:8080/realms/seaweedfs-test/protocol/openid-connect/certs", + "userInfoUri": "http://localhost:8080/realms/seaweedfs-test/protocol/openid-connect/userinfo", "scopes": [ "openid", "profile", diff --git a/test/s3/iam/s3_iam_distributed_test.go b/test/s3/iam/s3_iam_distributed_test.go index fbaf25e9d..be44f1e00 100644 --- a/test/s3/iam/s3_iam_distributed_test.go +++ b/test/s3/iam/s3_iam_distributed_test.go @@ -30,10 +30,10 @@ func TestS3IAMDistributedTests(t *testing.T) { // Create S3 clients that would connect to different gateway instances // In a real distributed setup, these would point to different S3 gateway ports - client1, err := 
framework.CreateS3ClientWithJWT("test-user", "TestAdminRole") + client1, err := framework.CreateS3ClientWithJWT("admin-user", "TestAdminRole") require.NoError(t, err) - client2, err := framework.CreateS3ClientWithJWT("test-user", "TestAdminRole") + client2, err := framework.CreateS3ClientWithJWT("admin-user", "TestAdminRole") require.NoError(t, err) // Both clients should be able to perform operations @@ -70,7 +70,7 @@ func TestS3IAMDistributedTests(t *testing.T) { adminClient, err := framework.CreateS3ClientWithJWT("admin-user", "TestAdminRole") require.NoError(t, err) - readOnlyClient, err := framework.CreateS3ClientWithJWT("readonly-user", "TestReadOnlyRole") + readOnlyClient, err := framework.CreateS3ClientWithJWT("read-user", "TestReadOnlyRole") require.NoError(t, err) bucketName := "test-distributed-roles" @@ -160,7 +160,7 @@ func TestS3IAMDistributedTests(t *testing.T) { go func(goroutineID int) { defer wg.Done() - client, err := framework.CreateS3ClientWithJWT(fmt.Sprintf("user-%d", goroutineID), "TestAdminRole") + client, err := framework.CreateS3ClientWithJWT("admin-user", "TestAdminRole") if err != nil { errors <- fmt.Errorf("failed to create S3 client for goroutine %d: %w", goroutineID, err) return diff --git a/test/s3/iam/s3_sts_assume_role_test.go b/test/s3/iam/s3_sts_assume_role_test.go new file mode 100644 index 000000000..36fa4e2d8 --- /dev/null +++ b/test/s3/iam/s3_sts_assume_role_test.go @@ -0,0 +1,357 @@ +package iam + +import ( + "encoding/xml" + "fmt" + "io" + "net/http" + "net/url" + "os" + "strings" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws/credentials" + v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// AssumeRoleResponse represents the STS AssumeRole response +type AssumeRoleTestResponse struct { + XMLName xml.Name `xml:"AssumeRoleResponse"` + Result struct { + Credentials struct { + AccessKeyId string `xml:"AccessKeyId"` + SecretAccessKey 
string `xml:"SecretAccessKey"` + SessionToken string `xml:"SessionToken"` + Expiration string `xml:"Expiration"` + } `xml:"Credentials"` + AssumedRoleUser struct { + AssumedRoleId string `xml:"AssumedRoleId"` + Arn string `xml:"Arn"` + } `xml:"AssumedRoleUser"` + } `xml:"AssumeRoleResult"` +} + +// TestSTSAssumeRoleValidation tests input validation for AssumeRole endpoint +func TestSTSAssumeRoleValidation(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if !isSTSEndpointRunning(t) { + t.Fatal("SeaweedFS STS endpoint is not running at", TestSTSEndpoint, "- please run 'make setup-all-tests' first") + } + + // Check if AssumeRole is implemented by making a test call + if !isAssumeRoleImplemented(t) { + t.Fatal("AssumeRole action is not implemented in the running server - please rebuild weed binary with new code and restart the server") + } + + t.Run("missing_role_arn", func(t *testing.T) { + resp, err := callSTSAPIWithSigV4(t, url.Values{ + "Action": {"AssumeRole"}, + "Version": {"2011-06-15"}, + "RoleSessionName": {"test-session"}, + // RoleArn is missing + }, "test-access-key", "test-secret-key") + require.NoError(t, err) + defer resp.Body.Close() + + assert.NotEqual(t, http.StatusOK, resp.StatusCode, + "Should fail without RoleArn") + + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + var errResp STSErrorTestResponse + err = xml.Unmarshal(body, &errResp) + require.NoError(t, err, "Failed to parse error response: %s", string(body)) + assert.Equal(t, "MissingParameter", errResp.Error.Code) + }) + + t.Run("missing_role_session_name", func(t *testing.T) { + resp, err := callSTSAPIWithSigV4(t, url.Values{ + "Action": {"AssumeRole"}, + "Version": {"2011-06-15"}, + "RoleArn": {"arn:aws:iam::role/test-role"}, + // RoleSessionName is missing + }, "test-access-key", "test-secret-key") + require.NoError(t, err) + defer resp.Body.Close() + + assert.NotEqual(t, http.StatusOK, resp.StatusCode, + "Should fail without 
RoleSessionName") + + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + var errResp STSErrorTestResponse + err = xml.Unmarshal(body, &errResp) + require.NoError(t, err, "Failed to parse error response: %s", string(body)) + assert.Equal(t, "MissingParameter", errResp.Error.Code) + }) + + t.Run("unsupported_action_for_anonymous", func(t *testing.T) { + // AssumeRole requires SigV4 authentication, anonymous requests should fail + resp, err := callSTSAPI(t, url.Values{ + "Action": {"AssumeRole"}, + "Version": {"2011-06-15"}, + "RoleArn": {"arn:aws:iam::role/test-role"}, + "RoleSessionName": {"test-session"}, + }) + require.NoError(t, err) + defer resp.Body.Close() + + // Should fail because AssumeRole requires AWS SigV4 authentication + assert.NotEqual(t, http.StatusOK, resp.StatusCode, + "AssumeRole should require authentication") + + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + t.Logf("Response for anonymous AssumeRole: status=%d, body=%s", resp.StatusCode, string(body)) + }) + + t.Run("invalid_duration_too_short", func(t *testing.T) { + resp, err := callSTSAPIWithSigV4(t, url.Values{ + "Action": {"AssumeRole"}, + "Version": {"2011-06-15"}, + "RoleArn": {"arn:aws:iam::role/test-role"}, + "RoleSessionName": {"test-session"}, + "DurationSeconds": {"100"}, // Less than 900 seconds minimum + }, "test-access-key", "test-secret-key") + require.NoError(t, err) + defer resp.Body.Close() + + assert.NotEqual(t, http.StatusOK, resp.StatusCode, + "Should fail with DurationSeconds < 900") + + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + var errResp STSErrorTestResponse + err = xml.Unmarshal(body, &errResp) + require.NoError(t, err, "Failed to parse error response: %s", string(body)) + assert.Equal(t, "InvalidParameterValue", errResp.Error.Code) + }) + + t.Run("invalid_duration_too_long", func(t *testing.T) { + resp, err := callSTSAPIWithSigV4(t, url.Values{ + "Action": {"AssumeRole"}, + "Version": {"2011-06-15"}, + "RoleArn": 
{"arn:aws:iam::role/test-role"}, + "RoleSessionName": {"test-session"}, + "DurationSeconds": {"100000"}, // More than 43200 seconds maximum + }, "test-access-key", "test-secret-key") + require.NoError(t, err) + defer resp.Body.Close() + + assert.NotEqual(t, http.StatusOK, resp.StatusCode, + "Should fail with DurationSeconds > 43200") + + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + var errResp STSErrorTestResponse + err = xml.Unmarshal(body, &errResp) + require.NoError(t, err, "Failed to parse error response: %s", string(body)) + assert.Equal(t, "InvalidParameterValue", errResp.Error.Code) + }) +} + +// isAssumeRoleImplemented checks if the running server supports AssumeRole +func isAssumeRoleImplemented(t *testing.T) bool { + resp, err := callSTSAPIWithSigV4(t, url.Values{ + "Action": {"AssumeRole"}, + "Version": {"2011-06-15"}, + "RoleArn": {"arn:aws:iam::role/test"}, + "RoleSessionName": {"test"}, + }, "test", "test") + if err != nil { + return false + } + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return false + } + + // If we get "NotImplemented", the action isn't supported + var errResp STSErrorTestResponse + if xml.Unmarshal(body, &errResp) == nil && errResp.Error.Code == "NotImplemented" { + return false + } + + // If we get InvalidAction, the action isn't routed + if errResp.Error.Code == "InvalidAction" { + return false + } + + return true +} + +// TestSTSAssumeRoleWithValidCredentials tests AssumeRole with valid IAM credentials +// This test requires a configured IAM user in SeaweedFS +func TestSTSAssumeRoleWithValidCredentials(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if !isSTSEndpointRunning(t) { + t.Skip("SeaweedFS STS endpoint is not running at", TestSTSEndpoint) + } + + // Use test credentials from environment or fall back to defaults + accessKey := os.Getenv("STS_TEST_ACCESS_KEY") + if accessKey == "" { + accessKey = "admin" + } + 
secretKey := os.Getenv("STS_TEST_SECRET_KEY") + if secretKey == "" { + secretKey = "admin" + } + + t.Run("successful_assume_role", func(t *testing.T) { + resp, err := callSTSAPIWithSigV4(t, url.Values{ + "Action": {"AssumeRole"}, + "Version": {"2011-06-15"}, + "RoleArn": {"arn:aws:iam::role/admin"}, + "RoleSessionName": {"integration-test-session"}, + }, accessKey, secretKey) + require.NoError(t, err) + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + t.Logf("Response status: %d, body: %s", resp.StatusCode, string(body)) + + // If AssumeRole is not yet implemented, expect an error about unsupported action + if resp.StatusCode != http.StatusOK { + var errResp STSErrorTestResponse + err = xml.Unmarshal(body, &errResp) + require.NoError(t, err, "Failed to parse error response: %s", string(body)) + t.Logf("Error response: code=%s, message=%s", errResp.Error.Code, errResp.Error.Message) + + // This test will initially fail until AssumeRole is implemented + // Once implemented, uncomment the assertions below + // assert.Fail(t, "AssumeRole not yet implemented") + } else { + var stsResp AssumeRoleTestResponse + err = xml.Unmarshal(body, &stsResp) + require.NoError(t, err, "Failed to parse response: %s", string(body)) + + creds := stsResp.Result.Credentials + assert.NotEmpty(t, creds.AccessKeyId, "AccessKeyId should not be empty") + assert.NotEmpty(t, creds.SecretAccessKey, "SecretAccessKey should not be empty") + assert.NotEmpty(t, creds.SessionToken, "SessionToken should not be empty") + assert.NotEmpty(t, creds.Expiration, "Expiration should not be empty") + + t.Logf("Successfully obtained temporary credentials: AccessKeyId=%s", creds.AccessKeyId) + } + }) + + t.Run("with_custom_duration", func(t *testing.T) { + resp, err := callSTSAPIWithSigV4(t, url.Values{ + "Action": {"AssumeRole"}, + "Version": {"2011-06-15"}, + "RoleArn": {"arn:aws:iam::role/admin"}, + "RoleSessionName": {"duration-test-session"}, + "DurationSeconds": 
{"3600"}, // 1 hour + }, accessKey, secretKey) + require.NoError(t, err) + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + t.Logf("Response status: %d, body: %s", resp.StatusCode, string(body)) + + // Verify DurationSeconds is accepted + if resp.StatusCode != http.StatusOK { + var errResp STSErrorTestResponse + err = xml.Unmarshal(body, &errResp) + require.NoError(t, err, "Failed to parse error response: %s", string(body)) + // Should not fail due to DurationSeconds parameter + assert.NotContains(t, errResp.Error.Message, "DurationSeconds", + "DurationSeconds parameter should be accepted") + } + }) +} + +// TestSTSAssumeRoleWithInvalidCredentials tests AssumeRole rejection with bad credentials +func TestSTSAssumeRoleWithInvalidCredentials(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if !isSTSEndpointRunning(t) { + t.Skip("SeaweedFS STS endpoint is not running at", TestSTSEndpoint) + } + + t.Run("invalid_access_key", func(t *testing.T) { + resp, err := callSTSAPIWithSigV4(t, url.Values{ + "Action": {"AssumeRole"}, + "Version": {"2011-06-15"}, + "RoleArn": {"arn:aws:iam::role/admin"}, + "RoleSessionName": {"test-session"}, + }, "invalid-access-key", "some-secret-key") + require.NoError(t, err) + defer resp.Body.Close() + + // Should fail with access denied or signature mismatch + assert.NotEqual(t, http.StatusOK, resp.StatusCode, + "Should fail with invalid access key") + + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + t.Logf("Response for invalid credentials: status=%d, body=%s", resp.StatusCode, string(body)) + }) + + t.Run("invalid_secret_key", func(t *testing.T) { + resp, err := callSTSAPIWithSigV4(t, url.Values{ + "Action": {"AssumeRole"}, + "Version": {"2011-06-15"}, + "RoleArn": {"arn:aws:iam::role/admin"}, + "RoleSessionName": {"test-session"}, + }, "admin", "wrong-secret-key") + require.NoError(t, err) + defer resp.Body.Close() + + // Should fail 
with signature mismatch + assert.NotEqual(t, http.StatusOK, resp.StatusCode, + "Should fail with invalid secret key") + + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + t.Logf("Response for wrong secret: status=%d, body=%s", resp.StatusCode, string(body)) + }) +} + +// callSTSAPIWithSigV4 makes an STS API call with AWS Signature V4 authentication +func callSTSAPIWithSigV4(t *testing.T, params url.Values, accessKey, secretKey string) (*http.Response, error) { + // Prepare request body + body := params.Encode() + + // Create request + req, err := http.NewRequest(http.MethodPost, TestSTSEndpoint+"/", + strings.NewReader(body)) + if err != nil { + return nil, err + } + + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + req.Header.Set("Host", req.URL.Host) + + // Sign request with AWS Signature V4 using official SDK + creds := credentials.NewStaticCredentials(accessKey, secretKey, "") + signer := v4.NewSigner(creds) + + // Read body for signing + // Note: We need a ReadSeeker for the signer, or we can pass the body string/bytes to ComputeBodyHash if needed, + // but standard Sign method takes an io.ReadSeeker for the body. 
+ bodyReader := strings.NewReader(body) + _, err = signer.Sign(req, bodyReader, "sts", "us-east-1", time.Now()) + if err != nil { + return nil, fmt.Errorf("failed to sign request: %w", err) + } + + client := &http.Client{Timeout: 30 * time.Second} + return client.Do(req) +} diff --git a/test/s3/iam/s3_sts_ldap_test.go b/test/s3/iam/s3_sts_ldap_test.go new file mode 100644 index 000000000..c696555fb --- /dev/null +++ b/test/s3/iam/s3_sts_ldap_test.go @@ -0,0 +1,291 @@ +package iam + +import ( + "encoding/xml" + "io" + "net/http" + "net/url" + "os" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// AssumeRoleWithLDAPIdentityResponse represents the STS response for LDAP identity +type AssumeRoleWithLDAPIdentityTestResponse struct { + XMLName xml.Name `xml:"AssumeRoleWithLDAPIdentityResponse"` + Result struct { + Credentials struct { + AccessKeyId string `xml:"AccessKeyId"` + SecretAccessKey string `xml:"SecretAccessKey"` + SessionToken string `xml:"SessionToken"` + Expiration string `xml:"Expiration"` + } `xml:"Credentials"` + } `xml:"AssumeRoleWithLDAPIdentityResult"` +} + +// TestSTSLDAPValidation tests input validation for AssumeRoleWithLDAPIdentity +func TestSTSLDAPValidation(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if !isSTSEndpointRunning(t) { + t.Fatal("SeaweedFS STS endpoint is not running at", TestSTSEndpoint, "- please run 'make setup-all-tests' first") + } + + // Check if AssumeRoleWithLDAPIdentity is implemented + if !isLDAPIdentityActionImplemented(t) { + t.Fatal("AssumeRoleWithLDAPIdentity action is not implemented in the running server - please rebuild weed binary with new code and restart the server") + } + + t.Run("missing_ldap_username", func(t *testing.T) { + resp, err := callSTSAPIForLDAP(t, url.Values{ + "Action": {"AssumeRoleWithLDAPIdentity"}, + "Version": {"2011-06-15"}, + "RoleArn": {"arn:aws:iam::role/test-role"}, 
+ "RoleSessionName": {"test-session"}, + "LDAPPassword": {"testpass"}, + // LDAPUsername is missing + }) + require.NoError(t, err) + defer resp.Body.Close() + + assert.NotEqual(t, http.StatusOK, resp.StatusCode, + "Should fail without LDAPUsername") + + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + var errResp STSErrorTestResponse + err = xml.Unmarshal(body, &errResp) + require.NoError(t, err, "Failed to parse error response: %s", string(body)) + // Expect either MissingParameter or InvalidAction (if not implemented) + assert.Contains(t, []string{"MissingParameter", "InvalidAction"}, errResp.Error.Code) + }) + + t.Run("missing_ldap_password", func(t *testing.T) { + resp, err := callSTSAPIForLDAP(t, url.Values{ + "Action": {"AssumeRoleWithLDAPIdentity"}, + "Version": {"2011-06-15"}, + "RoleArn": {"arn:aws:iam::role/test-role"}, + "RoleSessionName": {"test-session"}, + "LDAPUsername": {"testuser"}, + // LDAPPassword is missing + }) + require.NoError(t, err) + defer resp.Body.Close() + + assert.NotEqual(t, http.StatusOK, resp.StatusCode, + "Should fail without LDAPPassword") + + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + var errResp STSErrorTestResponse + err = xml.Unmarshal(body, &errResp) + require.NoError(t, err, "Failed to parse error response: %s", string(body)) + assert.Contains(t, []string{"MissingParameter", "InvalidAction"}, errResp.Error.Code) + }) + + t.Run("missing_role_arn", func(t *testing.T) { + resp, err := callSTSAPIForLDAP(t, url.Values{ + "Action": {"AssumeRoleWithLDAPIdentity"}, + "Version": {"2011-06-15"}, + "RoleSessionName": {"test-session"}, + "LDAPUsername": {"testuser"}, + "LDAPPassword": {"testpass"}, + // RoleArn is missing + }) + require.NoError(t, err) + defer resp.Body.Close() + + assert.NotEqual(t, http.StatusOK, resp.StatusCode, + "Should fail without RoleArn") + + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + var errResp STSErrorTestResponse + err = xml.Unmarshal(body, &errResp) + 
require.NoError(t, err, "Failed to parse error response: %s", string(body)) + assert.Contains(t, []string{"MissingParameter", "InvalidAction"}, errResp.Error.Code) + }) + + t.Run("invalid_duration_too_short", func(t *testing.T) { + resp, err := callSTSAPIForLDAP(t, url.Values{ + "Action": {"AssumeRoleWithLDAPIdentity"}, + "Version": {"2011-06-15"}, + "RoleArn": {"arn:aws:iam::role/test-role"}, + "RoleSessionName": {"test-session"}, + "LDAPUsername": {"testuser"}, + "LDAPPassword": {"testpass"}, + "DurationSeconds": {"100"}, // Less than 900 seconds minimum + }) + require.NoError(t, err) + defer resp.Body.Close() + + // If the action is implemented, it should reject invalid duration + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + t.Logf("Response for invalid duration: status=%d, body=%s", resp.StatusCode, string(body)) + }) +} + +// TestSTSLDAPWithValidCredentials tests LDAP authentication +// This test requires an LDAP server to be configured +func TestSTSLDAPWithValidCredentials(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if !isSTSEndpointRunning(t) { + t.Skip("SeaweedFS STS endpoint is not running at", TestSTSEndpoint) + } + + // Check if LDAP is configured (skip if not) + if !isLDAPConfigured() { + t.Skip("LDAP is not configured - skipping LDAP integration tests") + } + + t.Run("successful_ldap_auth", func(t *testing.T) { + resp, err := callSTSAPIForLDAP(t, url.Values{ + "Action": {"AssumeRoleWithLDAPIdentity"}, + "Version": {"2011-06-15"}, + "RoleArn": {"arn:aws:iam::role/ldap-user"}, + "RoleSessionName": {"ldap-test-session"}, + "LDAPUsername": {"testuser"}, + "LDAPPassword": {"testpass"}, + }) + require.NoError(t, err) + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + t.Logf("Response status: %d, body: %s", resp.StatusCode, string(body)) + + if resp.StatusCode == http.StatusOK { + var stsResp AssumeRoleWithLDAPIdentityTestResponse + err = 
xml.Unmarshal(body, &stsResp) + require.NoError(t, err, "Failed to parse response: %s", string(body)) + + creds := stsResp.Result.Credentials + assert.NotEmpty(t, creds.AccessKeyId, "AccessKeyId should not be empty") + assert.NotEmpty(t, creds.SecretAccessKey, "SecretAccessKey should not be empty") + assert.NotEmpty(t, creds.SessionToken, "SessionToken should not be empty") + assert.NotEmpty(t, creds.Expiration, "Expiration should not be empty") + } + }) +} + +// TestSTSLDAPWithInvalidCredentials tests LDAP rejection with bad credentials +func TestSTSLDAPWithInvalidCredentials(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if !isSTSEndpointRunning(t) { + t.Skip("SeaweedFS STS endpoint is not running at", TestSTSEndpoint) + } + + t.Run("invalid_ldap_password", func(t *testing.T) { + resp, err := callSTSAPIForLDAP(t, url.Values{ + "Action": {"AssumeRoleWithLDAPIdentity"}, + "Version": {"2011-06-15"}, + "RoleArn": {"arn:aws:iam::role/ldap-user"}, + "RoleSessionName": {"ldap-test-session"}, + "LDAPUsername": {"testuser"}, + "LDAPPassword": {"wrong-password"}, + }) + require.NoError(t, err) + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + t.Logf("Response for invalid LDAP credentials: status=%d, body=%s", resp.StatusCode, string(body)) + + // Should fail (either AccessDenied or InvalidAction if not implemented) + assert.NotEqual(t, http.StatusOK, resp.StatusCode, + "Should fail with invalid LDAP password") + }) + + t.Run("nonexistent_ldap_user", func(t *testing.T) { + resp, err := callSTSAPIForLDAP(t, url.Values{ + "Action": {"AssumeRoleWithLDAPIdentity"}, + "Version": {"2011-06-15"}, + "RoleArn": {"arn:aws:iam::role/ldap-user"}, + "RoleSessionName": {"ldap-test-session"}, + "LDAPUsername": {"nonexistent-user-12345"}, + "LDAPPassword": {"somepassword"}, + }) + require.NoError(t, err) + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + 
t.Logf("Response for nonexistent user: status=%d, body=%s", resp.StatusCode, string(body)) + + // Should fail + assert.NotEqual(t, http.StatusOK, resp.StatusCode, + "Should fail with nonexistent LDAP user") + }) +} + +// callSTSAPIForLDAP makes an STS API call for LDAP operation +func callSTSAPIForLDAP(t *testing.T, params url.Values) (*http.Response, error) { + req, err := http.NewRequest(http.MethodPost, TestSTSEndpoint+"/", + strings.NewReader(params.Encode())) + if err != nil { + return nil, err + } + + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + client := &http.Client{Timeout: 30 * time.Second} + return client.Do(req) +} + +// isLDAPConfigured checks if LDAP server is configured and available +func isLDAPConfigured() bool { + // Check environment variable for LDAP URL + ldapURL := os.Getenv("LDAP_URL") + return ldapURL != "" +} + +// isLDAPIdentityActionImplemented checks if the running server supports AssumeRoleWithLDAPIdentity +func isLDAPIdentityActionImplemented(t *testing.T) bool { + resp, err := callSTSAPIForLDAP(t, url.Values{ + "Action": {"AssumeRoleWithLDAPIdentity"}, + "Version": {"2011-06-15"}, + "RoleArn": {"arn:aws:iam::role/test"}, + "RoleSessionName": {"test"}, + "LDAPUsername": {"test"}, + "LDAPPassword": {"test"}, + }) + if err != nil { + return false + } + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return false + } + + // If we get "NotImplemented" or empty response, the action isn't supported + if len(body) == 0 { + return false + } + + var errResp STSErrorTestResponse + if xml.Unmarshal(body, &errResp) == nil && errResp.Error.Code == "NotImplemented" { + return false + } + + // If we get InvalidAction, the action isn't routed + if errResp.Error.Code == "InvalidAction" { + return false + } + + return true +} diff --git a/test/s3/iam/setup_all_tests.sh b/test/s3/iam/setup_all_tests.sh index aaec54691..324a3b9e3 100755 --- a/test/s3/iam/setup_all_tests.sh +++ 
b/test/s3/iam/setup_all_tests.sh
@@ -50,6 +50,82 @@ setup_keycloak() {
     echo -e "${GREEN}[OK] Keycloak setup completed${NC}"
 }
 
+# Set up OpenLDAP for LDAP-based STS testing
+setup_ldap() {
+    echo -e "\n${BLUE}1a. Setting up OpenLDAP for STS LDAP testing...${NC}"
+
+    # Check if LDAP container is already running
+    if docker ps --format '{{.Names}}' | grep -q '^openldap-iam-test$'; then
+        echo -e "${YELLOW}OpenLDAP container already running${NC}"
+        echo -e "${GREEN}[OK] LDAP setup completed (using existing container)${NC}"
+        return 0
+    fi
+
+    # Remove any stopped container with the same name
+    docker rm -f openldap-iam-test 2>/dev/null || true
+
+    # Start OpenLDAP container
+    echo -e "${YELLOW}🔧 Starting OpenLDAP container...${NC}"
+    docker run -d \
+        --name openldap-iam-test \
+        -p 389:389 \
+        -p 636:636 \
+        -e LDAP_ADMIN_PASSWORD=adminpassword \
+        -e LDAP_ORGANISATION="SeaweedFS" \
+        -e LDAP_DOMAIN="seaweedfs.test" \
+        osixia/openldap:latest || {
+        echo -e "${YELLOW}⚠️ OpenLDAP setup failed (optional for basic STS tests)${NC}"
+        return 0 # Don't fail - LDAP is optional
+    }
+
+    # Wait for LDAP to be ready
+    echo -e "${YELLOW}⏳ Waiting for OpenLDAP to be ready...${NC}"
+    for i in $(seq 1 30); do
+        if docker exec openldap-iam-test ldapsearch -x -H ldap://localhost -b "dc=seaweedfs,dc=test" -D "cn=admin,dc=seaweedfs,dc=test" -w adminpassword "(objectClass=*)" >/dev/null 2>&1; then
+            break
+        fi
+        sleep 1
+    done
+
+    # Add test users for LDAP STS testing
+    echo -e "${YELLOW}📝 Adding test users for LDAP STS...${NC}"
+    docker exec -i openldap-iam-test ldapadd -x -D "cn=admin,dc=seaweedfs,dc=test" -w adminpassword <<EOF 2>/dev/null || true
+dn: ou=users,dc=seaweedfs,dc=test
+objectClass: organizationalUnit
+ou: users
+
+dn: cn=testuser,ou=users,dc=seaweedfs,dc=test
+objectClass: inetOrgPerson
+cn: testuser
+sn: Test User
+uid: testuser
+userPassword: testpass
+
+dn: cn=ldapadmin,ou=users,dc=seaweedfs,dc=test
+objectClass: inetOrgPerson
+cn: ldapadmin
+sn: LDAP Admin
+uid: ldapadmin
+userPassword: ldapadminpass +EOF + + # Verify test users were created successfully + echo -e "${YELLOW}🔍 Verifying LDAP test users...${NC}" + if docker exec openldap-iam-test ldapsearch -x -D "cn=admin,dc=seaweedfs,dc=test" -w adminpassword -b "ou=users,dc=seaweedfs,dc=test" "(cn=testuser)" cn 2>/dev/null | grep -q "cn: testuser"; then + echo -e "${GREEN}[OK] Test user 'testuser' verified${NC}" + else + echo -e "${RED}[WARN] Could not verify test user 'testuser' - LDAP tests may fail${NC}" + fi + + # Set environment for LDAP tests + export LDAP_URL="ldap://localhost:389" + export LDAP_BASE_DN="dc=seaweedfs,dc=test" + export LDAP_BIND_DN="cn=admin,dc=seaweedfs,dc=test" + export LDAP_BIND_PASSWORD="adminpassword" + + echo -e "${GREEN}[OK] LDAP setup completed${NC}" +} + # Set up SeaweedFS test cluster setup_seaweedfs_cluster() { echo -e "\n${BLUE}2. Setting up SeaweedFS test cluster...${NC}" @@ -153,6 +229,7 @@ display_summary() { echo -e "\n${BLUE}📊 Setup Summary${NC}" echo -e "${BLUE}=================${NC}" echo -e "Keycloak URL: ${KEYCLOAK_URL:-http://localhost:8080}" + echo -e "LDAP URL: ${LDAP_URL:-ldap://localhost:389}" echo -e "S3 Endpoint: ${S3_ENDPOINT:-http://localhost:8333}" echo -e "Test Timeout: ${TEST_TIMEOUT:-60m}" echo -e "IAM Config: ${SCRIPT_DIR}/iam_config.json" @@ -161,6 +238,7 @@ display_summary() { echo -e "${YELLOW}💡 You can now run tests with: make run-all-tests${NC}" echo -e "${YELLOW}💡 Or run specific tests with: go test -v -timeout=60m -run TestName${NC}" echo -e "${YELLOW}💡 To stop Keycloak: docker stop keycloak-iam-test${NC}" + echo -e "${YELLOW}💡 To stop LDAP: docker stop openldap-iam-test${NC}" } # Main execution @@ -177,6 +255,10 @@ main() { exit 1 fi + # LDAP is optional but we try to set it up + setup_ldap + setup_steps+=("ldap") + if setup_seaweedfs_cluster; then setup_steps+=("seaweedfs") else diff --git a/test/s3/iam/setup_keycloak.sh b/test/s3/iam/setup_keycloak.sh index 14fb08435..7e717bc5a 100755 --- 
a/test/s3/iam/setup_keycloak.sh +++ b/test/s3/iam/setup_keycloak.sh @@ -139,7 +139,7 @@ ensure_realm() { echo -e "${GREEN}[OK] Realm '${REALM_NAME}' already exists${NC}" else echo -e "${YELLOW}📝 Creating realm '${REALM_NAME}'...${NC}" - if kcadm create realms -s realm="${REALM_NAME}" -s enabled=true 2>/dev/null; then + if kcadm create realms -s realm="${REALM_NAME}" -s enabled=true; then echo -e "${GREEN}[OK] Realm created${NC}" else # Check if it exists now (might have been created by another process) diff --git a/weed/iam/integration/advanced_policy_test.go b/weed/iam/integration/advanced_policy_test.go index 0af233a37..393505d6c 100644 --- a/weed/iam/integration/advanced_policy_test.go +++ b/weed/iam/integration/advanced_policy_test.go @@ -25,7 +25,7 @@ func TestPolicyVariableSubstitution(t *testing.T) { { Effect: "Allow", Principal: map[string]interface{}{ - "Federated": "https://test-issuer.com", + "Federated": "test-oidc", }, Action: []string{"sts:AssumeRoleWithWebIdentity"}, }, @@ -102,7 +102,7 @@ func TestConditionWithNumericComparison(t *testing.T) { { Effect: "Allow", Principal: map[string]interface{}{ - "Federated": "https://test-issuer.com", + "Federated": "test-oidc", }, Action: []string{"sts:AssumeRoleWithWebIdentity"}, Condition: map[string]map[string]interface{}{ diff --git a/weed/iam/integration/iam_integration_test.go b/weed/iam/integration/iam_integration_test.go index 8aeedda5c..4740152a8 100644 --- a/weed/iam/integration/iam_integration_test.go +++ b/weed/iam/integration/iam_integration_test.go @@ -421,7 +421,7 @@ func TestTrustPolicyWildcardPrincipal(t *testing.T) { { Effect: "Allow", Principal: map[string]interface{}{ - "Federated": "https://test-issuer.com", + "Federated": "test-oidc", }, Action: []string{"sts:AssumeRoleWithWebIdentity"}, }, @@ -440,7 +440,7 @@ func TestTrustPolicyWildcardPrincipal(t *testing.T) { { Effect: "Allow", Principal: map[string]interface{}{ - "Federated": []interface{}{"specific-provider", 
"https://test-issuer.com"}, + "Federated": []interface{}{"specific-provider", "test-oidc"}, }, Action: []string{"sts:AssumeRoleWithWebIdentity"}, }, @@ -646,7 +646,7 @@ func setupTestPoliciesAndRoles(t *testing.T, manager *IAMManager) { { Effect: "Allow", Principal: map[string]interface{}{ - "Federated": "https://test-issuer.com", + "Federated": "test-oidc", }, Action: []string{"sts:AssumeRoleWithWebIdentity"}, }, diff --git a/weed/iam/integration/iam_manager.go b/weed/iam/integration/iam_manager.go index caaa7f31d..894a7f37c 100644 --- a/weed/iam/integration/iam_manager.go +++ b/weed/iam/integration/iam_manager.go @@ -346,7 +346,7 @@ func (m *IAMManager) ValidateTrustPolicy(ctx context.Context, roleArn, provider, if principal, ok := statement.Principal.(map[string]interface{}); ok { if federated, ok := principal["Federated"].(string); ok { // For OIDC, check against issuer URL - if provider == "oidc" && federated == "https://test-issuer.com" { + if provider == "oidc" && federated == "test-oidc" { return true } // For LDAP, check against test-ldap @@ -391,8 +391,24 @@ func (m *IAMManager) validateTrustPolicyForWebIdentity(ctx context.Context, role // The issuer is the federated provider for OIDC if iss, ok := tokenClaims["iss"].(string); ok { + // Default to issuer URL requestContext["aws:FederatedProvider"] = iss requestContext["oidc:iss"] = iss + + // Try to resolve provider name from issuer for better policy matching + // This allows policies to reference the provider name (e.g. 
"keycloak") instead of the full issuer URL + if m.stsService != nil { + for name, provider := range m.stsService.GetProviders() { + if oidcProvider, ok := provider.(interface{ GetIssuer() string }); ok { + confIssuer := oidcProvider.GetIssuer() + + if confIssuer == iss { + requestContext["aws:FederatedProvider"] = name + break + } + } + } + } } if sub, ok := tokenClaims["sub"].(string); ok { diff --git a/weed/iam/integration/iam_manager_trust.go b/weed/iam/integration/iam_manager_trust.go new file mode 100644 index 000000000..e97ed62f6 --- /dev/null +++ b/weed/iam/integration/iam_manager_trust.go @@ -0,0 +1,43 @@ +package integration + +import ( + "context" + "fmt" + + "github.com/seaweedfs/seaweedfs/weed/iam/policy" + "github.com/seaweedfs/seaweedfs/weed/iam/utils" +) + +// ValidateTrustPolicyForPrincipal validates if a principal is allowed to assume a role +func (m *IAMManager) ValidateTrustPolicyForPrincipal(ctx context.Context, roleArn, principalArn string) error { + if !m.initialized { + return fmt.Errorf("IAM manager not initialized") + } + + // Extract role name from ARN + roleName := utils.ExtractRoleNameFromArn(roleArn) + + // Get role definition + roleDef, err := m.roleStore.GetRole(ctx, m.getFilerAddress(), roleName) + if err != nil { + return fmt.Errorf("failed to get role %s: %w", roleName, err) + } + + if roleDef.TrustPolicy == nil { + return fmt.Errorf("role has no trust policy") + } + + // Create evaluation context + evalCtx := &policy.EvaluationContext{ + Principal: principalArn, + Action: "sts:AssumeRole", + Resource: roleArn, + } + + // Evaluate the trust policy + if !m.evaluateTrustPolicy(roleDef.TrustPolicy, evalCtx) { + return fmt.Errorf("trust policy denies access to principal: %s", principalArn) + } + + return nil +} diff --git a/weed/iam/ldap/ldap_provider.go b/weed/iam/ldap/ldap_provider.go new file mode 100644 index 000000000..6b02e9a3f --- /dev/null +++ b/weed/iam/ldap/ldap_provider.go @@ -0,0 +1,571 @@ +package ldap + +import ( + 
"context" + "crypto/tls" + "fmt" + "net" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/go-ldap/ldap/v3" + "github.com/mitchellh/mapstructure" + "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/iam/providers" +) + +// LDAPConfig holds configuration for LDAP provider +type LDAPConfig struct { + // Server is the LDAP server URL (ldap:// or ldaps://) + Server string `json:"server"` + + // BindDN is the DN used to bind for searches (optional for anonymous bind) + BindDN string `json:"bindDN,omitempty"` + + // BindPassword is the password for the bind DN + BindPassword string `json:"bindPassword,omitempty"` + + // BaseDN is the base DN for user searches + BaseDN string `json:"baseDN"` + + // UserFilter is the filter to find users (use %s for username placeholder) + // Example: "(uid=%s)" or "(cn=%s)" or "(&(objectClass=person)(uid=%s))" + UserFilter string `json:"userFilter"` + + // GroupFilter is the filter to find user groups (use %s for user DN placeholder) + // Example: "(member=%s)" or "(memberUid=%s)" + GroupFilter string `json:"groupFilter,omitempty"` + + // GroupBaseDN is the base DN for group searches (defaults to BaseDN) + GroupBaseDN string `json:"groupBaseDN,omitempty"` + + // Attributes to retrieve from LDAP + Attributes LDAPAttributes `json:"attributes,omitempty"` + + // UseTLS enables StartTLS + UseTLS bool `json:"useTLS,omitempty"` + + // InsecureSkipVerify skips TLS certificate verification + InsecureSkipVerify bool `json:"insecureSkipVerify,omitempty"` + + // ConnectionTimeout is the connection timeout + ConnectionTimeout time.Duration `json:"connectionTimeout,omitempty"` + + // PoolSize is the number of connections in the pool (default: 10) + PoolSize int `json:"poolSize,omitempty"` + + // Audience is the expected audience for tokens (optional) + Audience string `json:"audience,omitempty"` +} + +// LDAPAttributes maps LDAP attribute names +type LDAPAttributes struct { + Email string 
`json:"email,omitempty"` // Default: mail + DisplayName string `json:"displayName,omitempty"` // Default: cn + Groups string `json:"groups,omitempty"` // Default: memberOf + UID string `json:"uid,omitempty"` // Default: uid +} + +// connectionPool manages a pool of LDAP connections for reuse +type connectionPool struct { + conns chan *ldap.Conn + mu sync.Mutex + size int + closed uint32 // atomic flag: 1 if closed, 0 if open +} + +// LDAPProvider implements the IdentityProvider interface for LDAP +type LDAPProvider struct { + name string + config *LDAPConfig + initialized bool + mu sync.RWMutex + pool *connectionPool +} + +// NewLDAPProvider creates a new LDAP provider +func NewLDAPProvider(name string) *LDAPProvider { + return &LDAPProvider{ + name: name, + } +} + +// Name returns the provider name +func (p *LDAPProvider) Name() string { + return p.name +} + +// Initialize initializes the provider with configuration +func (p *LDAPProvider) Initialize(config interface{}) error { + p.mu.Lock() + defer p.mu.Unlock() + + if p.initialized { + return fmt.Errorf("LDAP provider already initialized") + } + + cfg := &LDAPConfig{} + + // Check if input is already the correct struct type + if c, ok := config.(*LDAPConfig); ok { + cfg = c + } else { + // Parse from map using mapstructure with weak typing and time duration hook + decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + ), + Result: cfg, + TagName: "json", + WeaklyTypedInput: true, + }) + if err != nil { + return fmt.Errorf("failed to create config decoder: %w", err) + } + + if err := decoder.Decode(config); err != nil { + return fmt.Errorf("failed to decode LDAP configuration: %w", err) + } + } + + // Validate required fields + if cfg.Server == "" { + return fmt.Errorf("LDAP server URL is required") + } + if cfg.BaseDN == "" { + return fmt.Errorf("LDAP base DN is required") + } + if cfg.UserFilter == 
"" { + cfg.UserFilter = "(cn=%s)" // Default filter + } + + // Warn if BindDN is configured but BindPassword is empty + if cfg.BindDN != "" && cfg.BindPassword == "" { + glog.Warningf("LDAP provider '%s' configured with BindDN but no BindPassword", p.name) + } + + // Warn if InsecureSkipVerify is enabled + if cfg.InsecureSkipVerify { + glog.Warningf("LDAP provider '%s' has InsecureSkipVerify enabled. Do not use in production.", p.name) + } + + // Set default attributes + if cfg.Attributes.Email == "" { + cfg.Attributes.Email = "mail" + } + if cfg.Attributes.DisplayName == "" { + cfg.Attributes.DisplayName = "cn" + } + if cfg.Attributes.Groups == "" { + cfg.Attributes.Groups = "memberOf" + } + if cfg.Attributes.UID == "" { + cfg.Attributes.UID = "uid" + } + if cfg.GroupBaseDN == "" { + cfg.GroupBaseDN = cfg.BaseDN + } + if cfg.ConnectionTimeout == 0 { + cfg.ConnectionTimeout = 10 * time.Second + } + + p.config = cfg + + // Initialize connection pool (default size: 10 connections) + poolSize := 10 + if cfg.PoolSize > 0 { + poolSize = cfg.PoolSize + } + p.pool = &connectionPool{ + conns: make(chan *ldap.Conn, poolSize), + size: poolSize, + } + + p.initialized = true + + glog.V(1).Infof("LDAP provider '%s' initialized: server=%s, baseDN=%s", + p.name, cfg.Server, cfg.BaseDN) + + return nil +} + +// getConnection gets a connection from the pool or creates a new one +func (p *LDAPProvider) getConnection() (*ldap.Conn, error) { + // Try to get a connection from the pool (non-blocking) + select { + case conn := <-p.pool.conns: + // Test if connection is still alive + if conn != nil && conn.IsClosing() { + conn.Close() + // Connection is dead, create a new one + return p.createConnection() + } + return conn, nil + default: + // Pool is empty, create a new connection + return p.createConnection() + } +} + +// returnConnection returns a connection to the pool +func (p *LDAPProvider) returnConnection(conn *ldap.Conn) { + if conn == nil || conn.IsClosing() { + if conn != nil { 
+ conn.Close() + } + return + } + + // Check if pool is closed before attempting to send + if atomic.LoadUint32(&p.pool.closed) == 1 { + conn.Close() + return + } + + // Try to return to pool (non-blocking) + select { + case p.pool.conns <- conn: + // Successfully returned to pool + default: + // Pool is full, close the connection + conn.Close() + } +} + +// createConnection establishes a new connection to the LDAP server +func (p *LDAPProvider) createConnection() (*ldap.Conn, error) { + var conn *ldap.Conn + var err error + + // Create dialer with timeout + dialer := &net.Dialer{Timeout: p.config.ConnectionTimeout} + + // Parse server URL + if strings.HasPrefix(p.config.Server, "ldaps://") { + // LDAPS connection + tlsConfig := &tls.Config{ + InsecureSkipVerify: p.config.InsecureSkipVerify, + MinVersion: tls.VersionTLS12, + } + conn, err = ldap.DialURL(p.config.Server, ldap.DialWithDialer(dialer), ldap.DialWithTLSConfig(tlsConfig)) + } else { + // LDAP connection + conn, err = ldap.DialURL(p.config.Server, ldap.DialWithDialer(dialer)) + if err == nil && p.config.UseTLS { + // StartTLS + tlsConfig := &tls.Config{ + InsecureSkipVerify: p.config.InsecureSkipVerify, + MinVersion: tls.VersionTLS12, + } + if err = conn.StartTLS(tlsConfig); err != nil { + conn.Close() + return nil, fmt.Errorf("failed to start TLS: %w", err) + } + } + } + + if err != nil { + return nil, fmt.Errorf("failed to connect to LDAP server: %w", err) + } + + return conn, nil +} + +// Close closes all connections in the pool +func (p *LDAPProvider) Close() error { + if p.pool == nil { + return nil + } + + // Atomically mark pool as closed to prevent new connections being returned + if !atomic.CompareAndSwapUint32(&p.pool.closed, 0, 1) { + // Already closed + return nil + } + + p.pool.mu.Lock() + defer p.pool.mu.Unlock() + + // Now safe to close the channel since closed flag prevents new sends + close(p.pool.conns) + for conn := range p.pool.conns { + if conn != nil { + conn.Close() + } + } + return 
nil +} + +// Authenticate authenticates a user with username:password credentials +func (p *LDAPProvider) Authenticate(ctx context.Context, credentials string) (*providers.ExternalIdentity, error) { + p.mu.RLock() + if !p.initialized { + p.mu.RUnlock() + return nil, fmt.Errorf("LDAP provider not initialized") + } + config := p.config + p.mu.RUnlock() + + // Parse credentials (username:password format) + parts := strings.SplitN(credentials, ":", 2) + if len(parts) != 2 { + return nil, fmt.Errorf("invalid credentials format (expected username:password)") + } + username, password := parts[0], parts[1] + + if username == "" || password == "" { + return nil, fmt.Errorf("username and password are required") + } + + // Get connection from pool + conn, err := p.getConnection() + if err != nil { + return nil, err + } + // Note: defer returnConnection moved to after rebinding to service account + + // First, bind with service account to search for user + if config.BindDN != "" { + err = conn.Bind(config.BindDN, config.BindPassword) + if err != nil { + glog.V(2).Infof("LDAP service bind failed: %v", err) + conn.Close() // Close on error, don't return to pool + return nil, fmt.Errorf("LDAP service bind failed: %w", err) + } + } + + // Search for the user + userFilter := fmt.Sprintf(config.UserFilter, ldap.EscapeFilter(username)) + searchRequest := ldap.NewSearchRequest( + config.BaseDN, + ldap.ScopeWholeSubtree, + ldap.NeverDerefAliases, + 1, // Size limit + int(config.ConnectionTimeout.Seconds()), + false, + userFilter, + []string{"dn", config.Attributes.Email, config.Attributes.DisplayName, config.Attributes.UID, config.Attributes.Groups}, + nil, + ) + + result, err := conn.Search(searchRequest) + if err != nil { + glog.V(2).Infof("LDAP user search failed: %v", err) + conn.Close() // Close on error + return nil, fmt.Errorf("LDAP user search failed: %w", err) + } + + if len(result.Entries) == 0 { + conn.Close() // Close on error + return nil, fmt.Errorf("user not found") + } 
+ if len(result.Entries) > 1 { + conn.Close() // Close on error + return nil, fmt.Errorf("multiple users found") + } + + userEntry := result.Entries[0] + userDN := userEntry.DN + + // Bind as the user to verify password + err = conn.Bind(userDN, password) + if err != nil { + glog.V(2).Infof("LDAP user bind failed for %s: %v", username, err) + conn.Close() // Close on error, don't return to pool + return nil, fmt.Errorf("authentication failed: invalid credentials") + } + + // Rebind to service account before returning connection to pool + // This prevents pool corruption from authenticated user binds + if config.BindDN != "" { + if err = conn.Bind(config.BindDN, config.BindPassword); err != nil { + glog.V(2).Infof("LDAP rebind to service account failed: %v", err) + conn.Close() // Close on error, don't return to pool + return nil, fmt.Errorf("LDAP service account rebind failed after successful user authentication (check bindDN %q and its credentials): %w", config.BindDN, err) + } + } + // Now safe to defer return to pool with clean service account binding + defer p.returnConnection(conn) + + // Build identity from LDAP attributes + identity := &providers.ExternalIdentity{ + UserID: username, + Email: userEntry.GetAttributeValue(config.Attributes.Email), + DisplayName: userEntry.GetAttributeValue(config.Attributes.DisplayName), + Groups: userEntry.GetAttributeValues(config.Attributes.Groups), + Provider: p.name, + Attributes: map[string]string{ + "dn": userDN, + "uid": userEntry.GetAttributeValue(config.Attributes.UID), + }, + } + + // If no groups from memberOf, try group search + if len(identity.Groups) == 0 && config.GroupFilter != "" { + groups, err := p.searchUserGroups(conn, userDN, config) + if err != nil { + glog.V(2).Infof("Group search failed for %s: %v", username, err) + } else { + identity.Groups = groups + } + } + + glog.V(2).Infof("LDAP authentication successful for user: %s, groups: %v", username, identity.Groups) + return identity, nil +} + +// 
searchUserGroups searches for groups the user belongs to +func (p *LDAPProvider) searchUserGroups(conn *ldap.Conn, userDN string, config *LDAPConfig) ([]string, error) { + groupFilter := fmt.Sprintf(config.GroupFilter, ldap.EscapeFilter(userDN)) + searchRequest := ldap.NewSearchRequest( + config.GroupBaseDN, + ldap.ScopeWholeSubtree, + ldap.NeverDerefAliases, + 0, + int(config.ConnectionTimeout.Seconds()), + false, + groupFilter, + []string{"cn", "dn"}, + nil, + ) + + result, err := conn.Search(searchRequest) + if err != nil { + return nil, err + } + + var groups []string + for _, entry := range result.Entries { + cn := entry.GetAttributeValue("cn") + if cn != "" { + groups = append(groups, cn) + } + } + + return groups, nil +} + +// GetUserInfo retrieves user information by user ID +func (p *LDAPProvider) GetUserInfo(ctx context.Context, userID string) (*providers.ExternalIdentity, error) { + p.mu.RLock() + if !p.initialized { + p.mu.RUnlock() + return nil, fmt.Errorf("LDAP provider not initialized") + } + config := p.config + p.mu.RUnlock() + + // Get connection from pool + conn, err := p.getConnection() + if err != nil { + return nil, err + } + // Note: defer returnConnection moved to after bind + + // Bind with service account + if config.BindDN != "" { + err = conn.Bind(config.BindDN, config.BindPassword) + if err != nil { + conn.Close() // Close on bind failure + return nil, fmt.Errorf("LDAP service bind failed: %w", err) + } + } + defer p.returnConnection(conn) + + // Search for the user + userFilter := fmt.Sprintf(config.UserFilter, ldap.EscapeFilter(userID)) + searchRequest := ldap.NewSearchRequest( + config.BaseDN, + ldap.ScopeWholeSubtree, + ldap.NeverDerefAliases, + 1, + int(config.ConnectionTimeout.Seconds()), + false, + userFilter, + []string{"dn", config.Attributes.Email, config.Attributes.DisplayName, config.Attributes.UID, config.Attributes.Groups}, + nil, + ) + + result, err := conn.Search(searchRequest) + if err != nil { + return nil, 
fmt.Errorf("LDAP user search failed: %w", err) + } + + if len(result.Entries) == 0 { + return nil, fmt.Errorf("user not found") + } + if len(result.Entries) > 1 { + return nil, fmt.Errorf("multiple users found") + } + + userEntry := result.Entries[0] + identity := &providers.ExternalIdentity{ + UserID: userID, + Email: userEntry.GetAttributeValue(config.Attributes.Email), + DisplayName: userEntry.GetAttributeValue(config.Attributes.DisplayName), + Groups: userEntry.GetAttributeValues(config.Attributes.Groups), + Provider: p.name, + Attributes: map[string]string{ + "dn": userEntry.DN, + "uid": userEntry.GetAttributeValue(config.Attributes.UID), + }, + } + + // If no groups from memberOf, try group search + if len(identity.Groups) == 0 && config.GroupFilter != "" { + groups, err := p.searchUserGroups(conn, userEntry.DN, config) + if err != nil { + glog.V(2).Infof("Group search failed for %s: %v", userID, err) + } else { + identity.Groups = groups + } + } + + return identity, nil +} + +// ValidateToken validates credentials (username:password format) and returns claims +func (p *LDAPProvider) ValidateToken(ctx context.Context, token string) (*providers.TokenClaims, error) { + identity, err := p.Authenticate(ctx, token) + if err != nil { + return nil, err + } + + p.mu.RLock() + config := p.config + p.mu.RUnlock() + + // If audience is configured, validate it (consistent with OIDC approach) + audience := p.name + if config.Audience != "" { + audience = config.Audience + } + + // Populate standard TokenClaims fields for interface compliance + now := time.Now() + ttl := 1 * time.Hour // Default TTL for LDAP tokens + + return &providers.TokenClaims{ + Subject: identity.UserID, + Issuer: p.name, + Audience: audience, + IssuedAt: now, + ExpiresAt: now.Add(ttl), + Claims: map[string]interface{}{ + "email": identity.Email, + "name": identity.DisplayName, + "groups": identity.Groups, + "dn": identity.Attributes["dn"], + "provider": p.name, + }, + }, nil +} + +// IsInitialized 
returns whether the provider is initialized +func (p *LDAPProvider) IsInitialized() bool { + p.mu.RLock() + defer p.mu.RUnlock() + return p.initialized +} diff --git a/weed/iam/sts/cross_instance_token_test.go b/weed/iam/sts/cross_instance_token_test.go index c628d5e0d..8a375a885 100644 --- a/weed/iam/sts/cross_instance_token_test.go +++ b/weed/iam/sts/cross_instance_token_test.go @@ -127,16 +127,16 @@ func TestCrossInstanceTokenUsage(t *testing.T) { sessionId := TestSessionID expiresAt := time.Now().Add(time.Hour) - tokenFromA, err := instanceA.tokenGenerator.GenerateSessionToken(sessionId, expiresAt) + tokenFromA, err := instanceA.GetTokenGenerator().GenerateSessionToken(sessionId, expiresAt) require.NoError(t, err, "Instance A should generate token") // Validate token on Instance B - claimsFromB, err := instanceB.tokenGenerator.ValidateSessionToken(tokenFromA) + claimsFromB, err := instanceB.GetTokenGenerator().ValidateSessionToken(tokenFromA) require.NoError(t, err, "Instance B should validate token from Instance A") assert.Equal(t, sessionId, claimsFromB.SessionId, "Session ID should match") // Validate same token on Instance C - claimsFromC, err := instanceC.tokenGenerator.ValidateSessionToken(tokenFromA) + claimsFromC, err := instanceC.GetTokenGenerator().ValidateSessionToken(tokenFromA) require.NoError(t, err, "Instance C should validate token from Instance A") assert.Equal(t, sessionId, claimsFromC.SessionId, "Session ID should match") @@ -295,15 +295,15 @@ func TestSTSDistributedConfigurationRequirements(t *testing.T) { // Generate token on Instance A sessionId := "test-session" expiresAt := time.Now().Add(time.Hour) - tokenFromA, err := instanceA.tokenGenerator.GenerateSessionToken(sessionId, expiresAt) + tokenFromA, err := instanceA.GetTokenGenerator().GenerateSessionToken(sessionId, expiresAt) require.NoError(t, err) // Instance A should validate its own token - _, err = instanceA.tokenGenerator.ValidateSessionToken(tokenFromA) + _, err = 
instanceA.GetTokenGenerator().ValidateSessionToken(tokenFromA) assert.NoError(t, err, "Instance A should validate own token") // Instance B should REJECT token due to different signing key - _, err = instanceB.tokenGenerator.ValidateSessionToken(tokenFromA) + _, err = instanceB.GetTokenGenerator().ValidateSessionToken(tokenFromA) assert.Error(t, err, "Instance B should reject token with different signing key") assert.Contains(t, err.Error(), "invalid token", "Should be signature validation error") }) @@ -339,11 +339,11 @@ func TestSTSDistributedConfigurationRequirements(t *testing.T) { // Generate token on Instance A sessionId := "test-session" expiresAt := time.Now().Add(time.Hour) - tokenFromA, err := instanceA.tokenGenerator.GenerateSessionToken(sessionId, expiresAt) + tokenFromA, err := instanceA.GetTokenGenerator().GenerateSessionToken(sessionId, expiresAt) require.NoError(t, err) // Instance B should REJECT token due to different issuer - _, err = instanceB.tokenGenerator.ValidateSessionToken(tokenFromA) + _, err = instanceB.GetTokenGenerator().ValidateSessionToken(tokenFromA) assert.Error(t, err, "Instance B should reject token with different issuer") assert.Contains(t, err.Error(), "invalid issuer", "Should be issuer validation error") }) @@ -368,12 +368,12 @@ func TestSTSDistributedConfigurationRequirements(t *testing.T) { // Generate token on Instance 0 sessionId := "multi-instance-test" expiresAt := time.Now().Add(time.Hour) - token, err := instances[0].tokenGenerator.GenerateSessionToken(sessionId, expiresAt) + token, err := instances[0].GetTokenGenerator().GenerateSessionToken(sessionId, expiresAt) require.NoError(t, err) // All other instances should validate the token for i := 1; i < 5; i++ { - claims, err := instances[i].tokenGenerator.ValidateSessionToken(token) + claims, err := instances[i].GetTokenGenerator().ValidateSessionToken(token) require.NoError(t, err, "Instance %d should validate token", i) assert.Equal(t, sessionId, claims.SessionId, 
"Instance %d should extract correct session ID", i) } @@ -486,10 +486,10 @@ func TestSTSRealWorldDistributedScenarios(t *testing.T) { assert.True(t, sessionInfo3.ExpiresAt.After(time.Now()), "Session should not be expired") // Step 5: Token should be identical when parsed - claims2, err := gateway2.tokenGenerator.ValidateSessionToken(sessionToken) + claims2, err := gateway2.GetTokenGenerator().ValidateSessionToken(sessionToken) require.NoError(t, err) - claims3, err := gateway3.tokenGenerator.ValidateSessionToken(sessionToken) + claims3, err := gateway3.GetTokenGenerator().ValidateSessionToken(sessionToken) require.NoError(t, err) assert.Equal(t, claims2.SessionId, claims3.SessionId, "Session IDs should match") diff --git a/weed/iam/sts/distributed_sts_test.go b/weed/iam/sts/distributed_sts_test.go index 133f3a669..7997e7b8e 100644 --- a/weed/iam/sts/distributed_sts_test.go +++ b/weed/iam/sts/distributed_sts_test.go @@ -109,9 +109,9 @@ func TestDistributedSTSService(t *testing.T) { expiresAt := time.Now().Add(time.Hour) // Generate tokens from different instances - token1, err1 := instance1.tokenGenerator.GenerateSessionToken(sessionId, expiresAt) - token2, err2 := instance2.tokenGenerator.GenerateSessionToken(sessionId, expiresAt) - token3, err3 := instance3.tokenGenerator.GenerateSessionToken(sessionId, expiresAt) + token1, err1 := instance1.GetTokenGenerator().GenerateSessionToken(sessionId, expiresAt) + token2, err2 := instance2.GetTokenGenerator().GenerateSessionToken(sessionId, expiresAt) + token3, err3 := instance3.GetTokenGenerator().GenerateSessionToken(sessionId, expiresAt) require.NoError(t, err1, "Instance 1 token generation should succeed") require.NoError(t, err2, "Instance 2 token generation should succeed") @@ -130,13 +130,13 @@ func TestDistributedSTSService(t *testing.T) { expiresAt := time.Now().Add(time.Hour) // Generate token on instance 1 - token, err := instance1.tokenGenerator.GenerateSessionToken(sessionId, expiresAt) + token, err := 
instance1.GetTokenGenerator().GenerateSessionToken(sessionId, expiresAt) require.NoError(t, err) // Validate on all instances - claims1, err1 := instance1.tokenGenerator.ValidateSessionToken(token) - claims2, err2 := instance2.tokenGenerator.ValidateSessionToken(token) - claims3, err3 := instance3.tokenGenerator.ValidateSessionToken(token) + claims1, err1 := instance1.GetTokenGenerator().ValidateSessionToken(token) + claims2, err2 := instance2.GetTokenGenerator().ValidateSessionToken(token) + claims3, err3 := instance3.GetTokenGenerator().ValidateSessionToken(token) require.NoError(t, err1, "Instance 1 should validate token from instance 1") require.NoError(t, err2, "Instance 2 should validate token from instance 1") @@ -216,15 +216,15 @@ func TestSTSConfigurationValidation(t *testing.T) { // Generate token on instance 1 sessionId := "test-session" expiresAt := time.Now().Add(time.Hour) - token, err := instance1.tokenGenerator.GenerateSessionToken(sessionId, expiresAt) + token, err := instance1.GetTokenGenerator().GenerateSessionToken(sessionId, expiresAt) require.NoError(t, err) // Instance 1 should validate its own token - _, err = instance1.tokenGenerator.ValidateSessionToken(token) + _, err = instance1.GetTokenGenerator().ValidateSessionToken(token) assert.NoError(t, err, "Instance 1 should validate its own token") // Instance 2 should reject token from instance 1 (different signing key) - _, err = instance2.tokenGenerator.ValidateSessionToken(token) + _, err = instance2.GetTokenGenerator().ValidateSessionToken(token) assert.Error(t, err, "Instance 2 should reject token with different signing key") }) @@ -258,12 +258,12 @@ func TestSTSConfigurationValidation(t *testing.T) { // Generate token on instance 1 sessionId := "test-session" expiresAt := time.Now().Add(time.Hour) - token, err := instance1.tokenGenerator.GenerateSessionToken(sessionId, expiresAt) + token, err := instance1.GetTokenGenerator().GenerateSessionToken(sessionId, expiresAt) require.NoError(t, 
err) // Instance 2 should reject token due to issuer mismatch // (Even though signing key is the same, issuer validation will fail) - _, err = instance2.tokenGenerator.ValidateSessionToken(token) + _, err = instance2.GetTokenGenerator().ValidateSessionToken(token) assert.Error(t, err, "Instance 2 should reject token with different issuer") }) } diff --git a/weed/iam/sts/provider_factory.go b/weed/iam/sts/provider_factory.go index 83808c58f..53635c8f2 100644 --- a/weed/iam/sts/provider_factory.go +++ b/weed/iam/sts/provider_factory.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/iam/ldap" "github.com/seaweedfs/seaweedfs/weed/iam/oidc" "github.com/seaweedfs/seaweedfs/weed/iam/providers" ) @@ -66,8 +67,11 @@ func (f *ProviderFactory) createOIDCProvider(config *ProviderConfig) (providers. // createLDAPProvider creates an LDAP provider from configuration func (f *ProviderFactory) createLDAPProvider(config *ProviderConfig) (providers.IdentityProvider, error) { - // TODO: Implement LDAP provider when available - return nil, fmt.Errorf("LDAP provider not implemented yet") + provider := ldap.NewLDAPProvider(config.Name) + if err := provider.Initialize(config.Config); err != nil { + return nil, fmt.Errorf("failed to initialize LDAP provider: %w", err) + } + return provider, nil } // createSAMLProvider creates a SAML provider from configuration @@ -317,7 +321,12 @@ func (f *ProviderFactory) validateOIDCConfig(config map[string]interface{}) erro // validateLDAPConfig validates LDAP provider configuration func (f *ProviderFactory) validateLDAPConfig(config map[string]interface{}) error { - // TODO: Implement when LDAP provider is available + if _, ok := config["server"]; !ok { + return fmt.Errorf("LDAP provider requires 'server' field") + } + if _, ok := config["baseDN"]; !ok { + return fmt.Errorf("LDAP provider requires 'baseDN' field") + } return nil } diff --git a/weed/iam/sts/sts_service.go 
b/weed/iam/sts/sts_service.go index 1d3716099..f87038fc8 100644 --- a/weed/iam/sts/sts_service.go +++ b/weed/iam/sts/sts_service.go @@ -81,6 +81,12 @@ type STSService struct { trustPolicyValidator TrustPolicyValidator // Interface for trust policy validation } +// GetTokenGenerator returns the token generator used by the STS service. +// This keeps the underlying field unexported while still allowing read-only access. +func (s *STSService) GetTokenGenerator() *TokenGenerator { + return s.tokenGenerator +} + // STSConfig holds STS service configuration type STSConfig struct { // TokenDuration is the default duration for issued tokens @@ -95,6 +101,10 @@ type STSConfig struct { // SigningKey is used to sign session tokens SigningKey []byte `json:"signingKey"` + // AccountId is the AWS account ID used for federated user ARNs + // Defaults to "111122223333" if not specified + AccountId string `json:"accountId,omitempty"` + // Providers configuration - enables automatic provider loading Providers []*ProviderConfig `json:"providers,omitempty"` } @@ -807,7 +817,7 @@ func (s *STSService) calculateSessionDuration(durationSeconds *int64, tokenExpir // extractSessionIdFromToken extracts session ID from JWT session token func (s *STSService) extractSessionIdFromToken(sessionToken string) string { - // Parse JWT and extract session ID from claims + // Validate JWT and extract session claims claims, err := s.tokenGenerator.ValidateJWTWithClaims(sessionToken) if err != nil { // For test compatibility, also handle direct session IDs @@ -862,7 +872,7 @@ func (s *STSService) ExpireSessionForTesting(ctx context.Context, sessionToken s return fmt.Errorf("session token cannot be empty") } - // Validate JWT token format + // Just validate the signature _, err := s.tokenGenerator.ValidateJWTWithClaims(sessionToken) if err != nil { return fmt.Errorf("invalid session token format: %w", err) diff --git a/weed/s3api/auth_credentials_trust.go b/weed/s3api/auth_credentials_trust.go new file 
mode 100644 index 000000000..e37cda328 --- /dev/null +++ b/weed/s3api/auth_credentials_trust.go @@ -0,0 +1,15 @@ +package s3api + +import ( + "context" + "fmt" +) + +// ValidateTrustPolicyForPrincipal validates if a principal is allowed to assume a role +// Delegates to the IAM integration if available +func (iam *IdentityAccessManagement) ValidateTrustPolicyForPrincipal(ctx context.Context, roleArn, principalArn string) error { + if iam.iamIntegration != nil { + return iam.iamIntegration.ValidateTrustPolicyForPrincipal(ctx, roleArn, principalArn) + } + return fmt.Errorf("IAM integration not available") +} diff --git a/weed/s3api/auth_signature_v4_sts_test.go b/weed/s3api/auth_signature_v4_sts_test.go index 91051440d..6cca0cdd6 100644 --- a/weed/s3api/auth_signature_v4_sts_test.go +++ b/weed/s3api/auth_signature_v4_sts_test.go @@ -16,8 +16,9 @@ import ( // MockIAMIntegration is a mock implementation of IAM integration for testing type MockIAMIntegration struct { - authorizeFunc func(ctx context.Context, identity *IAMIdentity, action Action, bucket, object string, r *http.Request) s3err.ErrorCode - authCalled bool + authorizeFunc func(ctx context.Context, identity *IAMIdentity, action Action, bucket, object string, r *http.Request) s3err.ErrorCode + validateTrustPolicyFunc func(ctx context.Context, roleArn, principalArn string) error + authCalled bool } func (m *MockIAMIntegration) AuthorizeAction(ctx context.Context, identity *IAMIdentity, action Action, bucket, object string, r *http.Request) s3err.ErrorCode { @@ -36,6 +37,13 @@ func (m *MockIAMIntegration) ValidateSessionToken(ctx context.Context, token str return nil, nil // Not needed for these tests } +func (m *MockIAMIntegration) ValidateTrustPolicyForPrincipal(ctx context.Context, roleArn, principalArn string) error { + if m.validateTrustPolicyFunc != nil { + return m.validateTrustPolicyFunc(ctx, roleArn, principalArn) + } + return nil +} + // TestVerifyV4SignatureWithSTSIdentity tests that verifyV4Signature 
properly handles STS identities // by falling back to IAM authorization when shouldCheckPermissions is true func TestVerifyV4SignatureWithSTSIdentity(t *testing.T) { diff --git a/weed/s3api/s3_end_to_end_test.go b/weed/s3api/s3_end_to_end_test.go index 83943b1cc..3fa20194d 100644 --- a/weed/s3api/s3_end_to_end_test.go +++ b/weed/s3api/s3_end_to_end_test.go @@ -477,7 +477,7 @@ func setupS3ReadOnlyRole(ctx context.Context, manager *integration.IAMManager) { { Effect: "Allow", Principal: map[string]interface{}{ - "Federated": "https://test-issuer.com", + "Federated": "test-oidc", }, Action: []string{"sts:AssumeRoleWithWebIdentity"}, }, @@ -521,7 +521,7 @@ func setupS3AdminRole(ctx context.Context, manager *integration.IAMManager) { { Effect: "Allow", Principal: map[string]interface{}{ - "Federated": "https://test-issuer.com", + "Federated": "test-oidc", }, Action: []string{"sts:AssumeRoleWithWebIdentity"}, }, @@ -565,7 +565,7 @@ func setupS3WriteRole(ctx context.Context, manager *integration.IAMManager) { { Effect: "Allow", Principal: map[string]interface{}{ - "Federated": "https://test-issuer.com", + "Federated": "test-oidc", }, Action: []string{"sts:AssumeRoleWithWebIdentity"}, }, @@ -614,7 +614,7 @@ func setupS3IPRestrictedRole(ctx context.Context, manager *integration.IAMManage { Effect: "Allow", Principal: map[string]interface{}{ - "Federated": "https://test-issuer.com", + "Federated": "test-oidc", }, Action: []string{"sts:AssumeRoleWithWebIdentity"}, }, diff --git a/weed/s3api/s3_iam_middleware.go b/weed/s3api/s3_iam_middleware.go index 3548b58a7..5898617b0 100644 --- a/weed/s3api/s3_iam_middleware.go +++ b/weed/s3api/s3_iam_middleware.go @@ -23,6 +23,7 @@ type IAMIntegration interface { AuthenticateJWT(ctx context.Context, r *http.Request) (*IAMIdentity, s3err.ErrorCode) AuthorizeAction(ctx context.Context, identity *IAMIdentity, action Action, bucket string, objectKey string, r *http.Request) s3err.ErrorCode ValidateSessionToken(ctx context.Context, token 
string) (*sts.SessionInfo, error) + ValidateTrustPolicyForPrincipal(ctx context.Context, roleArn, principalArn string) error } // S3IAMIntegration provides IAM integration for S3 API @@ -224,6 +225,14 @@ func (s3iam *S3IAMIntegration) AuthorizeAction(ctx context.Context, identity *IA return s3err.ErrNone } +// ValidateTrustPolicyForPrincipal delegates to IAMManager to validate trust policy +func (s3iam *S3IAMIntegration) ValidateTrustPolicyForPrincipal(ctx context.Context, roleArn, principalArn string) error { + if s3iam.iamManager == nil { + return fmt.Errorf("IAM manager not available") + } + return s3iam.iamManager.ValidateTrustPolicyForPrincipal(ctx, roleArn, principalArn) +} + // IAMIdentity represents an authenticated identity with session information type IAMIdentity struct { Name string diff --git a/weed/s3api/s3_jwt_auth_test.go b/weed/s3api/s3_jwt_auth_test.go index afed20671..ccae1827f 100644 --- a/weed/s3api/s3_jwt_auth_test.go +++ b/weed/s3api/s3_jwt_auth_test.go @@ -387,7 +387,7 @@ func setupTestReadOnlyRole(ctx context.Context, manager *integration.IAMManager) { Effect: "Allow", Principal: map[string]interface{}{ - "Federated": "https://test-issuer.com", + "Federated": "test-oidc", }, Action: []string{"sts:AssumeRoleWithWebIdentity"}, }, @@ -405,7 +405,7 @@ func setupTestReadOnlyRole(ctx context.Context, manager *integration.IAMManager) { Effect: "Allow", Principal: map[string]interface{}{ - "Federated": "https://test-issuer.com", + "Federated": "test-oidc", }, Action: []string{"sts:AssumeRoleWithWebIdentity"}, }, @@ -449,7 +449,7 @@ func setupTestAdminRole(ctx context.Context, manager *integration.IAMManager) { { Effect: "Allow", Principal: map[string]interface{}{ - "Federated": "https://test-issuer.com", + "Federated": "test-oidc", }, Action: []string{"sts:AssumeRoleWithWebIdentity"}, }, @@ -467,7 +467,7 @@ func setupTestAdminRole(ctx context.Context, manager *integration.IAMManager) { { Effect: "Allow", Principal: map[string]interface{}{ - 
"Federated": "https://test-issuer.com", + "Federated": "test-oidc", }, Action: []string{"sts:AssumeRoleWithWebIdentity"}, }, @@ -510,7 +510,7 @@ func setupTestIPRestrictedRole(ctx context.Context, manager *integration.IAMMana { Effect: "Allow", Principal: map[string]interface{}{ - "Federated": "https://test-issuer.com", + "Federated": "test-oidc", }, Action: []string{"sts:AssumeRoleWithWebIdentity"}, }, diff --git a/weed/s3api/s3_multipart_iam_test.go b/weed/s3api/s3_multipart_iam_test.go index 5717393b1..7169891c0 100644 --- a/weed/s3api/s3_multipart_iam_test.go +++ b/weed/s3api/s3_multipart_iam_test.go @@ -568,7 +568,7 @@ func setupTestRolesForMultipart(ctx context.Context, manager *integration.IAMMan { Effect: "Allow", Principal: map[string]interface{}{ - "Federated": "https://test-issuer.com", + "Federated": "test-oidc", }, Action: []string{"sts:AssumeRoleWithWebIdentity"}, }, @@ -586,7 +586,7 @@ func setupTestRolesForMultipart(ctx context.Context, manager *integration.IAMMan { Effect: "Allow", Principal: map[string]interface{}{ - "Federated": "https://test-issuer.com", + "Federated": "test-oidc", }, Action: []string{"sts:AssumeRoleWithWebIdentity"}, }, diff --git a/weed/s3api/s3_presigned_url_iam_test.go b/weed/s3api/s3_presigned_url_iam_test.go index 8690dc904..2a2686f7b 100644 --- a/weed/s3api/s3_presigned_url_iam_test.go +++ b/weed/s3api/s3_presigned_url_iam_test.go @@ -521,7 +521,7 @@ func setupTestRolesForPresigned(ctx context.Context, manager *integration.IAMMan { Effect: "Allow", Principal: map[string]interface{}{ - "Federated": "https://test-issuer.com", + "Federated": "test-oidc", }, Action: []string{"sts:AssumeRoleWithWebIdentity"}, }, @@ -557,7 +557,7 @@ func setupTestRolesForPresigned(ctx context.Context, manager *integration.IAMMan { Effect: "Allow", Principal: map[string]interface{}{ - "Federated": "https://test-issuer.com", + "Federated": "test-oidc", }, Action: []string{"sts:AssumeRoleWithWebIdentity"}, }, @@ -575,7 +575,7 @@ func 
setupTestRolesForPresigned(ctx context.Context, manager *integration.IAMMan { Effect: "Allow", Principal: map[string]interface{}{ - "Federated": "https://test-issuer.com", + "Federated": "test-oidc", }, Action: []string{"sts:AssumeRoleWithWebIdentity"}, }, diff --git a/weed/s3api/s3api_server.go b/weed/s3api/s3api_server.go index 530a8af4b..035560020 100644 --- a/weed/s3api/s3api_server.go +++ b/weed/s3api/s3api_server.go @@ -190,7 +190,7 @@ func NewS3ApiServerWithStore(router *mux.Router, option *S3ApiServerOption, expl // Initialize STS HTTP handlers for AssumeRoleWithWebIdentity endpoint if stsService := iamManager.GetSTSService(); stsService != nil { - s3ApiServer.stsHandlers = NewSTSHandlers(stsService) + s3ApiServer.stsHandlers = NewSTSHandlers(stsService, iam) glog.V(1).Infof("STS HTTP handlers initialized for AssumeRoleWithWebIdentity") } @@ -622,7 +622,16 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) { // 1. Explicit query param match (highest priority) apiRouter.Methods(http.MethodPost).Path("/").Queries("Action", "AssumeRoleWithWebIdentity"). HandlerFunc(track(s3a.stsHandlers.HandleSTSRequest, "STS")) - glog.V(0).Infof("STS API enabled on S3 port (AssumeRoleWithWebIdentity)") + + // AssumeRole - requires SigV4 authentication + apiRouter.Methods(http.MethodPost).Path("/").Queries("Action", "AssumeRole"). + HandlerFunc(track(s3a.stsHandlers.HandleSTSRequest, "STS-AssumeRole")) + + // AssumeRoleWithLDAPIdentity - uses LDAP credentials + apiRouter.Methods(http.MethodPost).Path("/").Queries("Action", "AssumeRoleWithLDAPIdentity"). + HandlerFunc(track(s3a.stsHandlers.HandleSTSRequest, "STS-LDAP")) + + glog.V(0).Infof("STS API enabled on S3 port (AssumeRole, AssumeRoleWithWebIdentity, AssumeRoleWithLDAPIdentity)") } // Embedded IAM API endpoint @@ -631,10 +640,31 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) { if s3a.embeddedIam != nil { // 2. 
Authenticated IAM requests // Only match if the request appears to be authenticated (AWS Signature) - // This prevents unauthenticated STS requests (like AssumeRoleWithWebIdentity in body) - // from being captured by the IAM handler which would reject them. + // AND is not an STS request (which should be handled by STS handlers) iamMatcher := func(r *http.Request, rm *mux.RouteMatch) bool { - return getRequestAuthType(r) != authTypeAnonymous + if getRequestAuthType(r) == authTypeAnonymous { + return false + } + + // Check Action parameter in both form data and query string + // We call ParseForm and ignore its error so we still attempt to parse the body + // even if it's malformed, then check FormValue which covers both body and query. + // This guards against misrouting STS requests if the body is invalid. + r.ParseForm() + action := r.FormValue("Action") + + // If FormValue yielded nothing (possibly because ParseForm failed to populate Form), + // explicitly fall back to the query string to be safe. + if action == "" { + action = r.URL.Query().Get("Action") + } + + // Exclude STS actions - let them be handled by STS handlers + if action == "AssumeRole" || action == "AssumeRoleWithWebIdentity" || action == "AssumeRoleWithLDAPIdentity" { + return false + } + + return true } apiRouter.Methods(http.MethodPost).Path("/").MatcherFunc(iamMatcher). 
diff --git a/weed/s3api/s3api_server_routing_test.go b/weed/s3api/s3api_server_routing_test.go index 5aed24d39..2746d59fe 100644 --- a/weed/s3api/s3api_server_routing_test.go +++ b/weed/s3api/s3api_server_routing_test.go @@ -150,8 +150,8 @@ func TestRouting_IAMMatcherLogic(t *testing.T) { name: "AWS4 signature with STS action in body", authHeader: "AWS4-HMAC-SHA256 Credential=AKIA.../...", queryParams: "", - expectsIAM: true, - description: "Authenticated STS action should still route to IAM (auth takes precedence)", + expectsIAM: false, + description: "Authenticated STS action should route to STS handler (STS handlers handle their own auth)", }, } diff --git a/weed/s3api/s3api_sts.go b/weed/s3api/s3api_sts.go index 914f962ff..943e67929 100644 --- a/weed/s3api/s3api_sts.go +++ b/weed/s3api/s3api_sts.go @@ -5,6 +5,8 @@ package s3api // AWS SDKs to obtain temporary credentials using OIDC/JWT tokens. import ( + "crypto/rand" + "encoding/base64" "encoding/xml" "errors" "fmt" @@ -13,7 +15,9 @@ import ( "time" "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/iam/ldap" "github.com/seaweedfs/seaweedfs/weed/iam/sts" + "github.com/seaweedfs/seaweedfs/weed/iam/utils" "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" ) @@ -28,18 +32,61 @@ const ( stsDurationSeconds = "DurationSeconds" // STS Action names - actionAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" + actionAssumeRole = "AssumeRole" + actionAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" + actionAssumeRoleWithLDAPIdentity = "AssumeRoleWithLDAPIdentity" + + // LDAP parameter names + stsLDAPUsername = "LDAPUsername" + stsLDAPPassword = "LDAPPassword" + stsLDAPProviderName = "LDAPProviderName" ) +// STS duration constants (AWS specification) +const ( + minDurationSeconds = int64(900) // 15 minutes + maxDurationSeconds = int64(43200) // 12 hours + + // Default account ID for federated users + defaultAccountId = "111122223333" +) + +// parseDurationSeconds parses and 
validates the DurationSeconds parameter +// Returns nil if the parameter is not provided, or a pointer to the parsed value +func parseDurationSeconds(r *http.Request) (*int64, STSErrorCode, error) { + dsStr := r.FormValue("DurationSeconds") + if dsStr == "" { + return nil, "", nil + } + + ds, err := strconv.ParseInt(dsStr, 10, 64) + if err != nil { + return nil, STSErrInvalidParameterValue, fmt.Errorf("invalid DurationSeconds: %w", err) + } + + if ds < minDurationSeconds || ds > maxDurationSeconds { + return nil, STSErrInvalidParameterValue, + fmt.Errorf("DurationSeconds must be between %d and %d seconds", minDurationSeconds, maxDurationSeconds) + } + + return &ds, "", nil +} + +// Removed generateSecureCredentials - now using STS service's JWT token generation +// The STS service generates proper JWT tokens with embedded claims that can be validated +// across distributed instances without shared state. + // STSHandlers provides HTTP handlers for STS operations type STSHandlers struct { stsService *sts.STSService + iam *IdentityAccessManagement } // NewSTSHandlers creates a new STSHandlers instance -func NewSTSHandlers(stsService *sts.STSService) *STSHandlers { +func NewSTSHandlers(stsService *sts.STSService, iam *IdentityAccessManagement) *STSHandlers { return &STSHandlers{ stsService: stsService, + iam: iam, } } @@ -62,8 +109,12 @@ func (h *STSHandlers) HandleSTSRequest(w http.ResponseWriter, r *http.Request) { // Route based on action action := r.Form.Get(stsAction) switch action { + case actionAssumeRole: + h.handleAssumeRole(w, r) case actionAssumeRoleWithWebIdentity: h.handleAssumeRoleWithWebIdentity(w, r) + case actionAssumeRoleWithLDAPIdentity: + h.handleAssumeRoleWithLDAPIdentity(w, r) default: h.writeSTSErrorResponse(w, r, STSErrInvalidAction, fmt.Errorf("unsupported action: %s", action)) @@ -98,29 +149,11 @@ func (h *STSHandlers) handleAssumeRoleWithWebIdentity(w http.ResponseWriter, r * return } - // Parse and validate DurationSeconds - var 
durationSeconds *int64 - if dsStr := r.FormValue("DurationSeconds"); dsStr != "" { - ds, err := strconv.ParseInt(dsStr, 10, 64) - if err != nil { - h.writeSTSErrorResponse(w, r, STSErrInvalidParameterValue, - fmt.Errorf("invalid DurationSeconds: %w", err)) - return - } - - // Enforce AWS STS-compatible duration range for AssumeRoleWithWebIdentity - // AWS allows 900 seconds (15 minutes) to 43200 seconds (12 hours) - const ( - minDurationSeconds = int64(900) - maxDurationSeconds = int64(43200) - ) - if ds < minDurationSeconds || ds > maxDurationSeconds { - h.writeSTSErrorResponse(w, r, STSErrInvalidParameterValue, - fmt.Errorf("DurationSeconds must be between %d and %d seconds", minDurationSeconds, maxDurationSeconds)) - return - } - - durationSeconds = &ds + // Parse and validate DurationSeconds using helper + durationSeconds, errCode, err := parseDurationSeconds(r) + if err != nil { + h.writeSTSErrorResponse(w, r, errCode, err) + return } // Check if STS service is initialized @@ -179,6 +212,322 @@ func (h *STSHandlers) handleAssumeRoleWithWebIdentity(w http.ResponseWriter, r * s3err.WriteXMLResponse(w, r, http.StatusOK, xmlResponse) } +// handleAssumeRole handles the AssumeRole API action +// This requires AWS Signature V4 authentication +func (h *STSHandlers) handleAssumeRole(w http.ResponseWriter, r *http.Request) { + // Extract parameters from form + roleArn := r.FormValue("RoleArn") + roleSessionName := r.FormValue("RoleSessionName") + + // Validate required parameters + if roleArn == "" { + h.writeSTSErrorResponse(w, r, STSErrMissingParameter, + fmt.Errorf("RoleArn is required")) + return + } + + if roleSessionName == "" { + h.writeSTSErrorResponse(w, r, STSErrMissingParameter, + fmt.Errorf("RoleSessionName is required")) + return + } + + // Parse and validate DurationSeconds using helper + durationSeconds, errCode, err := parseDurationSeconds(r) + if err != nil { + h.writeSTSErrorResponse(w, r, errCode, err) + return + } + + // Check if STS service is 
initialized + if h.stsService == nil || !h.stsService.IsInitialized() { + h.writeSTSErrorResponse(w, r, STSErrSTSNotReady, + fmt.Errorf("STS service not initialized")) + return + } + + // Check if IAM is available for SigV4 verification + if h.iam == nil { + h.writeSTSErrorResponse(w, r, STSErrSTSNotReady, + fmt.Errorf("IAM not configured for STS")) + return + } + + // Validate AWS SigV4 authentication + identity, _, _, _, sigErrCode := h.iam.verifyV4Signature(r, false) + if sigErrCode != s3err.ErrNone { + glog.V(2).Infof("AssumeRole SigV4 verification failed: %v", sigErrCode) + h.writeSTSErrorResponse(w, r, STSErrAccessDenied, + fmt.Errorf("invalid AWS signature: %v", sigErrCode)) + return + } + + if identity == nil { + h.writeSTSErrorResponse(w, r, STSErrAccessDenied, + fmt.Errorf("unable to identify caller")) + return + } + + glog.V(2).Infof("AssumeRole: caller identity=%s, roleArn=%s, sessionName=%s", + identity.Name, roleArn, roleSessionName) + + // Check if the caller is authorized to assume the role (sts:AssumeRole permission) + // This validates that the caller has a policy allowing sts:AssumeRole on the target role + if authErr := h.iam.VerifyActionPermission(r, identity, Action("sts:AssumeRole"), "", roleArn); authErr != s3err.ErrNone { + glog.V(2).Infof("AssumeRole: caller %s is not authorized to assume role %s", identity.Name, roleArn) + h.writeSTSErrorResponse(w, r, STSErrAccessDenied, + fmt.Errorf("user %s is not authorized to assume role %s", identity.Name, roleArn)) + return + } + + // Validate that the target role trusts the caller (Trust Policy) + // This ensures the role's trust policy explicitly allows the principal to assume it + if err := h.iam.ValidateTrustPolicyForPrincipal(r.Context(), roleArn, identity.PrincipalArn); err != nil { + glog.V(2).Infof("AssumeRole: trust policy validation failed for %s to assume %s: %v", identity.Name, roleArn, err) + h.writeSTSErrorResponse(w, r, STSErrAccessDenied, fmt.Errorf("trust policy denies access")) + 
return + } + + // Generate common STS components + stsCreds, assumedUser, err := h.prepareSTSCredentials(roleArn, roleSessionName, identity.PrincipalArn, durationSeconds, nil) + if err != nil { + h.writeSTSErrorResponse(w, r, STSErrInternalError, err) + return + } + + // Build and return response + xmlResponse := &AssumeRoleResponse{ + Result: AssumeRoleResult{ + Credentials: stsCreds, + AssumedRoleUser: assumedUser, + }, + } + xmlResponse.ResponseMetadata.RequestId = fmt.Sprintf("%d", time.Now().UnixNano()) + + s3err.WriteXMLResponse(w, r, http.StatusOK, xmlResponse) +} + +// handleAssumeRoleWithLDAPIdentity handles the AssumeRoleWithLDAPIdentity API action +func (h *STSHandlers) handleAssumeRoleWithLDAPIdentity(w http.ResponseWriter, r *http.Request) { + // Extract parameters from form + roleArn := r.FormValue("RoleArn") + roleSessionName := r.FormValue("RoleSessionName") + ldapUsername := r.FormValue(stsLDAPUsername) + ldapPassword := r.FormValue(stsLDAPPassword) + + // Validate required parameters + if roleArn == "" { + h.writeSTSErrorResponse(w, r, STSErrMissingParameter, + fmt.Errorf("RoleArn is required")) + return + } + + if roleSessionName == "" { + h.writeSTSErrorResponse(w, r, STSErrMissingParameter, + fmt.Errorf("RoleSessionName is required")) + return + } + + if ldapUsername == "" { + h.writeSTSErrorResponse(w, r, STSErrMissingParameter, + fmt.Errorf("LDAPUsername is required")) + return + } + + if ldapPassword == "" { + h.writeSTSErrorResponse(w, r, STSErrMissingParameter, + fmt.Errorf("LDAPPassword is required")) + return + } + + // Parse and validate DurationSeconds using helper + durationSeconds, errCode, err := parseDurationSeconds(r) + if err != nil { + h.writeSTSErrorResponse(w, r, errCode, err) + return + } + + // Check if STS service is initialized + if h.stsService == nil || !h.stsService.IsInitialized() { + h.writeSTSErrorResponse(w, r, STSErrSTSNotReady, + fmt.Errorf("STS service not initialized")) + return + } + + // Optional: specific 
LDAP provider name + ldapProviderName := r.FormValue(stsLDAPProviderName) + + // Find an LDAP provider from the registered providers + var ldapProvider *ldap.LDAPProvider + ldapProvidersFound := 0 + for _, provider := range h.stsService.GetProviders() { + // Check if this is an LDAP provider by type assertion + if p, ok := provider.(*ldap.LDAPProvider); ok { + if ldapProviderName != "" && p.Name() == ldapProviderName { + ldapProvider = p + break + } else if ldapProviderName == "" && ldapProvider == nil { + ldapProvider = p + } + ldapProvidersFound++ + } + } + + if ldapProvidersFound > 1 && ldapProviderName == "" { + glog.Warningf("Multiple LDAP providers found (%d). Using the first one found (non-deterministic). Consider specifying LDAPProviderName.", ldapProvidersFound) + } + + if ldapProvider == nil { + glog.V(2).Infof("AssumeRoleWithLDAPIdentity: no LDAP provider configured") + h.writeSTSErrorResponse(w, r, STSErrSTSNotReady, + fmt.Errorf("no LDAP provider configured - please add an LDAP provider to IAM configuration")) + return + } + + // Authenticate with LDAP provider + // The provider expects credentials in "username:password" format + credentials := ldapUsername + ":" + ldapPassword + identity, err := ldapProvider.Authenticate(r.Context(), credentials) + if err != nil { + glog.V(2).Infof("AssumeRoleWithLDAPIdentity: LDAP authentication failed for user %s: %v", ldapUsername, err) + h.writeSTSErrorResponse(w, r, STSErrAccessDenied, + fmt.Errorf("authentication failed")) + return + } + + glog.V(2).Infof("AssumeRoleWithLDAPIdentity: user %s authenticated successfully, groups=%v", + ldapUsername, identity.Groups) + + // Verify that the identity is allowed to assume the role + // We create a temporary identity to represent the LDAP user for permission checking + // The checking logic will verify if the role's trust policy allows this principal + // Use configured account ID or default to "111122223333" for federated users + accountId := defaultAccountId + if 
h.stsService != nil && h.stsService.Config != nil && h.stsService.Config.AccountId != "" { + accountId = h.stsService.Config.AccountId + } + + ldapUserIdentity := &Identity{ + Name: identity.UserID, + Account: &Account{ + DisplayName: identity.DisplayName, + EmailAddress: identity.Email, + Id: identity.UserID, + }, + PrincipalArn: fmt.Sprintf("arn:aws:iam::%s:user/%s", accountId, identity.UserID), + } + + // Verify that the identity is allowed to assume the role by checking the Trust Policy + // The LDAP user doesn't have identity policies, so we strictly check if the Role trusts this principal. + if err := h.iam.ValidateTrustPolicyForPrincipal(r.Context(), roleArn, ldapUserIdentity.PrincipalArn); err != nil { + glog.V(2).Infof("AssumeRoleWithLDAPIdentity: trust policy validation failed for %s to assume %s: %v", ldapUsername, roleArn, err) + h.writeSTSErrorResponse(w, r, STSErrAccessDenied, fmt.Errorf("trust policy denies access")) + return + } + + // Generate common STS components with LDAP-specific claims + modifyClaims := func(claims *sts.STSSessionClaims) { + claims.WithIdentityProvider("ldap", identity.UserID, identity.Provider) + } + + stsCreds, assumedUser, err := h.prepareSTSCredentials(roleArn, roleSessionName, ldapUserIdentity.PrincipalArn, durationSeconds, modifyClaims) + if err != nil { + h.writeSTSErrorResponse(w, r, STSErrInternalError, err) + return + } + + // Build and return response + xmlResponse := &AssumeRoleWithLDAPIdentityResponse{ + Result: LDAPIdentityResult{ + Credentials: stsCreds, + AssumedRoleUser: assumedUser, + }, + } + xmlResponse.ResponseMetadata.RequestId = fmt.Sprintf("%d", time.Now().UnixNano()) + + s3err.WriteXMLResponse(w, r, http.StatusOK, xmlResponse) +} + +// prepareSTSCredentials extracts common shared logic for credential generation +func (h *STSHandlers) prepareSTSCredentials(roleArn, roleSessionName, principalArn string, + durationSeconds *int64, modifyClaims func(*sts.STSSessionClaims)) (STSCredentials, *AssumedRoleUser, 
error) { + + // Calculate duration + duration := time.Hour // Default 1 hour + if durationSeconds != nil { + duration = time.Duration(*durationSeconds) * time.Second + } + + // Generate session ID + sessionId, err := sts.GenerateSessionId() + if err != nil { + return STSCredentials{}, nil, fmt.Errorf("failed to generate session ID: %w", err) + } + + expiration := time.Now().Add(duration) + + // Extract role name from ARN for proper response formatting + roleName := utils.ExtractRoleNameFromArn(roleArn) + if roleName == "" { + roleName = roleArn // Fallback to full ARN if extraction fails + } + + // Create session claims with role information + claims := sts.NewSTSSessionClaims(sessionId, h.stsService.Config.Issuer, expiration). + WithSessionName(roleSessionName). + WithRoleInfo(roleArn, fmt.Sprintf("%s:%s", roleName, roleSessionName), principalArn) + + // Apply custom claims if provided (e.g., LDAP identity) + if modifyClaims != nil { + modifyClaims(claims) + } + + // Generate JWT session token + sessionToken, err := h.stsService.GetTokenGenerator().GenerateJWTWithClaims(claims) + if err != nil { + return STSCredentials{}, nil, fmt.Errorf("failed to generate session token: %w", err) + } + + // Generate temporary credentials (cryptographically secure) + // AccessKeyId: ASIA + 16 chars hex + // SecretAccessKey: 40 chars base64 + randBytes := make([]byte, 30) // Sufficient for both + if _, err := rand.Read(randBytes); err != nil { + return STSCredentials{}, nil, fmt.Errorf("failed to generate random bytes: %w", err) + } + + // Generate AccessKeyId (ASIA + 16 upper-hex chars) + // We use 8 bytes (16 hex chars) + accessKeyId := "ASIA" + fmt.Sprintf("%X", randBytes[:8]) + + // Generate SecretAccessKey: 30 random bytes, base64-encoded to a 40-character string + secretBytes := make([]byte, 30) + if _, err := rand.Read(secretBytes); err != nil { + return STSCredentials{}, nil, fmt.Errorf("failed to generate secret bytes: %w", err) + } + secretAccessKey := 
base64.StdEncoding.EncodeToString(secretBytes) + + // Get account ID from STS config or use default + accountId := defaultAccountId + if h.stsService != nil && h.stsService.Config != nil && h.stsService.Config.AccountId != "" { + accountId = h.stsService.Config.AccountId + } + + stsCreds := STSCredentials{ + AccessKeyId: accessKeyId, + SecretAccessKey: secretAccessKey, + SessionToken: sessionToken, + Expiration: expiration.Format(time.RFC3339), + } + + assumedUser := &AssumedRoleUser{ + AssumedRoleId: fmt.Sprintf("%s:%s", roleName, roleSessionName), + Arn: fmt.Sprintf("arn:aws:sts::%s:assumed-role/%s/%s", accountId, roleName, roleSessionName), + } + + return stsCreds, assumedUser, nil +} + // STS Response types for XML marshaling // AssumeRoleWithWebIdentityResponse is the response for AssumeRoleWithWebIdentity @@ -211,6 +560,36 @@ type AssumedRoleUser struct { Arn string `xml:"Arn"` } +// AssumeRoleResponse is the response for AssumeRole +type AssumeRoleResponse struct { + XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleResponse"` + Result AssumeRoleResult `xml:"AssumeRoleResult"` + ResponseMetadata struct { + RequestId string `xml:"RequestId,omitempty"` + } `xml:"ResponseMetadata,omitempty"` +} + +// AssumeRoleResult contains the result of AssumeRole +type AssumeRoleResult struct { + Credentials STSCredentials `xml:"Credentials"` + AssumedRoleUser *AssumedRoleUser `xml:"AssumedRoleUser,omitempty"` +} + +// AssumeRoleWithLDAPIdentityResponse is the response for AssumeRoleWithLDAPIdentity +type AssumeRoleWithLDAPIdentityResponse struct { + XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithLDAPIdentityResponse"` + Result LDAPIdentityResult `xml:"AssumeRoleWithLDAPIdentityResult"` + ResponseMetadata struct { + RequestId string `xml:"RequestId,omitempty"` + } `xml:"ResponseMetadata,omitempty"` +} + +// LDAPIdentityResult contains the result of AssumeRoleWithLDAPIdentity +type LDAPIdentityResult struct { + 
Credentials STSCredentials `xml:"Credentials"` + AssumedRoleUser *AssumedRoleUser `xml:"AssumedRoleUser,omitempty"` +} + // STS Error types // STSErrorCode represents STS error codes From 2af293ce60354d79906350109e14131a61332a1f Mon Sep 17 00:00:00 2001 From: Lisandro Pin Date: Mon, 12 Jan 2026 19:49:59 +0100 Subject: [PATCH 03/17] Boostrap persistent state for volume servers. (#7984) This PR implements logic load/save persistent state information for storages associated with volume servers, and reporting state changes back to masters via heartbeat messages. More work ensues! See https://github.com/seaweedfs/seaweedfs/issues/7977 for details. --- weed/pb/master.proto | 5 + weed/pb/master_pb/master.pb.go | 185 +-- weed/pb/volume_server.proto | 10 + weed/pb/volume_server_pb/volume_server.pb.go | 1133 +++++++++-------- .../volume_server_pb/volume_server_grpc.pb.go | 777 ++++++----- weed/server/master_grpc_server.go | 1 + weed/server/volume_grpc_client_to_master.go | 13 + weed/storage/disk_location.go | 9 +- weed/storage/store.go | 68 +- weed/storage/store_state.go | 71 ++ 10 files changed, 1331 insertions(+), 941 deletions(-) create mode 100644 weed/storage/store_state.go diff --git a/weed/pb/master.proto b/weed/pb/master.proto index bf93bd104..a6a7b15e2 100644 --- a/weed/pb/master.proto +++ b/weed/pb/master.proto @@ -4,6 +4,8 @@ package master_pb; option go_package = "github.com/seaweedfs/seaweedfs/weed/pb/master_pb"; +import "volume_server.proto"; + ////////////////////////////////////////////////// service Seaweed { @@ -84,6 +86,9 @@ message Heartbeat { uint32 grpc_port = 20; repeated string location_uuids = 21; string id = 22; // volume server id, independent of ip:port for stable identification + + // state flags + volume_server_pb.VolumeServerState state = 23; } message HeartbeatResponse { diff --git a/weed/pb/master_pb/master.pb.go b/weed/pb/master_pb/master.pb.go index e8376011e..a197a972b 100644 --- a/weed/pb/master_pb/master.pb.go +++ 
b/weed/pb/master_pb/master.pb.go @@ -11,6 +11,7 @@ import ( sync "sync" unsafe "unsafe" + volume_server_pb "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) @@ -46,8 +47,10 @@ type Heartbeat struct { GrpcPort uint32 `protobuf:"varint,20,opt,name=grpc_port,json=grpcPort,proto3" json:"grpc_port,omitempty"` LocationUuids []string `protobuf:"bytes,21,rep,name=location_uuids,json=locationUuids,proto3" json:"location_uuids,omitempty"` Id string `protobuf:"bytes,22,opt,name=id,proto3" json:"id,omitempty"` // volume server id, independent of ip:port for stable identification - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + // state flags + State *volume_server_pb.VolumeServerState `protobuf:"bytes,23,opt,name=state,proto3" json:"state,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Heartbeat) Reset() { @@ -213,6 +216,13 @@ func (x *Heartbeat) GetId() string { return "" } +func (x *Heartbeat) GetState() *volume_server_pb.VolumeServerState { + if x != nil { + return x.State + } + return nil +} + type HeartbeatResponse struct { state protoimpl.MessageState `protogen:"open.v1"` VolumeSizeLimit uint64 `protobuf:"varint,1,opt,name=volume_size_limit,json=volumeSizeLimit,proto3" json:"volume_size_limit,omitempty"` @@ -4159,7 +4169,7 @@ var File_master_proto protoreflect.FileDescriptor const file_master_proto_rawDesc = "" + "\n" + - "\fmaster.proto\x12\tmaster_pb\"\xd0\a\n" + + "\fmaster.proto\x12\tmaster_pb\x1a\x13volume_server.proto\"\x8b\b\n" + "\tHeartbeat\x12\x0e\n" + "\x02ip\x18\x01 \x01(\tR\x02ip\x12\x12\n" + "\x04port\x18\x02 \x01(\rR\x04port\x12\x1d\n" + @@ -4185,7 +4195,8 @@ const file_master_proto_rawDesc = "" + "\x11max_volume_counts\x18\x04 \x03(\v2).master_pb.Heartbeat.MaxVolumeCountsEntryR\x0fmaxVolumeCounts\x12\x1b\n" + "\tgrpc_port\x18\x14 \x01(\rR\bgrpcPort\x12%\n" 
+ "\x0elocation_uuids\x18\x15 \x03(\tR\rlocationUuids\x12\x0e\n" + - "\x02id\x18\x16 \x01(\tR\x02id\x1aB\n" + + "\x02id\x18\x16 \x01(\tR\x02id\x129\n" + + "\x05state\x18\x17 \x01(\v2#.volume_server_pb.VolumeServerStateR\x05state\x1aB\n" + "\x14MaxVolumeCountsEntry\x12\x10\n" + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + "\x05value\x18\x02 \x01(\rR\x05value:\x028\x01\"\xcd\x02\n" + @@ -4634,6 +4645,7 @@ var file_master_proto_goTypes = []any{ (*LookupEcVolumeResponse_EcShardIdLocation)(nil), // 69: master_pb.LookupEcVolumeResponse.EcShardIdLocation (*ListClusterNodesResponse_ClusterNode)(nil), // 70: master_pb.ListClusterNodesResponse.ClusterNode (*RaftListClusterServersResponse_ClusterServers)(nil), // 71: master_pb.RaftListClusterServersResponse.ClusterServers + (*volume_server_pb.VolumeServerState)(nil), // 72: volume_server_pb.VolumeServerState } var file_master_proto_depIdxs = []int32{ 2, // 0: master_pb.Heartbeat.volumes:type_name -> master_pb.VolumeInformationMessage @@ -4643,88 +4655,89 @@ var file_master_proto_depIdxs = []int32{ 4, // 4: master_pb.Heartbeat.new_ec_shards:type_name -> master_pb.VolumeEcShardInformationMessage 4, // 5: master_pb.Heartbeat.deleted_ec_shards:type_name -> master_pb.VolumeEcShardInformationMessage 61, // 6: master_pb.Heartbeat.max_volume_counts:type_name -> master_pb.Heartbeat.MaxVolumeCountsEntry - 5, // 7: master_pb.HeartbeatResponse.storage_backends:type_name -> master_pb.StorageBackend - 62, // 8: master_pb.StorageBackend.properties:type_name -> master_pb.StorageBackend.PropertiesEntry - 63, // 9: master_pb.SuperBlockExtra.erasure_coding:type_name -> master_pb.SuperBlockExtra.ErasureCoding - 9, // 10: master_pb.KeepConnectedResponse.volume_location:type_name -> master_pb.VolumeLocation - 10, // 11: master_pb.KeepConnectedResponse.cluster_node_update:type_name -> master_pb.ClusterNodeUpdate - 64, // 12: master_pb.LookupVolumeResponse.volume_id_locations:type_name -> master_pb.LookupVolumeResponse.VolumeIdLocation - 14, // 13: 
master_pb.AssignResponse.replicas:type_name -> master_pb.Location - 14, // 14: master_pb.AssignResponse.location:type_name -> master_pb.Location - 20, // 15: master_pb.CollectionListResponse.collections:type_name -> master_pb.Collection - 2, // 16: master_pb.DiskInfo.volume_infos:type_name -> master_pb.VolumeInformationMessage - 4, // 17: master_pb.DiskInfo.ec_shard_infos:type_name -> master_pb.VolumeEcShardInformationMessage - 65, // 18: master_pb.DataNodeInfo.diskInfos:type_name -> master_pb.DataNodeInfo.DiskInfosEntry - 26, // 19: master_pb.RackInfo.data_node_infos:type_name -> master_pb.DataNodeInfo - 66, // 20: master_pb.RackInfo.diskInfos:type_name -> master_pb.RackInfo.DiskInfosEntry - 27, // 21: master_pb.DataCenterInfo.rack_infos:type_name -> master_pb.RackInfo - 67, // 22: master_pb.DataCenterInfo.diskInfos:type_name -> master_pb.DataCenterInfo.DiskInfosEntry - 28, // 23: master_pb.TopologyInfo.data_center_infos:type_name -> master_pb.DataCenterInfo - 68, // 24: master_pb.TopologyInfo.diskInfos:type_name -> master_pb.TopologyInfo.DiskInfosEntry - 29, // 25: master_pb.VolumeListResponse.topology_info:type_name -> master_pb.TopologyInfo - 69, // 26: master_pb.LookupEcVolumeResponse.shard_id_locations:type_name -> master_pb.LookupEcVolumeResponse.EcShardIdLocation - 5, // 27: master_pb.GetMasterConfigurationResponse.storage_backends:type_name -> master_pb.StorageBackend - 70, // 28: master_pb.ListClusterNodesResponse.cluster_nodes:type_name -> master_pb.ListClusterNodesResponse.ClusterNode - 71, // 29: master_pb.RaftListClusterServersResponse.cluster_servers:type_name -> master_pb.RaftListClusterServersResponse.ClusterServers - 14, // 30: master_pb.LookupVolumeResponse.VolumeIdLocation.locations:type_name -> master_pb.Location - 25, // 31: master_pb.DataNodeInfo.DiskInfosEntry.value:type_name -> master_pb.DiskInfo - 25, // 32: master_pb.RackInfo.DiskInfosEntry.value:type_name -> master_pb.DiskInfo - 25, // 33: 
master_pb.DataCenterInfo.DiskInfosEntry.value:type_name -> master_pb.DiskInfo - 25, // 34: master_pb.TopologyInfo.DiskInfosEntry.value:type_name -> master_pb.DiskInfo - 14, // 35: master_pb.LookupEcVolumeResponse.EcShardIdLocation.locations:type_name -> master_pb.Location - 0, // 36: master_pb.Seaweed.SendHeartbeat:input_type -> master_pb.Heartbeat - 8, // 37: master_pb.Seaweed.KeepConnected:input_type -> master_pb.KeepConnectedRequest - 12, // 38: master_pb.Seaweed.LookupVolume:input_type -> master_pb.LookupVolumeRequest - 15, // 39: master_pb.Seaweed.Assign:input_type -> master_pb.AssignRequest - 15, // 40: master_pb.Seaweed.StreamAssign:input_type -> master_pb.AssignRequest - 18, // 41: master_pb.Seaweed.Statistics:input_type -> master_pb.StatisticsRequest - 21, // 42: master_pb.Seaweed.CollectionList:input_type -> master_pb.CollectionListRequest - 23, // 43: master_pb.Seaweed.CollectionDelete:input_type -> master_pb.CollectionDeleteRequest - 30, // 44: master_pb.Seaweed.VolumeList:input_type -> master_pb.VolumeListRequest - 32, // 45: master_pb.Seaweed.LookupEcVolume:input_type -> master_pb.LookupEcVolumeRequest - 34, // 46: master_pb.Seaweed.VacuumVolume:input_type -> master_pb.VacuumVolumeRequest - 36, // 47: master_pb.Seaweed.DisableVacuum:input_type -> master_pb.DisableVacuumRequest - 38, // 48: master_pb.Seaweed.EnableVacuum:input_type -> master_pb.EnableVacuumRequest - 40, // 49: master_pb.Seaweed.VolumeMarkReadonly:input_type -> master_pb.VolumeMarkReadonlyRequest - 42, // 50: master_pb.Seaweed.GetMasterConfiguration:input_type -> master_pb.GetMasterConfigurationRequest - 44, // 51: master_pb.Seaweed.ListClusterNodes:input_type -> master_pb.ListClusterNodesRequest - 46, // 52: master_pb.Seaweed.LeaseAdminToken:input_type -> master_pb.LeaseAdminTokenRequest - 48, // 53: master_pb.Seaweed.ReleaseAdminToken:input_type -> master_pb.ReleaseAdminTokenRequest - 50, // 54: master_pb.Seaweed.Ping:input_type -> master_pb.PingRequest - 56, // 55: 
master_pb.Seaweed.RaftListClusterServers:input_type -> master_pb.RaftListClusterServersRequest - 52, // 56: master_pb.Seaweed.RaftAddServer:input_type -> master_pb.RaftAddServerRequest - 54, // 57: master_pb.Seaweed.RaftRemoveServer:input_type -> master_pb.RaftRemoveServerRequest - 58, // 58: master_pb.Seaweed.RaftLeadershipTransfer:input_type -> master_pb.RaftLeadershipTransferRequest - 16, // 59: master_pb.Seaweed.VolumeGrow:input_type -> master_pb.VolumeGrowRequest - 1, // 60: master_pb.Seaweed.SendHeartbeat:output_type -> master_pb.HeartbeatResponse - 11, // 61: master_pb.Seaweed.KeepConnected:output_type -> master_pb.KeepConnectedResponse - 13, // 62: master_pb.Seaweed.LookupVolume:output_type -> master_pb.LookupVolumeResponse - 17, // 63: master_pb.Seaweed.Assign:output_type -> master_pb.AssignResponse - 17, // 64: master_pb.Seaweed.StreamAssign:output_type -> master_pb.AssignResponse - 19, // 65: master_pb.Seaweed.Statistics:output_type -> master_pb.StatisticsResponse - 22, // 66: master_pb.Seaweed.CollectionList:output_type -> master_pb.CollectionListResponse - 24, // 67: master_pb.Seaweed.CollectionDelete:output_type -> master_pb.CollectionDeleteResponse - 31, // 68: master_pb.Seaweed.VolumeList:output_type -> master_pb.VolumeListResponse - 33, // 69: master_pb.Seaweed.LookupEcVolume:output_type -> master_pb.LookupEcVolumeResponse - 35, // 70: master_pb.Seaweed.VacuumVolume:output_type -> master_pb.VacuumVolumeResponse - 37, // 71: master_pb.Seaweed.DisableVacuum:output_type -> master_pb.DisableVacuumResponse - 39, // 72: master_pb.Seaweed.EnableVacuum:output_type -> master_pb.EnableVacuumResponse - 41, // 73: master_pb.Seaweed.VolumeMarkReadonly:output_type -> master_pb.VolumeMarkReadonlyResponse - 43, // 74: master_pb.Seaweed.GetMasterConfiguration:output_type -> master_pb.GetMasterConfigurationResponse - 45, // 75: master_pb.Seaweed.ListClusterNodes:output_type -> master_pb.ListClusterNodesResponse - 47, // 76: 
master_pb.Seaweed.LeaseAdminToken:output_type -> master_pb.LeaseAdminTokenResponse - 49, // 77: master_pb.Seaweed.ReleaseAdminToken:output_type -> master_pb.ReleaseAdminTokenResponse - 51, // 78: master_pb.Seaweed.Ping:output_type -> master_pb.PingResponse - 57, // 79: master_pb.Seaweed.RaftListClusterServers:output_type -> master_pb.RaftListClusterServersResponse - 53, // 80: master_pb.Seaweed.RaftAddServer:output_type -> master_pb.RaftAddServerResponse - 55, // 81: master_pb.Seaweed.RaftRemoveServer:output_type -> master_pb.RaftRemoveServerResponse - 59, // 82: master_pb.Seaweed.RaftLeadershipTransfer:output_type -> master_pb.RaftLeadershipTransferResponse - 60, // 83: master_pb.Seaweed.VolumeGrow:output_type -> master_pb.VolumeGrowResponse - 60, // [60:84] is the sub-list for method output_type - 36, // [36:60] is the sub-list for method input_type - 36, // [36:36] is the sub-list for extension type_name - 36, // [36:36] is the sub-list for extension extendee - 0, // [0:36] is the sub-list for field type_name + 72, // 7: master_pb.Heartbeat.state:type_name -> volume_server_pb.VolumeServerState + 5, // 8: master_pb.HeartbeatResponse.storage_backends:type_name -> master_pb.StorageBackend + 62, // 9: master_pb.StorageBackend.properties:type_name -> master_pb.StorageBackend.PropertiesEntry + 63, // 10: master_pb.SuperBlockExtra.erasure_coding:type_name -> master_pb.SuperBlockExtra.ErasureCoding + 9, // 11: master_pb.KeepConnectedResponse.volume_location:type_name -> master_pb.VolumeLocation + 10, // 12: master_pb.KeepConnectedResponse.cluster_node_update:type_name -> master_pb.ClusterNodeUpdate + 64, // 13: master_pb.LookupVolumeResponse.volume_id_locations:type_name -> master_pb.LookupVolumeResponse.VolumeIdLocation + 14, // 14: master_pb.AssignResponse.replicas:type_name -> master_pb.Location + 14, // 15: master_pb.AssignResponse.location:type_name -> master_pb.Location + 20, // 16: master_pb.CollectionListResponse.collections:type_name -> master_pb.Collection + 
2, // 17: master_pb.DiskInfo.volume_infos:type_name -> master_pb.VolumeInformationMessage + 4, // 18: master_pb.DiskInfo.ec_shard_infos:type_name -> master_pb.VolumeEcShardInformationMessage + 65, // 19: master_pb.DataNodeInfo.diskInfos:type_name -> master_pb.DataNodeInfo.DiskInfosEntry + 26, // 20: master_pb.RackInfo.data_node_infos:type_name -> master_pb.DataNodeInfo + 66, // 21: master_pb.RackInfo.diskInfos:type_name -> master_pb.RackInfo.DiskInfosEntry + 27, // 22: master_pb.DataCenterInfo.rack_infos:type_name -> master_pb.RackInfo + 67, // 23: master_pb.DataCenterInfo.diskInfos:type_name -> master_pb.DataCenterInfo.DiskInfosEntry + 28, // 24: master_pb.TopologyInfo.data_center_infos:type_name -> master_pb.DataCenterInfo + 68, // 25: master_pb.TopologyInfo.diskInfos:type_name -> master_pb.TopologyInfo.DiskInfosEntry + 29, // 26: master_pb.VolumeListResponse.topology_info:type_name -> master_pb.TopologyInfo + 69, // 27: master_pb.LookupEcVolumeResponse.shard_id_locations:type_name -> master_pb.LookupEcVolumeResponse.EcShardIdLocation + 5, // 28: master_pb.GetMasterConfigurationResponse.storage_backends:type_name -> master_pb.StorageBackend + 70, // 29: master_pb.ListClusterNodesResponse.cluster_nodes:type_name -> master_pb.ListClusterNodesResponse.ClusterNode + 71, // 30: master_pb.RaftListClusterServersResponse.cluster_servers:type_name -> master_pb.RaftListClusterServersResponse.ClusterServers + 14, // 31: master_pb.LookupVolumeResponse.VolumeIdLocation.locations:type_name -> master_pb.Location + 25, // 32: master_pb.DataNodeInfo.DiskInfosEntry.value:type_name -> master_pb.DiskInfo + 25, // 33: master_pb.RackInfo.DiskInfosEntry.value:type_name -> master_pb.DiskInfo + 25, // 34: master_pb.DataCenterInfo.DiskInfosEntry.value:type_name -> master_pb.DiskInfo + 25, // 35: master_pb.TopologyInfo.DiskInfosEntry.value:type_name -> master_pb.DiskInfo + 14, // 36: master_pb.LookupEcVolumeResponse.EcShardIdLocation.locations:type_name -> master_pb.Location + 0, // 37: 
master_pb.Seaweed.SendHeartbeat:input_type -> master_pb.Heartbeat + 8, // 38: master_pb.Seaweed.KeepConnected:input_type -> master_pb.KeepConnectedRequest + 12, // 39: master_pb.Seaweed.LookupVolume:input_type -> master_pb.LookupVolumeRequest + 15, // 40: master_pb.Seaweed.Assign:input_type -> master_pb.AssignRequest + 15, // 41: master_pb.Seaweed.StreamAssign:input_type -> master_pb.AssignRequest + 18, // 42: master_pb.Seaweed.Statistics:input_type -> master_pb.StatisticsRequest + 21, // 43: master_pb.Seaweed.CollectionList:input_type -> master_pb.CollectionListRequest + 23, // 44: master_pb.Seaweed.CollectionDelete:input_type -> master_pb.CollectionDeleteRequest + 30, // 45: master_pb.Seaweed.VolumeList:input_type -> master_pb.VolumeListRequest + 32, // 46: master_pb.Seaweed.LookupEcVolume:input_type -> master_pb.LookupEcVolumeRequest + 34, // 47: master_pb.Seaweed.VacuumVolume:input_type -> master_pb.VacuumVolumeRequest + 36, // 48: master_pb.Seaweed.DisableVacuum:input_type -> master_pb.DisableVacuumRequest + 38, // 49: master_pb.Seaweed.EnableVacuum:input_type -> master_pb.EnableVacuumRequest + 40, // 50: master_pb.Seaweed.VolumeMarkReadonly:input_type -> master_pb.VolumeMarkReadonlyRequest + 42, // 51: master_pb.Seaweed.GetMasterConfiguration:input_type -> master_pb.GetMasterConfigurationRequest + 44, // 52: master_pb.Seaweed.ListClusterNodes:input_type -> master_pb.ListClusterNodesRequest + 46, // 53: master_pb.Seaweed.LeaseAdminToken:input_type -> master_pb.LeaseAdminTokenRequest + 48, // 54: master_pb.Seaweed.ReleaseAdminToken:input_type -> master_pb.ReleaseAdminTokenRequest + 50, // 55: master_pb.Seaweed.Ping:input_type -> master_pb.PingRequest + 56, // 56: master_pb.Seaweed.RaftListClusterServers:input_type -> master_pb.RaftListClusterServersRequest + 52, // 57: master_pb.Seaweed.RaftAddServer:input_type -> master_pb.RaftAddServerRequest + 54, // 58: master_pb.Seaweed.RaftRemoveServer:input_type -> master_pb.RaftRemoveServerRequest + 58, // 59: 
master_pb.Seaweed.RaftLeadershipTransfer:input_type -> master_pb.RaftLeadershipTransferRequest + 16, // 60: master_pb.Seaweed.VolumeGrow:input_type -> master_pb.VolumeGrowRequest + 1, // 61: master_pb.Seaweed.SendHeartbeat:output_type -> master_pb.HeartbeatResponse + 11, // 62: master_pb.Seaweed.KeepConnected:output_type -> master_pb.KeepConnectedResponse + 13, // 63: master_pb.Seaweed.LookupVolume:output_type -> master_pb.LookupVolumeResponse + 17, // 64: master_pb.Seaweed.Assign:output_type -> master_pb.AssignResponse + 17, // 65: master_pb.Seaweed.StreamAssign:output_type -> master_pb.AssignResponse + 19, // 66: master_pb.Seaweed.Statistics:output_type -> master_pb.StatisticsResponse + 22, // 67: master_pb.Seaweed.CollectionList:output_type -> master_pb.CollectionListResponse + 24, // 68: master_pb.Seaweed.CollectionDelete:output_type -> master_pb.CollectionDeleteResponse + 31, // 69: master_pb.Seaweed.VolumeList:output_type -> master_pb.VolumeListResponse + 33, // 70: master_pb.Seaweed.LookupEcVolume:output_type -> master_pb.LookupEcVolumeResponse + 35, // 71: master_pb.Seaweed.VacuumVolume:output_type -> master_pb.VacuumVolumeResponse + 37, // 72: master_pb.Seaweed.DisableVacuum:output_type -> master_pb.DisableVacuumResponse + 39, // 73: master_pb.Seaweed.EnableVacuum:output_type -> master_pb.EnableVacuumResponse + 41, // 74: master_pb.Seaweed.VolumeMarkReadonly:output_type -> master_pb.VolumeMarkReadonlyResponse + 43, // 75: master_pb.Seaweed.GetMasterConfiguration:output_type -> master_pb.GetMasterConfigurationResponse + 45, // 76: master_pb.Seaweed.ListClusterNodes:output_type -> master_pb.ListClusterNodesResponse + 47, // 77: master_pb.Seaweed.LeaseAdminToken:output_type -> master_pb.LeaseAdminTokenResponse + 49, // 78: master_pb.Seaweed.ReleaseAdminToken:output_type -> master_pb.ReleaseAdminTokenResponse + 51, // 79: master_pb.Seaweed.Ping:output_type -> master_pb.PingResponse + 57, // 80: master_pb.Seaweed.RaftListClusterServers:output_type -> 
master_pb.RaftListClusterServersResponse + 53, // 81: master_pb.Seaweed.RaftAddServer:output_type -> master_pb.RaftAddServerResponse + 55, // 82: master_pb.Seaweed.RaftRemoveServer:output_type -> master_pb.RaftRemoveServerResponse + 59, // 83: master_pb.Seaweed.RaftLeadershipTransfer:output_type -> master_pb.RaftLeadershipTransferResponse + 60, // 84: master_pb.Seaweed.VolumeGrow:output_type -> master_pb.VolumeGrowResponse + 61, // [61:85] is the sub-list for method output_type + 37, // [37:61] is the sub-list for method input_type + 37, // [37:37] is the sub-list for extension type_name + 37, // [37:37] is the sub-list for extension extendee + 0, // [0:37] is the sub-list for field type_name } func init() { file_master_proto_init() } diff --git a/weed/pb/volume_server.proto b/weed/pb/volume_server.proto index d0d664f74..36dd513b9 100644 --- a/weed/pb/volume_server.proto +++ b/weed/pb/volume_server.proto @@ -7,6 +7,14 @@ import "remote.proto"; ////////////////////////////////////////////////// +// Persistent state for volume servers. +message VolumeServerState { + // Whether the server is in maintenance (i.e. read-only) mode. + bool maintenance = 1; +} + +////////////////////////////////////////////////// + service VolumeServer { //Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas. 
rpc BatchDelete (BatchDeleteRequest) returns (BatchDeleteResponse) { @@ -45,6 +53,7 @@ service VolumeServer { } rpc VolumeStatus (VolumeStatusRequest) returns (VolumeStatusResponse) { } + // TODO(issues/7977): add RPCs to control state flags // copy the .idx .dat files, and mount this volume rpc VolumeCopy (VolumeCopyRequest) returns (stream VolumeCopyResponse) { @@ -569,6 +578,7 @@ message VolumeServerStatusRequest { } message VolumeServerStatusResponse { + // TODO(issues/7977): add volume server state to response repeated DiskStatus disk_statuses = 1; MemStatus memory_status = 2; string version = 3; diff --git a/weed/pb/volume_server_pb/volume_server.pb.go b/weed/pb/volume_server_pb/volume_server.pb.go index cf0ad3ffb..e7bbce7aa 100644 --- a/weed/pb/volume_server_pb/volume_server.pb.go +++ b/weed/pb/volume_server_pb/volume_server.pb.go @@ -23,6 +23,52 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +// Describes persistent state for volume servers. +type VolumeServerState struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Whether the server is in maintenance (i.e. read-only) mode. 
+ Maintenance bool `protobuf:"varint,1,opt,name=maintenance,proto3" json:"maintenance,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *VolumeServerState) Reset() { + *x = VolumeServerState{} + mi := &file_volume_server_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *VolumeServerState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeServerState) ProtoMessage() {} + +func (x *VolumeServerState) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeServerState.ProtoReflect.Descriptor instead. +func (*VolumeServerState) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{0} +} + +func (x *VolumeServerState) GetMaintenance() bool { + if x != nil { + return x.Maintenance + } + return false +} + type BatchDeleteRequest struct { state protoimpl.MessageState `protogen:"open.v1"` FileIds []string `protobuf:"bytes,1,rep,name=file_ids,json=fileIds,proto3" json:"file_ids,omitempty"` @@ -33,7 +79,7 @@ type BatchDeleteRequest struct { func (x *BatchDeleteRequest) Reset() { *x = BatchDeleteRequest{} - mi := &file_volume_server_proto_msgTypes[0] + mi := &file_volume_server_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -45,7 +91,7 @@ func (x *BatchDeleteRequest) String() string { func (*BatchDeleteRequest) ProtoMessage() {} func (x *BatchDeleteRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[0] + mi := &file_volume_server_proto_msgTypes[1] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -58,7 +104,7 @@ func (x 
*BatchDeleteRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use BatchDeleteRequest.ProtoReflect.Descriptor instead. func (*BatchDeleteRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{0} + return file_volume_server_proto_rawDescGZIP(), []int{1} } func (x *BatchDeleteRequest) GetFileIds() []string { @@ -84,7 +130,7 @@ type BatchDeleteResponse struct { func (x *BatchDeleteResponse) Reset() { *x = BatchDeleteResponse{} - mi := &file_volume_server_proto_msgTypes[1] + mi := &file_volume_server_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -96,7 +142,7 @@ func (x *BatchDeleteResponse) String() string { func (*BatchDeleteResponse) ProtoMessage() {} func (x *BatchDeleteResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[1] + mi := &file_volume_server_proto_msgTypes[2] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -109,7 +155,7 @@ func (x *BatchDeleteResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use BatchDeleteResponse.ProtoReflect.Descriptor instead. 
func (*BatchDeleteResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{1} + return file_volume_server_proto_rawDescGZIP(), []int{2} } func (x *BatchDeleteResponse) GetResults() []*DeleteResult { @@ -132,7 +178,7 @@ type DeleteResult struct { func (x *DeleteResult) Reset() { *x = DeleteResult{} - mi := &file_volume_server_proto_msgTypes[2] + mi := &file_volume_server_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -144,7 +190,7 @@ func (x *DeleteResult) String() string { func (*DeleteResult) ProtoMessage() {} func (x *DeleteResult) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[2] + mi := &file_volume_server_proto_msgTypes[3] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -157,7 +203,7 @@ func (x *DeleteResult) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteResult.ProtoReflect.Descriptor instead. func (*DeleteResult) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{2} + return file_volume_server_proto_rawDescGZIP(), []int{3} } func (x *DeleteResult) GetFileId() string { @@ -203,7 +249,7 @@ type Empty struct { func (x *Empty) Reset() { *x = Empty{} - mi := &file_volume_server_proto_msgTypes[3] + mi := &file_volume_server_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -215,7 +261,7 @@ func (x *Empty) String() string { func (*Empty) ProtoMessage() {} func (x *Empty) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[3] + mi := &file_volume_server_proto_msgTypes[4] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -228,7 +274,7 @@ func (x *Empty) ProtoReflect() protoreflect.Message { // Deprecated: Use Empty.ProtoReflect.Descriptor instead. 
func (*Empty) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{3} + return file_volume_server_proto_rawDescGZIP(), []int{4} } type VacuumVolumeCheckRequest struct { @@ -240,7 +286,7 @@ type VacuumVolumeCheckRequest struct { func (x *VacuumVolumeCheckRequest) Reset() { *x = VacuumVolumeCheckRequest{} - mi := &file_volume_server_proto_msgTypes[4] + mi := &file_volume_server_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -252,7 +298,7 @@ func (x *VacuumVolumeCheckRequest) String() string { func (*VacuumVolumeCheckRequest) ProtoMessage() {} func (x *VacuumVolumeCheckRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[4] + mi := &file_volume_server_proto_msgTypes[5] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -265,7 +311,7 @@ func (x *VacuumVolumeCheckRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VacuumVolumeCheckRequest.ProtoReflect.Descriptor instead. 
func (*VacuumVolumeCheckRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{4} + return file_volume_server_proto_rawDescGZIP(), []int{5} } func (x *VacuumVolumeCheckRequest) GetVolumeId() uint32 { @@ -284,7 +330,7 @@ type VacuumVolumeCheckResponse struct { func (x *VacuumVolumeCheckResponse) Reset() { *x = VacuumVolumeCheckResponse{} - mi := &file_volume_server_proto_msgTypes[5] + mi := &file_volume_server_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -296,7 +342,7 @@ func (x *VacuumVolumeCheckResponse) String() string { func (*VacuumVolumeCheckResponse) ProtoMessage() {} func (x *VacuumVolumeCheckResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[5] + mi := &file_volume_server_proto_msgTypes[6] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -309,7 +355,7 @@ func (x *VacuumVolumeCheckResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VacuumVolumeCheckResponse.ProtoReflect.Descriptor instead. 
func (*VacuumVolumeCheckResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{5} + return file_volume_server_proto_rawDescGZIP(), []int{6} } func (x *VacuumVolumeCheckResponse) GetGarbageRatio() float64 { @@ -329,7 +375,7 @@ type VacuumVolumeCompactRequest struct { func (x *VacuumVolumeCompactRequest) Reset() { *x = VacuumVolumeCompactRequest{} - mi := &file_volume_server_proto_msgTypes[6] + mi := &file_volume_server_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -341,7 +387,7 @@ func (x *VacuumVolumeCompactRequest) String() string { func (*VacuumVolumeCompactRequest) ProtoMessage() {} func (x *VacuumVolumeCompactRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[6] + mi := &file_volume_server_proto_msgTypes[7] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -354,7 +400,7 @@ func (x *VacuumVolumeCompactRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VacuumVolumeCompactRequest.ProtoReflect.Descriptor instead. 
func (*VacuumVolumeCompactRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{6} + return file_volume_server_proto_rawDescGZIP(), []int{7} } func (x *VacuumVolumeCompactRequest) GetVolumeId() uint32 { @@ -381,7 +427,7 @@ type VacuumVolumeCompactResponse struct { func (x *VacuumVolumeCompactResponse) Reset() { *x = VacuumVolumeCompactResponse{} - mi := &file_volume_server_proto_msgTypes[7] + mi := &file_volume_server_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -393,7 +439,7 @@ func (x *VacuumVolumeCompactResponse) String() string { func (*VacuumVolumeCompactResponse) ProtoMessage() {} func (x *VacuumVolumeCompactResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[7] + mi := &file_volume_server_proto_msgTypes[8] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -406,7 +452,7 @@ func (x *VacuumVolumeCompactResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VacuumVolumeCompactResponse.ProtoReflect.Descriptor instead. 
func (*VacuumVolumeCompactResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{7} + return file_volume_server_proto_rawDescGZIP(), []int{8} } func (x *VacuumVolumeCompactResponse) GetProcessedBytes() int64 { @@ -432,7 +478,7 @@ type VacuumVolumeCommitRequest struct { func (x *VacuumVolumeCommitRequest) Reset() { *x = VacuumVolumeCommitRequest{} - mi := &file_volume_server_proto_msgTypes[8] + mi := &file_volume_server_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -444,7 +490,7 @@ func (x *VacuumVolumeCommitRequest) String() string { func (*VacuumVolumeCommitRequest) ProtoMessage() {} func (x *VacuumVolumeCommitRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[8] + mi := &file_volume_server_proto_msgTypes[9] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -457,7 +503,7 @@ func (x *VacuumVolumeCommitRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VacuumVolumeCommitRequest.ProtoReflect.Descriptor instead. 
func (*VacuumVolumeCommitRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{8} + return file_volume_server_proto_rawDescGZIP(), []int{9} } func (x *VacuumVolumeCommitRequest) GetVolumeId() uint32 { @@ -477,7 +523,7 @@ type VacuumVolumeCommitResponse struct { func (x *VacuumVolumeCommitResponse) Reset() { *x = VacuumVolumeCommitResponse{} - mi := &file_volume_server_proto_msgTypes[9] + mi := &file_volume_server_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -489,7 +535,7 @@ func (x *VacuumVolumeCommitResponse) String() string { func (*VacuumVolumeCommitResponse) ProtoMessage() {} func (x *VacuumVolumeCommitResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[9] + mi := &file_volume_server_proto_msgTypes[10] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -502,7 +548,7 @@ func (x *VacuumVolumeCommitResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VacuumVolumeCommitResponse.ProtoReflect.Descriptor instead. 
func (*VacuumVolumeCommitResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{9} + return file_volume_server_proto_rawDescGZIP(), []int{10} } func (x *VacuumVolumeCommitResponse) GetIsReadOnly() bool { @@ -528,7 +574,7 @@ type VacuumVolumeCleanupRequest struct { func (x *VacuumVolumeCleanupRequest) Reset() { *x = VacuumVolumeCleanupRequest{} - mi := &file_volume_server_proto_msgTypes[10] + mi := &file_volume_server_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -540,7 +586,7 @@ func (x *VacuumVolumeCleanupRequest) String() string { func (*VacuumVolumeCleanupRequest) ProtoMessage() {} func (x *VacuumVolumeCleanupRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[10] + mi := &file_volume_server_proto_msgTypes[11] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -553,7 +599,7 @@ func (x *VacuumVolumeCleanupRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VacuumVolumeCleanupRequest.ProtoReflect.Descriptor instead. 
func (*VacuumVolumeCleanupRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{10} + return file_volume_server_proto_rawDescGZIP(), []int{11} } func (x *VacuumVolumeCleanupRequest) GetVolumeId() uint32 { @@ -571,7 +617,7 @@ type VacuumVolumeCleanupResponse struct { func (x *VacuumVolumeCleanupResponse) Reset() { *x = VacuumVolumeCleanupResponse{} - mi := &file_volume_server_proto_msgTypes[11] + mi := &file_volume_server_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -583,7 +629,7 @@ func (x *VacuumVolumeCleanupResponse) String() string { func (*VacuumVolumeCleanupResponse) ProtoMessage() {} func (x *VacuumVolumeCleanupResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[11] + mi := &file_volume_server_proto_msgTypes[12] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -596,7 +642,7 @@ func (x *VacuumVolumeCleanupResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VacuumVolumeCleanupResponse.ProtoReflect.Descriptor instead. 
func (*VacuumVolumeCleanupResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{11} + return file_volume_server_proto_rawDescGZIP(), []int{12} } type DeleteCollectionRequest struct { @@ -608,7 +654,7 @@ type DeleteCollectionRequest struct { func (x *DeleteCollectionRequest) Reset() { *x = DeleteCollectionRequest{} - mi := &file_volume_server_proto_msgTypes[12] + mi := &file_volume_server_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -620,7 +666,7 @@ func (x *DeleteCollectionRequest) String() string { func (*DeleteCollectionRequest) ProtoMessage() {} func (x *DeleteCollectionRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[12] + mi := &file_volume_server_proto_msgTypes[13] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -633,7 +679,7 @@ func (x *DeleteCollectionRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteCollectionRequest.ProtoReflect.Descriptor instead. 
func (*DeleteCollectionRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{12} + return file_volume_server_proto_rawDescGZIP(), []int{13} } func (x *DeleteCollectionRequest) GetCollection() string { @@ -651,7 +697,7 @@ type DeleteCollectionResponse struct { func (x *DeleteCollectionResponse) Reset() { *x = DeleteCollectionResponse{} - mi := &file_volume_server_proto_msgTypes[13] + mi := &file_volume_server_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -663,7 +709,7 @@ func (x *DeleteCollectionResponse) String() string { func (*DeleteCollectionResponse) ProtoMessage() {} func (x *DeleteCollectionResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[13] + mi := &file_volume_server_proto_msgTypes[14] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -676,7 +722,7 @@ func (x *DeleteCollectionResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteCollectionResponse.ProtoReflect.Descriptor instead. 
func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{13} + return file_volume_server_proto_rawDescGZIP(), []int{14} } type AllocateVolumeRequest struct { @@ -695,7 +741,7 @@ type AllocateVolumeRequest struct { func (x *AllocateVolumeRequest) Reset() { *x = AllocateVolumeRequest{} - mi := &file_volume_server_proto_msgTypes[14] + mi := &file_volume_server_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -707,7 +753,7 @@ func (x *AllocateVolumeRequest) String() string { func (*AllocateVolumeRequest) ProtoMessage() {} func (x *AllocateVolumeRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[14] + mi := &file_volume_server_proto_msgTypes[15] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -720,7 +766,7 @@ func (x *AllocateVolumeRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use AllocateVolumeRequest.ProtoReflect.Descriptor instead. 
func (*AllocateVolumeRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{14} + return file_volume_server_proto_rawDescGZIP(), []int{15} } func (x *AllocateVolumeRequest) GetVolumeId() uint32 { @@ -787,7 +833,7 @@ type AllocateVolumeResponse struct { func (x *AllocateVolumeResponse) Reset() { *x = AllocateVolumeResponse{} - mi := &file_volume_server_proto_msgTypes[15] + mi := &file_volume_server_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -799,7 +845,7 @@ func (x *AllocateVolumeResponse) String() string { func (*AllocateVolumeResponse) ProtoMessage() {} func (x *AllocateVolumeResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[15] + mi := &file_volume_server_proto_msgTypes[16] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -812,7 +858,7 @@ func (x *AllocateVolumeResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use AllocateVolumeResponse.ProtoReflect.Descriptor instead. 
func (*AllocateVolumeResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{15} + return file_volume_server_proto_rawDescGZIP(), []int{16} } type VolumeSyncStatusRequest struct { @@ -824,7 +870,7 @@ type VolumeSyncStatusRequest struct { func (x *VolumeSyncStatusRequest) Reset() { *x = VolumeSyncStatusRequest{} - mi := &file_volume_server_proto_msgTypes[16] + mi := &file_volume_server_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -836,7 +882,7 @@ func (x *VolumeSyncStatusRequest) String() string { func (*VolumeSyncStatusRequest) ProtoMessage() {} func (x *VolumeSyncStatusRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[16] + mi := &file_volume_server_proto_msgTypes[17] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -849,7 +895,7 @@ func (x *VolumeSyncStatusRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeSyncStatusRequest.ProtoReflect.Descriptor instead. 
func (*VolumeSyncStatusRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{16} + return file_volume_server_proto_rawDescGZIP(), []int{17} } func (x *VolumeSyncStatusRequest) GetVolumeId() uint32 { @@ -875,7 +921,7 @@ type VolumeSyncStatusResponse struct { func (x *VolumeSyncStatusResponse) Reset() { *x = VolumeSyncStatusResponse{} - mi := &file_volume_server_proto_msgTypes[17] + mi := &file_volume_server_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -887,7 +933,7 @@ func (x *VolumeSyncStatusResponse) String() string { func (*VolumeSyncStatusResponse) ProtoMessage() {} func (x *VolumeSyncStatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[17] + mi := &file_volume_server_proto_msgTypes[18] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -900,7 +946,7 @@ func (x *VolumeSyncStatusResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeSyncStatusResponse.ProtoReflect.Descriptor instead. 
func (*VolumeSyncStatusResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{17} + return file_volume_server_proto_rawDescGZIP(), []int{18} } func (x *VolumeSyncStatusResponse) GetVolumeId() uint32 { @@ -969,7 +1015,7 @@ type VolumeIncrementalCopyRequest struct { func (x *VolumeIncrementalCopyRequest) Reset() { *x = VolumeIncrementalCopyRequest{} - mi := &file_volume_server_proto_msgTypes[18] + mi := &file_volume_server_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -981,7 +1027,7 @@ func (x *VolumeIncrementalCopyRequest) String() string { func (*VolumeIncrementalCopyRequest) ProtoMessage() {} func (x *VolumeIncrementalCopyRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[18] + mi := &file_volume_server_proto_msgTypes[19] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -994,7 +1040,7 @@ func (x *VolumeIncrementalCopyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeIncrementalCopyRequest.ProtoReflect.Descriptor instead. 
func (*VolumeIncrementalCopyRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{18} + return file_volume_server_proto_rawDescGZIP(), []int{19} } func (x *VolumeIncrementalCopyRequest) GetVolumeId() uint32 { @@ -1020,7 +1066,7 @@ type VolumeIncrementalCopyResponse struct { func (x *VolumeIncrementalCopyResponse) Reset() { *x = VolumeIncrementalCopyResponse{} - mi := &file_volume_server_proto_msgTypes[19] + mi := &file_volume_server_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1032,7 +1078,7 @@ func (x *VolumeIncrementalCopyResponse) String() string { func (*VolumeIncrementalCopyResponse) ProtoMessage() {} func (x *VolumeIncrementalCopyResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[19] + mi := &file_volume_server_proto_msgTypes[20] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1045,7 +1091,7 @@ func (x *VolumeIncrementalCopyResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeIncrementalCopyResponse.ProtoReflect.Descriptor instead. 
func (*VolumeIncrementalCopyResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{19} + return file_volume_server_proto_rawDescGZIP(), []int{20} } func (x *VolumeIncrementalCopyResponse) GetFileContent() []byte { @@ -1064,7 +1110,7 @@ type VolumeMountRequest struct { func (x *VolumeMountRequest) Reset() { *x = VolumeMountRequest{} - mi := &file_volume_server_proto_msgTypes[20] + mi := &file_volume_server_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1076,7 +1122,7 @@ func (x *VolumeMountRequest) String() string { func (*VolumeMountRequest) ProtoMessage() {} func (x *VolumeMountRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[20] + mi := &file_volume_server_proto_msgTypes[21] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1089,7 +1135,7 @@ func (x *VolumeMountRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeMountRequest.ProtoReflect.Descriptor instead. 
func (*VolumeMountRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{20} + return file_volume_server_proto_rawDescGZIP(), []int{21} } func (x *VolumeMountRequest) GetVolumeId() uint32 { @@ -1107,7 +1153,7 @@ type VolumeMountResponse struct { func (x *VolumeMountResponse) Reset() { *x = VolumeMountResponse{} - mi := &file_volume_server_proto_msgTypes[21] + mi := &file_volume_server_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1119,7 +1165,7 @@ func (x *VolumeMountResponse) String() string { func (*VolumeMountResponse) ProtoMessage() {} func (x *VolumeMountResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[21] + mi := &file_volume_server_proto_msgTypes[22] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1132,7 +1178,7 @@ func (x *VolumeMountResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeMountResponse.ProtoReflect.Descriptor instead. 
func (*VolumeMountResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{21} + return file_volume_server_proto_rawDescGZIP(), []int{22} } type VolumeUnmountRequest struct { @@ -1144,7 +1190,7 @@ type VolumeUnmountRequest struct { func (x *VolumeUnmountRequest) Reset() { *x = VolumeUnmountRequest{} - mi := &file_volume_server_proto_msgTypes[22] + mi := &file_volume_server_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1156,7 +1202,7 @@ func (x *VolumeUnmountRequest) String() string { func (*VolumeUnmountRequest) ProtoMessage() {} func (x *VolumeUnmountRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[22] + mi := &file_volume_server_proto_msgTypes[23] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1169,7 +1215,7 @@ func (x *VolumeUnmountRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeUnmountRequest.ProtoReflect.Descriptor instead. 
func (*VolumeUnmountRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{22} + return file_volume_server_proto_rawDescGZIP(), []int{23} } func (x *VolumeUnmountRequest) GetVolumeId() uint32 { @@ -1187,7 +1233,7 @@ type VolumeUnmountResponse struct { func (x *VolumeUnmountResponse) Reset() { *x = VolumeUnmountResponse{} - mi := &file_volume_server_proto_msgTypes[23] + mi := &file_volume_server_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1199,7 +1245,7 @@ func (x *VolumeUnmountResponse) String() string { func (*VolumeUnmountResponse) ProtoMessage() {} func (x *VolumeUnmountResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[23] + mi := &file_volume_server_proto_msgTypes[24] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1212,7 +1258,7 @@ func (x *VolumeUnmountResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeUnmountResponse.ProtoReflect.Descriptor instead. 
func (*VolumeUnmountResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{23} + return file_volume_server_proto_rawDescGZIP(), []int{24} } type VolumeDeleteRequest struct { @@ -1225,7 +1271,7 @@ type VolumeDeleteRequest struct { func (x *VolumeDeleteRequest) Reset() { *x = VolumeDeleteRequest{} - mi := &file_volume_server_proto_msgTypes[24] + mi := &file_volume_server_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1237,7 +1283,7 @@ func (x *VolumeDeleteRequest) String() string { func (*VolumeDeleteRequest) ProtoMessage() {} func (x *VolumeDeleteRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[24] + mi := &file_volume_server_proto_msgTypes[25] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1250,7 +1296,7 @@ func (x *VolumeDeleteRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeDeleteRequest.ProtoReflect.Descriptor instead. 
func (*VolumeDeleteRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{24} + return file_volume_server_proto_rawDescGZIP(), []int{25} } func (x *VolumeDeleteRequest) GetVolumeId() uint32 { @@ -1275,7 +1321,7 @@ type VolumeDeleteResponse struct { func (x *VolumeDeleteResponse) Reset() { *x = VolumeDeleteResponse{} - mi := &file_volume_server_proto_msgTypes[25] + mi := &file_volume_server_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1287,7 +1333,7 @@ func (x *VolumeDeleteResponse) String() string { func (*VolumeDeleteResponse) ProtoMessage() {} func (x *VolumeDeleteResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[25] + mi := &file_volume_server_proto_msgTypes[26] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1300,7 +1346,7 @@ func (x *VolumeDeleteResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeDeleteResponse.ProtoReflect.Descriptor instead. 
func (*VolumeDeleteResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{25} + return file_volume_server_proto_rawDescGZIP(), []int{26} } type VolumeMarkReadonlyRequest struct { @@ -1313,7 +1359,7 @@ type VolumeMarkReadonlyRequest struct { func (x *VolumeMarkReadonlyRequest) Reset() { *x = VolumeMarkReadonlyRequest{} - mi := &file_volume_server_proto_msgTypes[26] + mi := &file_volume_server_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1325,7 +1371,7 @@ func (x *VolumeMarkReadonlyRequest) String() string { func (*VolumeMarkReadonlyRequest) ProtoMessage() {} func (x *VolumeMarkReadonlyRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[26] + mi := &file_volume_server_proto_msgTypes[27] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1338,7 +1384,7 @@ func (x *VolumeMarkReadonlyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeMarkReadonlyRequest.ProtoReflect.Descriptor instead. 
func (*VolumeMarkReadonlyRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{26} + return file_volume_server_proto_rawDescGZIP(), []int{27} } func (x *VolumeMarkReadonlyRequest) GetVolumeId() uint32 { @@ -1363,7 +1409,7 @@ type VolumeMarkReadonlyResponse struct { func (x *VolumeMarkReadonlyResponse) Reset() { *x = VolumeMarkReadonlyResponse{} - mi := &file_volume_server_proto_msgTypes[27] + mi := &file_volume_server_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1375,7 +1421,7 @@ func (x *VolumeMarkReadonlyResponse) String() string { func (*VolumeMarkReadonlyResponse) ProtoMessage() {} func (x *VolumeMarkReadonlyResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[27] + mi := &file_volume_server_proto_msgTypes[28] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1388,7 +1434,7 @@ func (x *VolumeMarkReadonlyResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeMarkReadonlyResponse.ProtoReflect.Descriptor instead. 
func (*VolumeMarkReadonlyResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{27} + return file_volume_server_proto_rawDescGZIP(), []int{28} } type VolumeMarkWritableRequest struct { @@ -1400,7 +1446,7 @@ type VolumeMarkWritableRequest struct { func (x *VolumeMarkWritableRequest) Reset() { *x = VolumeMarkWritableRequest{} - mi := &file_volume_server_proto_msgTypes[28] + mi := &file_volume_server_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1412,7 +1458,7 @@ func (x *VolumeMarkWritableRequest) String() string { func (*VolumeMarkWritableRequest) ProtoMessage() {} func (x *VolumeMarkWritableRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[28] + mi := &file_volume_server_proto_msgTypes[29] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1425,7 +1471,7 @@ func (x *VolumeMarkWritableRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeMarkWritableRequest.ProtoReflect.Descriptor instead. 
func (*VolumeMarkWritableRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{28} + return file_volume_server_proto_rawDescGZIP(), []int{29} } func (x *VolumeMarkWritableRequest) GetVolumeId() uint32 { @@ -1443,7 +1489,7 @@ type VolumeMarkWritableResponse struct { func (x *VolumeMarkWritableResponse) Reset() { *x = VolumeMarkWritableResponse{} - mi := &file_volume_server_proto_msgTypes[29] + mi := &file_volume_server_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1455,7 +1501,7 @@ func (x *VolumeMarkWritableResponse) String() string { func (*VolumeMarkWritableResponse) ProtoMessage() {} func (x *VolumeMarkWritableResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[29] + mi := &file_volume_server_proto_msgTypes[30] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1468,7 +1514,7 @@ func (x *VolumeMarkWritableResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeMarkWritableResponse.ProtoReflect.Descriptor instead. 
func (*VolumeMarkWritableResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{29} + return file_volume_server_proto_rawDescGZIP(), []int{30} } type VolumeConfigureRequest struct { @@ -1481,7 +1527,7 @@ type VolumeConfigureRequest struct { func (x *VolumeConfigureRequest) Reset() { *x = VolumeConfigureRequest{} - mi := &file_volume_server_proto_msgTypes[30] + mi := &file_volume_server_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1493,7 +1539,7 @@ func (x *VolumeConfigureRequest) String() string { func (*VolumeConfigureRequest) ProtoMessage() {} func (x *VolumeConfigureRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[30] + mi := &file_volume_server_proto_msgTypes[31] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1506,7 +1552,7 @@ func (x *VolumeConfigureRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeConfigureRequest.ProtoReflect.Descriptor instead. 
func (*VolumeConfigureRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{30} + return file_volume_server_proto_rawDescGZIP(), []int{31} } func (x *VolumeConfigureRequest) GetVolumeId() uint32 { @@ -1532,7 +1578,7 @@ type VolumeConfigureResponse struct { func (x *VolumeConfigureResponse) Reset() { *x = VolumeConfigureResponse{} - mi := &file_volume_server_proto_msgTypes[31] + mi := &file_volume_server_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1544,7 +1590,7 @@ func (x *VolumeConfigureResponse) String() string { func (*VolumeConfigureResponse) ProtoMessage() {} func (x *VolumeConfigureResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[31] + mi := &file_volume_server_proto_msgTypes[32] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1557,7 +1603,7 @@ func (x *VolumeConfigureResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeConfigureResponse.ProtoReflect.Descriptor instead. 
func (*VolumeConfigureResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{31} + return file_volume_server_proto_rawDescGZIP(), []int{32} } func (x *VolumeConfigureResponse) GetError() string { @@ -1576,7 +1622,7 @@ type VolumeStatusRequest struct { func (x *VolumeStatusRequest) Reset() { *x = VolumeStatusRequest{} - mi := &file_volume_server_proto_msgTypes[32] + mi := &file_volume_server_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1588,7 +1634,7 @@ func (x *VolumeStatusRequest) String() string { func (*VolumeStatusRequest) ProtoMessage() {} func (x *VolumeStatusRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[32] + mi := &file_volume_server_proto_msgTypes[33] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1601,7 +1647,7 @@ func (x *VolumeStatusRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeStatusRequest.ProtoReflect.Descriptor instead. 
func (*VolumeStatusRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{32} + return file_volume_server_proto_rawDescGZIP(), []int{33} } func (x *VolumeStatusRequest) GetVolumeId() uint32 { @@ -1623,7 +1669,7 @@ type VolumeStatusResponse struct { func (x *VolumeStatusResponse) Reset() { *x = VolumeStatusResponse{} - mi := &file_volume_server_proto_msgTypes[33] + mi := &file_volume_server_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1635,7 +1681,7 @@ func (x *VolumeStatusResponse) String() string { func (*VolumeStatusResponse) ProtoMessage() {} func (x *VolumeStatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[33] + mi := &file_volume_server_proto_msgTypes[34] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1648,7 +1694,7 @@ func (x *VolumeStatusResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeStatusResponse.ProtoReflect.Descriptor instead. 
func (*VolumeStatusResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{33} + return file_volume_server_proto_rawDescGZIP(), []int{34} } func (x *VolumeStatusResponse) GetIsReadOnly() bool { @@ -1694,7 +1740,7 @@ type VolumeCopyRequest struct { func (x *VolumeCopyRequest) Reset() { *x = VolumeCopyRequest{} - mi := &file_volume_server_proto_msgTypes[34] + mi := &file_volume_server_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1706,7 +1752,7 @@ func (x *VolumeCopyRequest) String() string { func (*VolumeCopyRequest) ProtoMessage() {} func (x *VolumeCopyRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[34] + mi := &file_volume_server_proto_msgTypes[35] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1719,7 +1765,7 @@ func (x *VolumeCopyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeCopyRequest.ProtoReflect.Descriptor instead. 
func (*VolumeCopyRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{34} + return file_volume_server_proto_rawDescGZIP(), []int{35} } func (x *VolumeCopyRequest) GetVolumeId() uint32 { @@ -1781,7 +1827,7 @@ type VolumeCopyResponse struct { func (x *VolumeCopyResponse) Reset() { *x = VolumeCopyResponse{} - mi := &file_volume_server_proto_msgTypes[35] + mi := &file_volume_server_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1793,7 +1839,7 @@ func (x *VolumeCopyResponse) String() string { func (*VolumeCopyResponse) ProtoMessage() {} func (x *VolumeCopyResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[35] + mi := &file_volume_server_proto_msgTypes[36] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1806,7 +1852,7 @@ func (x *VolumeCopyResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeCopyResponse.ProtoReflect.Descriptor instead. 
func (*VolumeCopyResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{35} + return file_volume_server_proto_rawDescGZIP(), []int{36} } func (x *VolumeCopyResponse) GetLastAppendAtNs() uint64 { @@ -1838,7 +1884,7 @@ type CopyFileRequest struct { func (x *CopyFileRequest) Reset() { *x = CopyFileRequest{} - mi := &file_volume_server_proto_msgTypes[36] + mi := &file_volume_server_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1850,7 +1896,7 @@ func (x *CopyFileRequest) String() string { func (*CopyFileRequest) ProtoMessage() {} func (x *CopyFileRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[36] + mi := &file_volume_server_proto_msgTypes[37] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1863,7 +1909,7 @@ func (x *CopyFileRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CopyFileRequest.ProtoReflect.Descriptor instead. 
func (*CopyFileRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{36} + return file_volume_server_proto_rawDescGZIP(), []int{37} } func (x *CopyFileRequest) GetVolumeId() uint32 { @@ -1925,7 +1971,7 @@ type CopyFileResponse struct { func (x *CopyFileResponse) Reset() { *x = CopyFileResponse{} - mi := &file_volume_server_proto_msgTypes[37] + mi := &file_volume_server_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1937,7 +1983,7 @@ func (x *CopyFileResponse) String() string { func (*CopyFileResponse) ProtoMessage() {} func (x *CopyFileResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[37] + mi := &file_volume_server_proto_msgTypes[38] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1950,7 +1996,7 @@ func (x *CopyFileResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use CopyFileResponse.ProtoReflect.Descriptor instead. 
func (*CopyFileResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{37} + return file_volume_server_proto_rawDescGZIP(), []int{38} } func (x *CopyFileResponse) GetFileContent() []byte { @@ -1980,7 +2026,7 @@ type ReceiveFileRequest struct { func (x *ReceiveFileRequest) Reset() { *x = ReceiveFileRequest{} - mi := &file_volume_server_proto_msgTypes[38] + mi := &file_volume_server_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1992,7 +2038,7 @@ func (x *ReceiveFileRequest) String() string { func (*ReceiveFileRequest) ProtoMessage() {} func (x *ReceiveFileRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[38] + mi := &file_volume_server_proto_msgTypes[39] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2005,7 +2051,7 @@ func (x *ReceiveFileRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ReceiveFileRequest.ProtoReflect.Descriptor instead. 
func (*ReceiveFileRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{38} + return file_volume_server_proto_rawDescGZIP(), []int{39} } func (x *ReceiveFileRequest) GetData() isReceiveFileRequest_Data { @@ -2063,7 +2109,7 @@ type ReceiveFileInfo struct { func (x *ReceiveFileInfo) Reset() { *x = ReceiveFileInfo{} - mi := &file_volume_server_proto_msgTypes[39] + mi := &file_volume_server_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2075,7 +2121,7 @@ func (x *ReceiveFileInfo) String() string { func (*ReceiveFileInfo) ProtoMessage() {} func (x *ReceiveFileInfo) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[39] + mi := &file_volume_server_proto_msgTypes[40] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2088,7 +2134,7 @@ func (x *ReceiveFileInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use ReceiveFileInfo.ProtoReflect.Descriptor instead. 
func (*ReceiveFileInfo) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{39} + return file_volume_server_proto_rawDescGZIP(), []int{40} } func (x *ReceiveFileInfo) GetVolumeId() uint32 { @@ -2143,7 +2189,7 @@ type ReceiveFileResponse struct { func (x *ReceiveFileResponse) Reset() { *x = ReceiveFileResponse{} - mi := &file_volume_server_proto_msgTypes[40] + mi := &file_volume_server_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2155,7 +2201,7 @@ func (x *ReceiveFileResponse) String() string { func (*ReceiveFileResponse) ProtoMessage() {} func (x *ReceiveFileResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[40] + mi := &file_volume_server_proto_msgTypes[41] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2168,7 +2214,7 @@ func (x *ReceiveFileResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ReceiveFileResponse.ProtoReflect.Descriptor instead. 
func (*ReceiveFileResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{40} + return file_volume_server_proto_rawDescGZIP(), []int{41} } func (x *ReceiveFileResponse) GetBytesWritten() uint64 { @@ -2196,7 +2242,7 @@ type ReadNeedleBlobRequest struct { func (x *ReadNeedleBlobRequest) Reset() { *x = ReadNeedleBlobRequest{} - mi := &file_volume_server_proto_msgTypes[41] + mi := &file_volume_server_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2208,7 +2254,7 @@ func (x *ReadNeedleBlobRequest) String() string { func (*ReadNeedleBlobRequest) ProtoMessage() {} func (x *ReadNeedleBlobRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[41] + mi := &file_volume_server_proto_msgTypes[42] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2221,7 +2267,7 @@ func (x *ReadNeedleBlobRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadNeedleBlobRequest.ProtoReflect.Descriptor instead. 
func (*ReadNeedleBlobRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{41} + return file_volume_server_proto_rawDescGZIP(), []int{42} } func (x *ReadNeedleBlobRequest) GetVolumeId() uint32 { @@ -2254,7 +2300,7 @@ type ReadNeedleBlobResponse struct { func (x *ReadNeedleBlobResponse) Reset() { *x = ReadNeedleBlobResponse{} - mi := &file_volume_server_proto_msgTypes[42] + mi := &file_volume_server_proto_msgTypes[43] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2266,7 +2312,7 @@ func (x *ReadNeedleBlobResponse) String() string { func (*ReadNeedleBlobResponse) ProtoMessage() {} func (x *ReadNeedleBlobResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[42] + mi := &file_volume_server_proto_msgTypes[43] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2279,7 +2325,7 @@ func (x *ReadNeedleBlobResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadNeedleBlobResponse.ProtoReflect.Descriptor instead. 
func (*ReadNeedleBlobResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{42} + return file_volume_server_proto_rawDescGZIP(), []int{43} } func (x *ReadNeedleBlobResponse) GetNeedleBlob() []byte { @@ -2301,7 +2347,7 @@ type ReadNeedleMetaRequest struct { func (x *ReadNeedleMetaRequest) Reset() { *x = ReadNeedleMetaRequest{} - mi := &file_volume_server_proto_msgTypes[43] + mi := &file_volume_server_proto_msgTypes[44] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2313,7 +2359,7 @@ func (x *ReadNeedleMetaRequest) String() string { func (*ReadNeedleMetaRequest) ProtoMessage() {} func (x *ReadNeedleMetaRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[43] + mi := &file_volume_server_proto_msgTypes[44] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2326,7 +2372,7 @@ func (x *ReadNeedleMetaRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadNeedleMetaRequest.ProtoReflect.Descriptor instead. 
func (*ReadNeedleMetaRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{43} + return file_volume_server_proto_rawDescGZIP(), []int{44} } func (x *ReadNeedleMetaRequest) GetVolumeId() uint32 { @@ -2370,7 +2416,7 @@ type ReadNeedleMetaResponse struct { func (x *ReadNeedleMetaResponse) Reset() { *x = ReadNeedleMetaResponse{} - mi := &file_volume_server_proto_msgTypes[44] + mi := &file_volume_server_proto_msgTypes[45] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2382,7 +2428,7 @@ func (x *ReadNeedleMetaResponse) String() string { func (*ReadNeedleMetaResponse) ProtoMessage() {} func (x *ReadNeedleMetaResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[44] + mi := &file_volume_server_proto_msgTypes[45] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2395,7 +2441,7 @@ func (x *ReadNeedleMetaResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadNeedleMetaResponse.ProtoReflect.Descriptor instead. 
func (*ReadNeedleMetaResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{44} + return file_volume_server_proto_rawDescGZIP(), []int{45} } func (x *ReadNeedleMetaResponse) GetCookie() uint32 { @@ -2445,7 +2491,7 @@ type WriteNeedleBlobRequest struct { func (x *WriteNeedleBlobRequest) Reset() { *x = WriteNeedleBlobRequest{} - mi := &file_volume_server_proto_msgTypes[45] + mi := &file_volume_server_proto_msgTypes[46] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2457,7 +2503,7 @@ func (x *WriteNeedleBlobRequest) String() string { func (*WriteNeedleBlobRequest) ProtoMessage() {} func (x *WriteNeedleBlobRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[45] + mi := &file_volume_server_proto_msgTypes[46] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2470,7 +2516,7 @@ func (x *WriteNeedleBlobRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use WriteNeedleBlobRequest.ProtoReflect.Descriptor instead. 
func (*WriteNeedleBlobRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{45} + return file_volume_server_proto_rawDescGZIP(), []int{46} } func (x *WriteNeedleBlobRequest) GetVolumeId() uint32 { @@ -2509,7 +2555,7 @@ type WriteNeedleBlobResponse struct { func (x *WriteNeedleBlobResponse) Reset() { *x = WriteNeedleBlobResponse{} - mi := &file_volume_server_proto_msgTypes[46] + mi := &file_volume_server_proto_msgTypes[47] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2521,7 +2567,7 @@ func (x *WriteNeedleBlobResponse) String() string { func (*WriteNeedleBlobResponse) ProtoMessage() {} func (x *WriteNeedleBlobResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[46] + mi := &file_volume_server_proto_msgTypes[47] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2534,7 +2580,7 @@ func (x *WriteNeedleBlobResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use WriteNeedleBlobResponse.ProtoReflect.Descriptor instead. 
func (*WriteNeedleBlobResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{46} + return file_volume_server_proto_rawDescGZIP(), []int{47} } type ReadAllNeedlesRequest struct { @@ -2546,7 +2592,7 @@ type ReadAllNeedlesRequest struct { func (x *ReadAllNeedlesRequest) Reset() { *x = ReadAllNeedlesRequest{} - mi := &file_volume_server_proto_msgTypes[47] + mi := &file_volume_server_proto_msgTypes[48] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2558,7 +2604,7 @@ func (x *ReadAllNeedlesRequest) String() string { func (*ReadAllNeedlesRequest) ProtoMessage() {} func (x *ReadAllNeedlesRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[47] + mi := &file_volume_server_proto_msgTypes[48] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2571,7 +2617,7 @@ func (x *ReadAllNeedlesRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadAllNeedlesRequest.ProtoReflect.Descriptor instead. 
func (*ReadAllNeedlesRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{47} + return file_volume_server_proto_rawDescGZIP(), []int{48} } func (x *ReadAllNeedlesRequest) GetVolumeIds() []uint32 { @@ -2598,7 +2644,7 @@ type ReadAllNeedlesResponse struct { func (x *ReadAllNeedlesResponse) Reset() { *x = ReadAllNeedlesResponse{} - mi := &file_volume_server_proto_msgTypes[48] + mi := &file_volume_server_proto_msgTypes[49] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2610,7 +2656,7 @@ func (x *ReadAllNeedlesResponse) String() string { func (*ReadAllNeedlesResponse) ProtoMessage() {} func (x *ReadAllNeedlesResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[48] + mi := &file_volume_server_proto_msgTypes[49] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2623,7 +2669,7 @@ func (x *ReadAllNeedlesResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadAllNeedlesResponse.ProtoReflect.Descriptor instead. 
func (*ReadAllNeedlesResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{48} + return file_volume_server_proto_rawDescGZIP(), []int{49} } func (x *ReadAllNeedlesResponse) GetVolumeId() uint32 { @@ -2700,7 +2746,7 @@ type VolumeTailSenderRequest struct { func (x *VolumeTailSenderRequest) Reset() { *x = VolumeTailSenderRequest{} - mi := &file_volume_server_proto_msgTypes[49] + mi := &file_volume_server_proto_msgTypes[50] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2712,7 +2758,7 @@ func (x *VolumeTailSenderRequest) String() string { func (*VolumeTailSenderRequest) ProtoMessage() {} func (x *VolumeTailSenderRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[49] + mi := &file_volume_server_proto_msgTypes[50] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2725,7 +2771,7 @@ func (x *VolumeTailSenderRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeTailSenderRequest.ProtoReflect.Descriptor instead. 
func (*VolumeTailSenderRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{49} + return file_volume_server_proto_rawDescGZIP(), []int{50} } func (x *VolumeTailSenderRequest) GetVolumeId() uint32 { @@ -2761,7 +2807,7 @@ type VolumeTailSenderResponse struct { func (x *VolumeTailSenderResponse) Reset() { *x = VolumeTailSenderResponse{} - mi := &file_volume_server_proto_msgTypes[50] + mi := &file_volume_server_proto_msgTypes[51] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2773,7 +2819,7 @@ func (x *VolumeTailSenderResponse) String() string { func (*VolumeTailSenderResponse) ProtoMessage() {} func (x *VolumeTailSenderResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[50] + mi := &file_volume_server_proto_msgTypes[51] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2786,7 +2832,7 @@ func (x *VolumeTailSenderResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeTailSenderResponse.ProtoReflect.Descriptor instead. 
func (*VolumeTailSenderResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{50} + return file_volume_server_proto_rawDescGZIP(), []int{51} } func (x *VolumeTailSenderResponse) GetNeedleHeader() []byte { @@ -2829,7 +2875,7 @@ type VolumeTailReceiverRequest struct { func (x *VolumeTailReceiverRequest) Reset() { *x = VolumeTailReceiverRequest{} - mi := &file_volume_server_proto_msgTypes[51] + mi := &file_volume_server_proto_msgTypes[52] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2841,7 +2887,7 @@ func (x *VolumeTailReceiverRequest) String() string { func (*VolumeTailReceiverRequest) ProtoMessage() {} func (x *VolumeTailReceiverRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[51] + mi := &file_volume_server_proto_msgTypes[52] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2854,7 +2900,7 @@ func (x *VolumeTailReceiverRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeTailReceiverRequest.ProtoReflect.Descriptor instead. 
func (*VolumeTailReceiverRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{51} + return file_volume_server_proto_rawDescGZIP(), []int{52} } func (x *VolumeTailReceiverRequest) GetVolumeId() uint32 { @@ -2893,7 +2939,7 @@ type VolumeTailReceiverResponse struct { func (x *VolumeTailReceiverResponse) Reset() { *x = VolumeTailReceiverResponse{} - mi := &file_volume_server_proto_msgTypes[52] + mi := &file_volume_server_proto_msgTypes[53] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2905,7 +2951,7 @@ func (x *VolumeTailReceiverResponse) String() string { func (*VolumeTailReceiverResponse) ProtoMessage() {} func (x *VolumeTailReceiverResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[52] + mi := &file_volume_server_proto_msgTypes[53] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2918,7 +2964,7 @@ func (x *VolumeTailReceiverResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeTailReceiverResponse.ProtoReflect.Descriptor instead. 
func (*VolumeTailReceiverResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{52} + return file_volume_server_proto_rawDescGZIP(), []int{53} } type VolumeEcShardsGenerateRequest struct { @@ -2931,7 +2977,7 @@ type VolumeEcShardsGenerateRequest struct { func (x *VolumeEcShardsGenerateRequest) Reset() { *x = VolumeEcShardsGenerateRequest{} - mi := &file_volume_server_proto_msgTypes[53] + mi := &file_volume_server_proto_msgTypes[54] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2943,7 +2989,7 @@ func (x *VolumeEcShardsGenerateRequest) String() string { func (*VolumeEcShardsGenerateRequest) ProtoMessage() {} func (x *VolumeEcShardsGenerateRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[53] + mi := &file_volume_server_proto_msgTypes[54] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2956,7 +3002,7 @@ func (x *VolumeEcShardsGenerateRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardsGenerateRequest.ProtoReflect.Descriptor instead. 
func (*VolumeEcShardsGenerateRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{53} + return file_volume_server_proto_rawDescGZIP(), []int{54} } func (x *VolumeEcShardsGenerateRequest) GetVolumeId() uint32 { @@ -2981,7 +3027,7 @@ type VolumeEcShardsGenerateResponse struct { func (x *VolumeEcShardsGenerateResponse) Reset() { *x = VolumeEcShardsGenerateResponse{} - mi := &file_volume_server_proto_msgTypes[54] + mi := &file_volume_server_proto_msgTypes[55] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2993,7 +3039,7 @@ func (x *VolumeEcShardsGenerateResponse) String() string { func (*VolumeEcShardsGenerateResponse) ProtoMessage() {} func (x *VolumeEcShardsGenerateResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[54] + mi := &file_volume_server_proto_msgTypes[55] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3006,7 +3052,7 @@ func (x *VolumeEcShardsGenerateResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardsGenerateResponse.ProtoReflect.Descriptor instead. 
func (*VolumeEcShardsGenerateResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{54} + return file_volume_server_proto_rawDescGZIP(), []int{55} } type VolumeEcShardsRebuildRequest struct { @@ -3019,7 +3065,7 @@ type VolumeEcShardsRebuildRequest struct { func (x *VolumeEcShardsRebuildRequest) Reset() { *x = VolumeEcShardsRebuildRequest{} - mi := &file_volume_server_proto_msgTypes[55] + mi := &file_volume_server_proto_msgTypes[56] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3031,7 +3077,7 @@ func (x *VolumeEcShardsRebuildRequest) String() string { func (*VolumeEcShardsRebuildRequest) ProtoMessage() {} func (x *VolumeEcShardsRebuildRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[55] + mi := &file_volume_server_proto_msgTypes[56] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3044,7 +3090,7 @@ func (x *VolumeEcShardsRebuildRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardsRebuildRequest.ProtoReflect.Descriptor instead. 
func (*VolumeEcShardsRebuildRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{55} + return file_volume_server_proto_rawDescGZIP(), []int{56} } func (x *VolumeEcShardsRebuildRequest) GetVolumeId() uint32 { @@ -3070,7 +3116,7 @@ type VolumeEcShardsRebuildResponse struct { func (x *VolumeEcShardsRebuildResponse) Reset() { *x = VolumeEcShardsRebuildResponse{} - mi := &file_volume_server_proto_msgTypes[56] + mi := &file_volume_server_proto_msgTypes[57] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3082,7 +3128,7 @@ func (x *VolumeEcShardsRebuildResponse) String() string { func (*VolumeEcShardsRebuildResponse) ProtoMessage() {} func (x *VolumeEcShardsRebuildResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[56] + mi := &file_volume_server_proto_msgTypes[57] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3095,7 +3141,7 @@ func (x *VolumeEcShardsRebuildResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardsRebuildResponse.ProtoReflect.Descriptor instead. 
func (*VolumeEcShardsRebuildResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{56} + return file_volume_server_proto_rawDescGZIP(), []int{57} } func (x *VolumeEcShardsRebuildResponse) GetRebuiltShardIds() []uint32 { @@ -3121,7 +3167,7 @@ type VolumeEcShardsCopyRequest struct { func (x *VolumeEcShardsCopyRequest) Reset() { *x = VolumeEcShardsCopyRequest{} - mi := &file_volume_server_proto_msgTypes[57] + mi := &file_volume_server_proto_msgTypes[58] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3133,7 +3179,7 @@ func (x *VolumeEcShardsCopyRequest) String() string { func (*VolumeEcShardsCopyRequest) ProtoMessage() {} func (x *VolumeEcShardsCopyRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[57] + mi := &file_volume_server_proto_msgTypes[58] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3146,7 +3192,7 @@ func (x *VolumeEcShardsCopyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardsCopyRequest.ProtoReflect.Descriptor instead. 
func (*VolumeEcShardsCopyRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{57} + return file_volume_server_proto_rawDescGZIP(), []int{58} } func (x *VolumeEcShardsCopyRequest) GetVolumeId() uint32 { @@ -3213,7 +3259,7 @@ type VolumeEcShardsCopyResponse struct { func (x *VolumeEcShardsCopyResponse) Reset() { *x = VolumeEcShardsCopyResponse{} - mi := &file_volume_server_proto_msgTypes[58] + mi := &file_volume_server_proto_msgTypes[59] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3225,7 +3271,7 @@ func (x *VolumeEcShardsCopyResponse) String() string { func (*VolumeEcShardsCopyResponse) ProtoMessage() {} func (x *VolumeEcShardsCopyResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[58] + mi := &file_volume_server_proto_msgTypes[59] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3238,7 +3284,7 @@ func (x *VolumeEcShardsCopyResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardsCopyResponse.ProtoReflect.Descriptor instead. 
func (*VolumeEcShardsCopyResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{58} + return file_volume_server_proto_rawDescGZIP(), []int{59} } type VolumeEcShardsDeleteRequest struct { @@ -3252,7 +3298,7 @@ type VolumeEcShardsDeleteRequest struct { func (x *VolumeEcShardsDeleteRequest) Reset() { *x = VolumeEcShardsDeleteRequest{} - mi := &file_volume_server_proto_msgTypes[59] + mi := &file_volume_server_proto_msgTypes[60] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3264,7 +3310,7 @@ func (x *VolumeEcShardsDeleteRequest) String() string { func (*VolumeEcShardsDeleteRequest) ProtoMessage() {} func (x *VolumeEcShardsDeleteRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[59] + mi := &file_volume_server_proto_msgTypes[60] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3277,7 +3323,7 @@ func (x *VolumeEcShardsDeleteRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardsDeleteRequest.ProtoReflect.Descriptor instead. 
func (*VolumeEcShardsDeleteRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{59} + return file_volume_server_proto_rawDescGZIP(), []int{60} } func (x *VolumeEcShardsDeleteRequest) GetVolumeId() uint32 { @@ -3309,7 +3355,7 @@ type VolumeEcShardsDeleteResponse struct { func (x *VolumeEcShardsDeleteResponse) Reset() { *x = VolumeEcShardsDeleteResponse{} - mi := &file_volume_server_proto_msgTypes[60] + mi := &file_volume_server_proto_msgTypes[61] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3321,7 +3367,7 @@ func (x *VolumeEcShardsDeleteResponse) String() string { func (*VolumeEcShardsDeleteResponse) ProtoMessage() {} func (x *VolumeEcShardsDeleteResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[60] + mi := &file_volume_server_proto_msgTypes[61] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3334,7 +3380,7 @@ func (x *VolumeEcShardsDeleteResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardsDeleteResponse.ProtoReflect.Descriptor instead. 
func (*VolumeEcShardsDeleteResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{60} + return file_volume_server_proto_rawDescGZIP(), []int{61} } type VolumeEcShardsMountRequest struct { @@ -3348,7 +3394,7 @@ type VolumeEcShardsMountRequest struct { func (x *VolumeEcShardsMountRequest) Reset() { *x = VolumeEcShardsMountRequest{} - mi := &file_volume_server_proto_msgTypes[61] + mi := &file_volume_server_proto_msgTypes[62] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3360,7 +3406,7 @@ func (x *VolumeEcShardsMountRequest) String() string { func (*VolumeEcShardsMountRequest) ProtoMessage() {} func (x *VolumeEcShardsMountRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[61] + mi := &file_volume_server_proto_msgTypes[62] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3373,7 +3419,7 @@ func (x *VolumeEcShardsMountRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardsMountRequest.ProtoReflect.Descriptor instead. 
func (*VolumeEcShardsMountRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{61} + return file_volume_server_proto_rawDescGZIP(), []int{62} } func (x *VolumeEcShardsMountRequest) GetVolumeId() uint32 { @@ -3405,7 +3451,7 @@ type VolumeEcShardsMountResponse struct { func (x *VolumeEcShardsMountResponse) Reset() { *x = VolumeEcShardsMountResponse{} - mi := &file_volume_server_proto_msgTypes[62] + mi := &file_volume_server_proto_msgTypes[63] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3417,7 +3463,7 @@ func (x *VolumeEcShardsMountResponse) String() string { func (*VolumeEcShardsMountResponse) ProtoMessage() {} func (x *VolumeEcShardsMountResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[62] + mi := &file_volume_server_proto_msgTypes[63] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3430,7 +3476,7 @@ func (x *VolumeEcShardsMountResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardsMountResponse.ProtoReflect.Descriptor instead. 
func (*VolumeEcShardsMountResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{62} + return file_volume_server_proto_rawDescGZIP(), []int{63} } type VolumeEcShardsUnmountRequest struct { @@ -3443,7 +3489,7 @@ type VolumeEcShardsUnmountRequest struct { func (x *VolumeEcShardsUnmountRequest) Reset() { *x = VolumeEcShardsUnmountRequest{} - mi := &file_volume_server_proto_msgTypes[63] + mi := &file_volume_server_proto_msgTypes[64] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3455,7 +3501,7 @@ func (x *VolumeEcShardsUnmountRequest) String() string { func (*VolumeEcShardsUnmountRequest) ProtoMessage() {} func (x *VolumeEcShardsUnmountRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[63] + mi := &file_volume_server_proto_msgTypes[64] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3468,7 +3514,7 @@ func (x *VolumeEcShardsUnmountRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardsUnmountRequest.ProtoReflect.Descriptor instead. 
func (*VolumeEcShardsUnmountRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{63} + return file_volume_server_proto_rawDescGZIP(), []int{64} } func (x *VolumeEcShardsUnmountRequest) GetVolumeId() uint32 { @@ -3493,7 +3539,7 @@ type VolumeEcShardsUnmountResponse struct { func (x *VolumeEcShardsUnmountResponse) Reset() { *x = VolumeEcShardsUnmountResponse{} - mi := &file_volume_server_proto_msgTypes[64] + mi := &file_volume_server_proto_msgTypes[65] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3505,7 +3551,7 @@ func (x *VolumeEcShardsUnmountResponse) String() string { func (*VolumeEcShardsUnmountResponse) ProtoMessage() {} func (x *VolumeEcShardsUnmountResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[64] + mi := &file_volume_server_proto_msgTypes[65] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3518,7 +3564,7 @@ func (x *VolumeEcShardsUnmountResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardsUnmountResponse.ProtoReflect.Descriptor instead. 
func (*VolumeEcShardsUnmountResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{64} + return file_volume_server_proto_rawDescGZIP(), []int{65} } type VolumeEcShardReadRequest struct { @@ -3534,7 +3580,7 @@ type VolumeEcShardReadRequest struct { func (x *VolumeEcShardReadRequest) Reset() { *x = VolumeEcShardReadRequest{} - mi := &file_volume_server_proto_msgTypes[65] + mi := &file_volume_server_proto_msgTypes[66] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3546,7 +3592,7 @@ func (x *VolumeEcShardReadRequest) String() string { func (*VolumeEcShardReadRequest) ProtoMessage() {} func (x *VolumeEcShardReadRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[65] + mi := &file_volume_server_proto_msgTypes[66] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3559,7 +3605,7 @@ func (x *VolumeEcShardReadRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardReadRequest.ProtoReflect.Descriptor instead. 
func (*VolumeEcShardReadRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{65} + return file_volume_server_proto_rawDescGZIP(), []int{66} } func (x *VolumeEcShardReadRequest) GetVolumeId() uint32 { @@ -3607,7 +3653,7 @@ type VolumeEcShardReadResponse struct { func (x *VolumeEcShardReadResponse) Reset() { *x = VolumeEcShardReadResponse{} - mi := &file_volume_server_proto_msgTypes[66] + mi := &file_volume_server_proto_msgTypes[67] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3619,7 +3665,7 @@ func (x *VolumeEcShardReadResponse) String() string { func (*VolumeEcShardReadResponse) ProtoMessage() {} func (x *VolumeEcShardReadResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[66] + mi := &file_volume_server_proto_msgTypes[67] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3632,7 +3678,7 @@ func (x *VolumeEcShardReadResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardReadResponse.ProtoReflect.Descriptor instead. 
func (*VolumeEcShardReadResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{66} + return file_volume_server_proto_rawDescGZIP(), []int{67} } func (x *VolumeEcShardReadResponse) GetData() []byte { @@ -3661,7 +3707,7 @@ type VolumeEcBlobDeleteRequest struct { func (x *VolumeEcBlobDeleteRequest) Reset() { *x = VolumeEcBlobDeleteRequest{} - mi := &file_volume_server_proto_msgTypes[67] + mi := &file_volume_server_proto_msgTypes[68] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3673,7 +3719,7 @@ func (x *VolumeEcBlobDeleteRequest) String() string { func (*VolumeEcBlobDeleteRequest) ProtoMessage() {} func (x *VolumeEcBlobDeleteRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[67] + mi := &file_volume_server_proto_msgTypes[68] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3686,7 +3732,7 @@ func (x *VolumeEcBlobDeleteRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcBlobDeleteRequest.ProtoReflect.Descriptor instead. 
func (*VolumeEcBlobDeleteRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{67} + return file_volume_server_proto_rawDescGZIP(), []int{68} } func (x *VolumeEcBlobDeleteRequest) GetVolumeId() uint32 { @@ -3725,7 +3771,7 @@ type VolumeEcBlobDeleteResponse struct { func (x *VolumeEcBlobDeleteResponse) Reset() { *x = VolumeEcBlobDeleteResponse{} - mi := &file_volume_server_proto_msgTypes[68] + mi := &file_volume_server_proto_msgTypes[69] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3737,7 +3783,7 @@ func (x *VolumeEcBlobDeleteResponse) String() string { func (*VolumeEcBlobDeleteResponse) ProtoMessage() {} func (x *VolumeEcBlobDeleteResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[68] + mi := &file_volume_server_proto_msgTypes[69] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3750,7 +3796,7 @@ func (x *VolumeEcBlobDeleteResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcBlobDeleteResponse.ProtoReflect.Descriptor instead. 
func (*VolumeEcBlobDeleteResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{68} + return file_volume_server_proto_rawDescGZIP(), []int{69} } type VolumeEcShardsToVolumeRequest struct { @@ -3763,7 +3809,7 @@ type VolumeEcShardsToVolumeRequest struct { func (x *VolumeEcShardsToVolumeRequest) Reset() { *x = VolumeEcShardsToVolumeRequest{} - mi := &file_volume_server_proto_msgTypes[69] + mi := &file_volume_server_proto_msgTypes[70] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3775,7 +3821,7 @@ func (x *VolumeEcShardsToVolumeRequest) String() string { func (*VolumeEcShardsToVolumeRequest) ProtoMessage() {} func (x *VolumeEcShardsToVolumeRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[69] + mi := &file_volume_server_proto_msgTypes[70] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3788,7 +3834,7 @@ func (x *VolumeEcShardsToVolumeRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardsToVolumeRequest.ProtoReflect.Descriptor instead. 
func (*VolumeEcShardsToVolumeRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{69} + return file_volume_server_proto_rawDescGZIP(), []int{70} } func (x *VolumeEcShardsToVolumeRequest) GetVolumeId() uint32 { @@ -3813,7 +3859,7 @@ type VolumeEcShardsToVolumeResponse struct { func (x *VolumeEcShardsToVolumeResponse) Reset() { *x = VolumeEcShardsToVolumeResponse{} - mi := &file_volume_server_proto_msgTypes[70] + mi := &file_volume_server_proto_msgTypes[71] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3825,7 +3871,7 @@ func (x *VolumeEcShardsToVolumeResponse) String() string { func (*VolumeEcShardsToVolumeResponse) ProtoMessage() {} func (x *VolumeEcShardsToVolumeResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[70] + mi := &file_volume_server_proto_msgTypes[71] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3838,7 +3884,7 @@ func (x *VolumeEcShardsToVolumeResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardsToVolumeResponse.ProtoReflect.Descriptor instead. 
func (*VolumeEcShardsToVolumeResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{70} + return file_volume_server_proto_rawDescGZIP(), []int{71} } type VolumeEcShardsInfoRequest struct { @@ -3850,7 +3896,7 @@ type VolumeEcShardsInfoRequest struct { func (x *VolumeEcShardsInfoRequest) Reset() { *x = VolumeEcShardsInfoRequest{} - mi := &file_volume_server_proto_msgTypes[71] + mi := &file_volume_server_proto_msgTypes[72] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3862,7 +3908,7 @@ func (x *VolumeEcShardsInfoRequest) String() string { func (*VolumeEcShardsInfoRequest) ProtoMessage() {} func (x *VolumeEcShardsInfoRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[71] + mi := &file_volume_server_proto_msgTypes[72] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3875,7 +3921,7 @@ func (x *VolumeEcShardsInfoRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardsInfoRequest.ProtoReflect.Descriptor instead. 
func (*VolumeEcShardsInfoRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{71} + return file_volume_server_proto_rawDescGZIP(), []int{72} } func (x *VolumeEcShardsInfoRequest) GetVolumeId() uint32 { @@ -3894,7 +3940,7 @@ type VolumeEcShardsInfoResponse struct { func (x *VolumeEcShardsInfoResponse) Reset() { *x = VolumeEcShardsInfoResponse{} - mi := &file_volume_server_proto_msgTypes[72] + mi := &file_volume_server_proto_msgTypes[73] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3906,7 +3952,7 @@ func (x *VolumeEcShardsInfoResponse) String() string { func (*VolumeEcShardsInfoResponse) ProtoMessage() {} func (x *VolumeEcShardsInfoResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[72] + mi := &file_volume_server_proto_msgTypes[73] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3919,7 +3965,7 @@ func (x *VolumeEcShardsInfoResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardsInfoResponse.ProtoReflect.Descriptor instead. 
func (*VolumeEcShardsInfoResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{72} + return file_volume_server_proto_rawDescGZIP(), []int{73} } func (x *VolumeEcShardsInfoResponse) GetEcShardInfos() []*EcShardInfo { @@ -3940,7 +3986,7 @@ type EcShardInfo struct { func (x *EcShardInfo) Reset() { *x = EcShardInfo{} - mi := &file_volume_server_proto_msgTypes[73] + mi := &file_volume_server_proto_msgTypes[74] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3952,7 +3998,7 @@ func (x *EcShardInfo) String() string { func (*EcShardInfo) ProtoMessage() {} func (x *EcShardInfo) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[73] + mi := &file_volume_server_proto_msgTypes[74] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3965,7 +4011,7 @@ func (x *EcShardInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use EcShardInfo.ProtoReflect.Descriptor instead. 
func (*EcShardInfo) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{73} + return file_volume_server_proto_rawDescGZIP(), []int{74} } func (x *EcShardInfo) GetShardId() uint32 { @@ -3998,7 +4044,7 @@ type ReadVolumeFileStatusRequest struct { func (x *ReadVolumeFileStatusRequest) Reset() { *x = ReadVolumeFileStatusRequest{} - mi := &file_volume_server_proto_msgTypes[74] + mi := &file_volume_server_proto_msgTypes[75] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4010,7 +4056,7 @@ func (x *ReadVolumeFileStatusRequest) String() string { func (*ReadVolumeFileStatusRequest) ProtoMessage() {} func (x *ReadVolumeFileStatusRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[74] + mi := &file_volume_server_proto_msgTypes[75] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4023,7 +4069,7 @@ func (x *ReadVolumeFileStatusRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadVolumeFileStatusRequest.ProtoReflect.Descriptor instead. 
func (*ReadVolumeFileStatusRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{74} + return file_volume_server_proto_rawDescGZIP(), []int{75} } func (x *ReadVolumeFileStatusRequest) GetVolumeId() uint32 { @@ -4052,7 +4098,7 @@ type ReadVolumeFileStatusResponse struct { func (x *ReadVolumeFileStatusResponse) Reset() { *x = ReadVolumeFileStatusResponse{} - mi := &file_volume_server_proto_msgTypes[75] + mi := &file_volume_server_proto_msgTypes[76] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4064,7 +4110,7 @@ func (x *ReadVolumeFileStatusResponse) String() string { func (*ReadVolumeFileStatusResponse) ProtoMessage() {} func (x *ReadVolumeFileStatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[75] + mi := &file_volume_server_proto_msgTypes[76] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4077,7 +4123,7 @@ func (x *ReadVolumeFileStatusResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadVolumeFileStatusResponse.ProtoReflect.Descriptor instead. 
func (*ReadVolumeFileStatusResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{75} + return file_volume_server_proto_rawDescGZIP(), []int{76} } func (x *ReadVolumeFileStatusResponse) GetVolumeId() uint32 { @@ -4172,7 +4218,7 @@ type DiskStatus struct { func (x *DiskStatus) Reset() { *x = DiskStatus{} - mi := &file_volume_server_proto_msgTypes[76] + mi := &file_volume_server_proto_msgTypes[77] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4184,7 +4230,7 @@ func (x *DiskStatus) String() string { func (*DiskStatus) ProtoMessage() {} func (x *DiskStatus) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[76] + mi := &file_volume_server_proto_msgTypes[77] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4197,7 +4243,7 @@ func (x *DiskStatus) ProtoReflect() protoreflect.Message { // Deprecated: Use DiskStatus.ProtoReflect.Descriptor instead. func (*DiskStatus) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{76} + return file_volume_server_proto_rawDescGZIP(), []int{77} } func (x *DiskStatus) GetDir() string { @@ -4264,7 +4310,7 @@ type MemStatus struct { func (x *MemStatus) Reset() { *x = MemStatus{} - mi := &file_volume_server_proto_msgTypes[77] + mi := &file_volume_server_proto_msgTypes[78] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4276,7 +4322,7 @@ func (x *MemStatus) String() string { func (*MemStatus) ProtoMessage() {} func (x *MemStatus) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[77] + mi := &file_volume_server_proto_msgTypes[78] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4289,7 +4335,7 @@ func (x *MemStatus) ProtoReflect() protoreflect.Message { // Deprecated: Use MemStatus.ProtoReflect.Descriptor instead. 
func (*MemStatus) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{77} + return file_volume_server_proto_rawDescGZIP(), []int{78} } func (x *MemStatus) GetGoroutines() int32 { @@ -4357,7 +4403,7 @@ type RemoteFile struct { func (x *RemoteFile) Reset() { *x = RemoteFile{} - mi := &file_volume_server_proto_msgTypes[78] + mi := &file_volume_server_proto_msgTypes[79] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4369,7 +4415,7 @@ func (x *RemoteFile) String() string { func (*RemoteFile) ProtoMessage() {} func (x *RemoteFile) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[78] + mi := &file_volume_server_proto_msgTypes[79] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4382,7 +4428,7 @@ func (x *RemoteFile) ProtoReflect() protoreflect.Message { // Deprecated: Use RemoteFile.ProtoReflect.Descriptor instead. func (*RemoteFile) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{78} + return file_volume_server_proto_rawDescGZIP(), []int{79} } func (x *RemoteFile) GetBackendType() string { @@ -4450,7 +4496,7 @@ type VolumeInfo struct { func (x *VolumeInfo) Reset() { *x = VolumeInfo{} - mi := &file_volume_server_proto_msgTypes[79] + mi := &file_volume_server_proto_msgTypes[80] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4462,7 +4508,7 @@ func (x *VolumeInfo) String() string { func (*VolumeInfo) ProtoMessage() {} func (x *VolumeInfo) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[79] + mi := &file_volume_server_proto_msgTypes[80] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4475,7 +4521,7 @@ func (x *VolumeInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeInfo.ProtoReflect.Descriptor instead. 
func (*VolumeInfo) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{79} + return file_volume_server_proto_rawDescGZIP(), []int{80} } func (x *VolumeInfo) GetFiles() []*RemoteFile { @@ -4545,7 +4591,7 @@ type EcShardConfig struct { func (x *EcShardConfig) Reset() { *x = EcShardConfig{} - mi := &file_volume_server_proto_msgTypes[80] + mi := &file_volume_server_proto_msgTypes[81] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4557,7 +4603,7 @@ func (x *EcShardConfig) String() string { func (*EcShardConfig) ProtoMessage() {} func (x *EcShardConfig) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[80] + mi := &file_volume_server_proto_msgTypes[81] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4570,7 +4616,7 @@ func (x *EcShardConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use EcShardConfig.ProtoReflect.Descriptor instead. 
func (*EcShardConfig) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{80} + return file_volume_server_proto_rawDescGZIP(), []int{81} } func (x *EcShardConfig) GetDataShards() uint32 { @@ -4602,7 +4648,7 @@ type OldVersionVolumeInfo struct { func (x *OldVersionVolumeInfo) Reset() { *x = OldVersionVolumeInfo{} - mi := &file_volume_server_proto_msgTypes[81] + mi := &file_volume_server_proto_msgTypes[82] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4614,7 +4660,7 @@ func (x *OldVersionVolumeInfo) String() string { func (*OldVersionVolumeInfo) ProtoMessage() {} func (x *OldVersionVolumeInfo) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[81] + mi := &file_volume_server_proto_msgTypes[82] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4627,7 +4673,7 @@ func (x *OldVersionVolumeInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use OldVersionVolumeInfo.ProtoReflect.Descriptor instead. 
func (*OldVersionVolumeInfo) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{81} + return file_volume_server_proto_rawDescGZIP(), []int{82} } func (x *OldVersionVolumeInfo) GetFiles() []*RemoteFile { @@ -4692,7 +4738,7 @@ type VolumeTierMoveDatToRemoteRequest struct { func (x *VolumeTierMoveDatToRemoteRequest) Reset() { *x = VolumeTierMoveDatToRemoteRequest{} - mi := &file_volume_server_proto_msgTypes[82] + mi := &file_volume_server_proto_msgTypes[83] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4704,7 +4750,7 @@ func (x *VolumeTierMoveDatToRemoteRequest) String() string { func (*VolumeTierMoveDatToRemoteRequest) ProtoMessage() {} func (x *VolumeTierMoveDatToRemoteRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[82] + mi := &file_volume_server_proto_msgTypes[83] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4717,7 +4763,7 @@ func (x *VolumeTierMoveDatToRemoteRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeTierMoveDatToRemoteRequest.ProtoReflect.Descriptor instead. 
func (*VolumeTierMoveDatToRemoteRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{82} + return file_volume_server_proto_rawDescGZIP(), []int{83} } func (x *VolumeTierMoveDatToRemoteRequest) GetVolumeId() uint32 { @@ -4758,7 +4804,7 @@ type VolumeTierMoveDatToRemoteResponse struct { func (x *VolumeTierMoveDatToRemoteResponse) Reset() { *x = VolumeTierMoveDatToRemoteResponse{} - mi := &file_volume_server_proto_msgTypes[83] + mi := &file_volume_server_proto_msgTypes[84] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4770,7 +4816,7 @@ func (x *VolumeTierMoveDatToRemoteResponse) String() string { func (*VolumeTierMoveDatToRemoteResponse) ProtoMessage() {} func (x *VolumeTierMoveDatToRemoteResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[83] + mi := &file_volume_server_proto_msgTypes[84] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4783,7 +4829,7 @@ func (x *VolumeTierMoveDatToRemoteResponse) ProtoReflect() protoreflect.Message // Deprecated: Use VolumeTierMoveDatToRemoteResponse.ProtoReflect.Descriptor instead. 
func (*VolumeTierMoveDatToRemoteResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{83} + return file_volume_server_proto_rawDescGZIP(), []int{84} } func (x *VolumeTierMoveDatToRemoteResponse) GetProcessed() int64 { @@ -4811,7 +4857,7 @@ type VolumeTierMoveDatFromRemoteRequest struct { func (x *VolumeTierMoveDatFromRemoteRequest) Reset() { *x = VolumeTierMoveDatFromRemoteRequest{} - mi := &file_volume_server_proto_msgTypes[84] + mi := &file_volume_server_proto_msgTypes[85] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4823,7 +4869,7 @@ func (x *VolumeTierMoveDatFromRemoteRequest) String() string { func (*VolumeTierMoveDatFromRemoteRequest) ProtoMessage() {} func (x *VolumeTierMoveDatFromRemoteRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[84] + mi := &file_volume_server_proto_msgTypes[85] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4836,7 +4882,7 @@ func (x *VolumeTierMoveDatFromRemoteRequest) ProtoReflect() protoreflect.Message // Deprecated: Use VolumeTierMoveDatFromRemoteRequest.ProtoReflect.Descriptor instead. 
func (*VolumeTierMoveDatFromRemoteRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{84} + return file_volume_server_proto_rawDescGZIP(), []int{85} } func (x *VolumeTierMoveDatFromRemoteRequest) GetVolumeId() uint32 { @@ -4870,7 +4916,7 @@ type VolumeTierMoveDatFromRemoteResponse struct { func (x *VolumeTierMoveDatFromRemoteResponse) Reset() { *x = VolumeTierMoveDatFromRemoteResponse{} - mi := &file_volume_server_proto_msgTypes[85] + mi := &file_volume_server_proto_msgTypes[86] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4882,7 +4928,7 @@ func (x *VolumeTierMoveDatFromRemoteResponse) String() string { func (*VolumeTierMoveDatFromRemoteResponse) ProtoMessage() {} func (x *VolumeTierMoveDatFromRemoteResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[85] + mi := &file_volume_server_proto_msgTypes[86] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4895,7 +4941,7 @@ func (x *VolumeTierMoveDatFromRemoteResponse) ProtoReflect() protoreflect.Messag // Deprecated: Use VolumeTierMoveDatFromRemoteResponse.ProtoReflect.Descriptor instead. 
func (*VolumeTierMoveDatFromRemoteResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{85} + return file_volume_server_proto_rawDescGZIP(), []int{86} } func (x *VolumeTierMoveDatFromRemoteResponse) GetProcessed() int64 { @@ -4920,7 +4966,7 @@ type VolumeServerStatusRequest struct { func (x *VolumeServerStatusRequest) Reset() { *x = VolumeServerStatusRequest{} - mi := &file_volume_server_proto_msgTypes[86] + mi := &file_volume_server_proto_msgTypes[87] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4932,7 +4978,7 @@ func (x *VolumeServerStatusRequest) String() string { func (*VolumeServerStatusRequest) ProtoMessage() {} func (x *VolumeServerStatusRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[86] + mi := &file_volume_server_proto_msgTypes[87] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4945,7 +4991,7 @@ func (x *VolumeServerStatusRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeServerStatusRequest.ProtoReflect.Descriptor instead. 
func (*VolumeServerStatusRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{86} + return file_volume_server_proto_rawDescGZIP(), []int{87} } type VolumeServerStatusResponse struct { @@ -4961,7 +5007,7 @@ type VolumeServerStatusResponse struct { func (x *VolumeServerStatusResponse) Reset() { *x = VolumeServerStatusResponse{} - mi := &file_volume_server_proto_msgTypes[87] + mi := &file_volume_server_proto_msgTypes[88] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4973,7 +5019,7 @@ func (x *VolumeServerStatusResponse) String() string { func (*VolumeServerStatusResponse) ProtoMessage() {} func (x *VolumeServerStatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[87] + mi := &file_volume_server_proto_msgTypes[88] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4986,7 +5032,7 @@ func (x *VolumeServerStatusResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeServerStatusResponse.ProtoReflect.Descriptor instead. 
func (*VolumeServerStatusResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{87} + return file_volume_server_proto_rawDescGZIP(), []int{88} } func (x *VolumeServerStatusResponse) GetDiskStatuses() []*DiskStatus { @@ -5032,7 +5078,7 @@ type VolumeServerLeaveRequest struct { func (x *VolumeServerLeaveRequest) Reset() { *x = VolumeServerLeaveRequest{} - mi := &file_volume_server_proto_msgTypes[88] + mi := &file_volume_server_proto_msgTypes[89] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5044,7 +5090,7 @@ func (x *VolumeServerLeaveRequest) String() string { func (*VolumeServerLeaveRequest) ProtoMessage() {} func (x *VolumeServerLeaveRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[88] + mi := &file_volume_server_proto_msgTypes[89] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5057,7 +5103,7 @@ func (x *VolumeServerLeaveRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeServerLeaveRequest.ProtoReflect.Descriptor instead. 
func (*VolumeServerLeaveRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{88} + return file_volume_server_proto_rawDescGZIP(), []int{89} } type VolumeServerLeaveResponse struct { @@ -5068,7 +5114,7 @@ type VolumeServerLeaveResponse struct { func (x *VolumeServerLeaveResponse) Reset() { *x = VolumeServerLeaveResponse{} - mi := &file_volume_server_proto_msgTypes[89] + mi := &file_volume_server_proto_msgTypes[90] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5080,7 +5126,7 @@ func (x *VolumeServerLeaveResponse) String() string { func (*VolumeServerLeaveResponse) ProtoMessage() {} func (x *VolumeServerLeaveResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[89] + mi := &file_volume_server_proto_msgTypes[90] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5093,7 +5139,7 @@ func (x *VolumeServerLeaveResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeServerLeaveResponse.ProtoReflect.Descriptor instead. 
func (*VolumeServerLeaveResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{89} + return file_volume_server_proto_rawDescGZIP(), []int{90} } // remote storage @@ -5115,7 +5161,7 @@ type FetchAndWriteNeedleRequest struct { func (x *FetchAndWriteNeedleRequest) Reset() { *x = FetchAndWriteNeedleRequest{} - mi := &file_volume_server_proto_msgTypes[90] + mi := &file_volume_server_proto_msgTypes[91] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5127,7 +5173,7 @@ func (x *FetchAndWriteNeedleRequest) String() string { func (*FetchAndWriteNeedleRequest) ProtoMessage() {} func (x *FetchAndWriteNeedleRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[90] + mi := &file_volume_server_proto_msgTypes[91] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5140,7 +5186,7 @@ func (x *FetchAndWriteNeedleRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use FetchAndWriteNeedleRequest.ProtoReflect.Descriptor instead. 
func (*FetchAndWriteNeedleRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{90} + return file_volume_server_proto_rawDescGZIP(), []int{91} } func (x *FetchAndWriteNeedleRequest) GetVolumeId() uint32 { @@ -5215,7 +5261,7 @@ type FetchAndWriteNeedleResponse struct { func (x *FetchAndWriteNeedleResponse) Reset() { *x = FetchAndWriteNeedleResponse{} - mi := &file_volume_server_proto_msgTypes[91] + mi := &file_volume_server_proto_msgTypes[92] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5227,7 +5273,7 @@ func (x *FetchAndWriteNeedleResponse) String() string { func (*FetchAndWriteNeedleResponse) ProtoMessage() {} func (x *FetchAndWriteNeedleResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[91] + mi := &file_volume_server_proto_msgTypes[92] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5240,7 +5286,7 @@ func (x *FetchAndWriteNeedleResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use FetchAndWriteNeedleResponse.ProtoReflect.Descriptor instead. 
func (*FetchAndWriteNeedleResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{91} + return file_volume_server_proto_rawDescGZIP(), []int{92} } func (x *FetchAndWriteNeedleResponse) GetETag() string { @@ -5264,7 +5310,7 @@ type QueryRequest struct { func (x *QueryRequest) Reset() { *x = QueryRequest{} - mi := &file_volume_server_proto_msgTypes[92] + mi := &file_volume_server_proto_msgTypes[93] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5276,7 +5322,7 @@ func (x *QueryRequest) String() string { func (*QueryRequest) ProtoMessage() {} func (x *QueryRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[92] + mi := &file_volume_server_proto_msgTypes[93] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5289,7 +5335,7 @@ func (x *QueryRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use QueryRequest.ProtoReflect.Descriptor instead. 
func (*QueryRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{92} + return file_volume_server_proto_rawDescGZIP(), []int{93} } func (x *QueryRequest) GetSelections() []string { @@ -5336,7 +5382,7 @@ type QueriedStripe struct { func (x *QueriedStripe) Reset() { *x = QueriedStripe{} - mi := &file_volume_server_proto_msgTypes[93] + mi := &file_volume_server_proto_msgTypes[94] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5348,7 +5394,7 @@ func (x *QueriedStripe) String() string { func (*QueriedStripe) ProtoMessage() {} func (x *QueriedStripe) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[93] + mi := &file_volume_server_proto_msgTypes[94] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5361,7 +5407,7 @@ func (x *QueriedStripe) ProtoReflect() protoreflect.Message { // Deprecated: Use QueriedStripe.ProtoReflect.Descriptor instead. 
func (*QueriedStripe) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{93} + return file_volume_server_proto_rawDescGZIP(), []int{94} } func (x *QueriedStripe) GetRecords() []byte { @@ -5381,7 +5427,7 @@ type VolumeNeedleStatusRequest struct { func (x *VolumeNeedleStatusRequest) Reset() { *x = VolumeNeedleStatusRequest{} - mi := &file_volume_server_proto_msgTypes[94] + mi := &file_volume_server_proto_msgTypes[95] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5393,7 +5439,7 @@ func (x *VolumeNeedleStatusRequest) String() string { func (*VolumeNeedleStatusRequest) ProtoMessage() {} func (x *VolumeNeedleStatusRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[94] + mi := &file_volume_server_proto_msgTypes[95] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5406,7 +5452,7 @@ func (x *VolumeNeedleStatusRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeNeedleStatusRequest.ProtoReflect.Descriptor instead. 
func (*VolumeNeedleStatusRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{94} + return file_volume_server_proto_rawDescGZIP(), []int{95} } func (x *VolumeNeedleStatusRequest) GetVolumeId() uint32 { @@ -5437,7 +5483,7 @@ type VolumeNeedleStatusResponse struct { func (x *VolumeNeedleStatusResponse) Reset() { *x = VolumeNeedleStatusResponse{} - mi := &file_volume_server_proto_msgTypes[95] + mi := &file_volume_server_proto_msgTypes[96] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5449,7 +5495,7 @@ func (x *VolumeNeedleStatusResponse) String() string { func (*VolumeNeedleStatusResponse) ProtoMessage() {} func (x *VolumeNeedleStatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[95] + mi := &file_volume_server_proto_msgTypes[96] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5462,7 +5508,7 @@ func (x *VolumeNeedleStatusResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeNeedleStatusResponse.ProtoReflect.Descriptor instead. 
func (*VolumeNeedleStatusResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{95} + return file_volume_server_proto_rawDescGZIP(), []int{96} } func (x *VolumeNeedleStatusResponse) GetNeedleId() uint64 { @@ -5517,7 +5563,7 @@ type PingRequest struct { func (x *PingRequest) Reset() { *x = PingRequest{} - mi := &file_volume_server_proto_msgTypes[96] + mi := &file_volume_server_proto_msgTypes[97] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5529,7 +5575,7 @@ func (x *PingRequest) String() string { func (*PingRequest) ProtoMessage() {} func (x *PingRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[96] + mi := &file_volume_server_proto_msgTypes[97] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5542,7 +5588,7 @@ func (x *PingRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PingRequest.ProtoReflect.Descriptor instead. 
func (*PingRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{96} + return file_volume_server_proto_rawDescGZIP(), []int{97} } func (x *PingRequest) GetTarget() string { @@ -5570,7 +5616,7 @@ type PingResponse struct { func (x *PingResponse) Reset() { *x = PingResponse{} - mi := &file_volume_server_proto_msgTypes[97] + mi := &file_volume_server_proto_msgTypes[98] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5582,7 +5628,7 @@ func (x *PingResponse) String() string { func (*PingResponse) ProtoMessage() {} func (x *PingResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[97] + mi := &file_volume_server_proto_msgTypes[98] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5595,7 +5641,7 @@ func (x *PingResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use PingResponse.ProtoReflect.Descriptor instead. func (*PingResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{97} + return file_volume_server_proto_rawDescGZIP(), []int{98} } func (x *PingResponse) GetStartTimeNs() int64 { @@ -5630,7 +5676,7 @@ type FetchAndWriteNeedleRequest_Replica struct { func (x *FetchAndWriteNeedleRequest_Replica) Reset() { *x = FetchAndWriteNeedleRequest_Replica{} - mi := &file_volume_server_proto_msgTypes[98] + mi := &file_volume_server_proto_msgTypes[99] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5642,7 +5688,7 @@ func (x *FetchAndWriteNeedleRequest_Replica) String() string { func (*FetchAndWriteNeedleRequest_Replica) ProtoMessage() {} func (x *FetchAndWriteNeedleRequest_Replica) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[98] + mi := &file_volume_server_proto_msgTypes[99] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5655,7 
+5701,7 @@ func (x *FetchAndWriteNeedleRequest_Replica) ProtoReflect() protoreflect.Message // Deprecated: Use FetchAndWriteNeedleRequest_Replica.ProtoReflect.Descriptor instead. func (*FetchAndWriteNeedleRequest_Replica) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{90, 0} + return file_volume_server_proto_rawDescGZIP(), []int{91, 0} } func (x *FetchAndWriteNeedleRequest_Replica) GetUrl() string { @@ -5690,7 +5736,7 @@ type QueryRequest_Filter struct { func (x *QueryRequest_Filter) Reset() { *x = QueryRequest_Filter{} - mi := &file_volume_server_proto_msgTypes[99] + mi := &file_volume_server_proto_msgTypes[100] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5702,7 +5748,7 @@ func (x *QueryRequest_Filter) String() string { func (*QueryRequest_Filter) ProtoMessage() {} func (x *QueryRequest_Filter) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[99] + mi := &file_volume_server_proto_msgTypes[100] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5715,7 +5761,7 @@ func (x *QueryRequest_Filter) ProtoReflect() protoreflect.Message { // Deprecated: Use QueryRequest_Filter.ProtoReflect.Descriptor instead. 
func (*QueryRequest_Filter) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{92, 0} + return file_volume_server_proto_rawDescGZIP(), []int{93, 0} } func (x *QueryRequest_Filter) GetField() string { @@ -5752,7 +5798,7 @@ type QueryRequest_InputSerialization struct { func (x *QueryRequest_InputSerialization) Reset() { *x = QueryRequest_InputSerialization{} - mi := &file_volume_server_proto_msgTypes[100] + mi := &file_volume_server_proto_msgTypes[101] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5764,7 +5810,7 @@ func (x *QueryRequest_InputSerialization) String() string { func (*QueryRequest_InputSerialization) ProtoMessage() {} func (x *QueryRequest_InputSerialization) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[100] + mi := &file_volume_server_proto_msgTypes[101] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5777,7 +5823,7 @@ func (x *QueryRequest_InputSerialization) ProtoReflect() protoreflect.Message { // Deprecated: Use QueryRequest_InputSerialization.ProtoReflect.Descriptor instead. 
func (*QueryRequest_InputSerialization) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{92, 1} + return file_volume_server_proto_rawDescGZIP(), []int{93, 1} } func (x *QueryRequest_InputSerialization) GetCompressionType() string { @@ -5818,7 +5864,7 @@ type QueryRequest_OutputSerialization struct { func (x *QueryRequest_OutputSerialization) Reset() { *x = QueryRequest_OutputSerialization{} - mi := &file_volume_server_proto_msgTypes[101] + mi := &file_volume_server_proto_msgTypes[102] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5830,7 +5876,7 @@ func (x *QueryRequest_OutputSerialization) String() string { func (*QueryRequest_OutputSerialization) ProtoMessage() {} func (x *QueryRequest_OutputSerialization) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[101] + mi := &file_volume_server_proto_msgTypes[102] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5843,7 +5889,7 @@ func (x *QueryRequest_OutputSerialization) ProtoReflect() protoreflect.Message { // Deprecated: Use QueryRequest_OutputSerialization.ProtoReflect.Descriptor instead. 
func (*QueryRequest_OutputSerialization) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{92, 2} + return file_volume_server_proto_rawDescGZIP(), []int{93, 2} } func (x *QueryRequest_OutputSerialization) GetCsvOutput() *QueryRequest_OutputSerialization_CSVOutput { @@ -5876,7 +5922,7 @@ type QueryRequest_InputSerialization_CSVInput struct { func (x *QueryRequest_InputSerialization_CSVInput) Reset() { *x = QueryRequest_InputSerialization_CSVInput{} - mi := &file_volume_server_proto_msgTypes[102] + mi := &file_volume_server_proto_msgTypes[103] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5888,7 +5934,7 @@ func (x *QueryRequest_InputSerialization_CSVInput) String() string { func (*QueryRequest_InputSerialization_CSVInput) ProtoMessage() {} func (x *QueryRequest_InputSerialization_CSVInput) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[102] + mi := &file_volume_server_proto_msgTypes[103] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5901,7 +5947,7 @@ func (x *QueryRequest_InputSerialization_CSVInput) ProtoReflect() protoreflect.M // Deprecated: Use QueryRequest_InputSerialization_CSVInput.ProtoReflect.Descriptor instead. 
func (*QueryRequest_InputSerialization_CSVInput) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{92, 1, 0} + return file_volume_server_proto_rawDescGZIP(), []int{93, 1, 0} } func (x *QueryRequest_InputSerialization_CSVInput) GetFileHeaderInfo() string { @@ -5962,7 +6008,7 @@ type QueryRequest_InputSerialization_JSONInput struct { func (x *QueryRequest_InputSerialization_JSONInput) Reset() { *x = QueryRequest_InputSerialization_JSONInput{} - mi := &file_volume_server_proto_msgTypes[103] + mi := &file_volume_server_proto_msgTypes[104] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5974,7 +6020,7 @@ func (x *QueryRequest_InputSerialization_JSONInput) String() string { func (*QueryRequest_InputSerialization_JSONInput) ProtoMessage() {} func (x *QueryRequest_InputSerialization_JSONInput) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[103] + mi := &file_volume_server_proto_msgTypes[104] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5987,7 +6033,7 @@ func (x *QueryRequest_InputSerialization_JSONInput) ProtoReflect() protoreflect. // Deprecated: Use QueryRequest_InputSerialization_JSONInput.ProtoReflect.Descriptor instead. 
func (*QueryRequest_InputSerialization_JSONInput) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{92, 1, 1} + return file_volume_server_proto_rawDescGZIP(), []int{93, 1, 1} } func (x *QueryRequest_InputSerialization_JSONInput) GetType() string { @@ -6005,7 +6051,7 @@ type QueryRequest_InputSerialization_ParquetInput struct { func (x *QueryRequest_InputSerialization_ParquetInput) Reset() { *x = QueryRequest_InputSerialization_ParquetInput{} - mi := &file_volume_server_proto_msgTypes[104] + mi := &file_volume_server_proto_msgTypes[105] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6017,7 +6063,7 @@ func (x *QueryRequest_InputSerialization_ParquetInput) String() string { func (*QueryRequest_InputSerialization_ParquetInput) ProtoMessage() {} func (x *QueryRequest_InputSerialization_ParquetInput) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[104] + mi := &file_volume_server_proto_msgTypes[105] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6030,7 +6076,7 @@ func (x *QueryRequest_InputSerialization_ParquetInput) ProtoReflect() protorefle // Deprecated: Use QueryRequest_InputSerialization_ParquetInput.ProtoReflect.Descriptor instead. 
func (*QueryRequest_InputSerialization_ParquetInput) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{92, 1, 2} + return file_volume_server_proto_rawDescGZIP(), []int{93, 1, 2} } type QueryRequest_OutputSerialization_CSVOutput struct { @@ -6046,7 +6092,7 @@ type QueryRequest_OutputSerialization_CSVOutput struct { func (x *QueryRequest_OutputSerialization_CSVOutput) Reset() { *x = QueryRequest_OutputSerialization_CSVOutput{} - mi := &file_volume_server_proto_msgTypes[105] + mi := &file_volume_server_proto_msgTypes[106] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6058,7 +6104,7 @@ func (x *QueryRequest_OutputSerialization_CSVOutput) String() string { func (*QueryRequest_OutputSerialization_CSVOutput) ProtoMessage() {} func (x *QueryRequest_OutputSerialization_CSVOutput) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[105] + mi := &file_volume_server_proto_msgTypes[106] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6071,7 +6117,7 @@ func (x *QueryRequest_OutputSerialization_CSVOutput) ProtoReflect() protoreflect // Deprecated: Use QueryRequest_OutputSerialization_CSVOutput.ProtoReflect.Descriptor instead. 
func (*QueryRequest_OutputSerialization_CSVOutput) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{92, 2, 0} + return file_volume_server_proto_rawDescGZIP(), []int{93, 2, 0} } func (x *QueryRequest_OutputSerialization_CSVOutput) GetQuoteFields() string { @@ -6118,7 +6164,7 @@ type QueryRequest_OutputSerialization_JSONOutput struct { func (x *QueryRequest_OutputSerialization_JSONOutput) Reset() { *x = QueryRequest_OutputSerialization_JSONOutput{} - mi := &file_volume_server_proto_msgTypes[106] + mi := &file_volume_server_proto_msgTypes[107] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6130,7 +6176,7 @@ func (x *QueryRequest_OutputSerialization_JSONOutput) String() string { func (*QueryRequest_OutputSerialization_JSONOutput) ProtoMessage() {} func (x *QueryRequest_OutputSerialization_JSONOutput) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[106] + mi := &file_volume_server_proto_msgTypes[107] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6143,7 +6189,7 @@ func (x *QueryRequest_OutputSerialization_JSONOutput) ProtoReflect() protoreflec // Deprecated: Use QueryRequest_OutputSerialization_JSONOutput.ProtoReflect.Descriptor instead. 
func (*QueryRequest_OutputSerialization_JSONOutput) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{92, 2, 1} + return file_volume_server_proto_rawDescGZIP(), []int{93, 2, 1} } func (x *QueryRequest_OutputSerialization_JSONOutput) GetRecordDelimiter() string { @@ -6157,7 +6203,9 @@ var File_volume_server_proto protoreflect.FileDescriptor const file_volume_server_proto_rawDesc = "" + "\n" + - "\x13volume_server.proto\x12\x10volume_server_pb\x1a\fremote.proto\"[\n" + + "\x13volume_server.proto\x12\x10volume_server_pb\x1a\fremote.proto\"5\n" + + "\x11VolumeServerState\x12 \n" + + "\vmaintenance\x18\x01 \x01(\bR\vmaintenance\"[\n" + "\x12BatchDeleteRequest\x12\x19\n" + "\bfile_ids\x18\x01 \x03(\tR\afileIds\x12*\n" + "\x11skip_cookie_check\x18\x02 \x01(\bR\x0fskipCookieCheck\"O\n" + @@ -6678,227 +6726,228 @@ func file_volume_server_proto_rawDescGZIP() []byte { return file_volume_server_proto_rawDescData } -var file_volume_server_proto_msgTypes = make([]protoimpl.MessageInfo, 107) +var file_volume_server_proto_msgTypes = make([]protoimpl.MessageInfo, 108) var file_volume_server_proto_goTypes = []any{ - (*BatchDeleteRequest)(nil), // 0: volume_server_pb.BatchDeleteRequest - (*BatchDeleteResponse)(nil), // 1: volume_server_pb.BatchDeleteResponse - (*DeleteResult)(nil), // 2: volume_server_pb.DeleteResult - (*Empty)(nil), // 3: volume_server_pb.Empty - (*VacuumVolumeCheckRequest)(nil), // 4: volume_server_pb.VacuumVolumeCheckRequest - (*VacuumVolumeCheckResponse)(nil), // 5: volume_server_pb.VacuumVolumeCheckResponse - (*VacuumVolumeCompactRequest)(nil), // 6: volume_server_pb.VacuumVolumeCompactRequest - (*VacuumVolumeCompactResponse)(nil), // 7: volume_server_pb.VacuumVolumeCompactResponse - (*VacuumVolumeCommitRequest)(nil), // 8: volume_server_pb.VacuumVolumeCommitRequest - (*VacuumVolumeCommitResponse)(nil), // 9: volume_server_pb.VacuumVolumeCommitResponse - (*VacuumVolumeCleanupRequest)(nil), // 10: 
volume_server_pb.VacuumVolumeCleanupRequest - (*VacuumVolumeCleanupResponse)(nil), // 11: volume_server_pb.VacuumVolumeCleanupResponse - (*DeleteCollectionRequest)(nil), // 12: volume_server_pb.DeleteCollectionRequest - (*DeleteCollectionResponse)(nil), // 13: volume_server_pb.DeleteCollectionResponse - (*AllocateVolumeRequest)(nil), // 14: volume_server_pb.AllocateVolumeRequest - (*AllocateVolumeResponse)(nil), // 15: volume_server_pb.AllocateVolumeResponse - (*VolumeSyncStatusRequest)(nil), // 16: volume_server_pb.VolumeSyncStatusRequest - (*VolumeSyncStatusResponse)(nil), // 17: volume_server_pb.VolumeSyncStatusResponse - (*VolumeIncrementalCopyRequest)(nil), // 18: volume_server_pb.VolumeIncrementalCopyRequest - (*VolumeIncrementalCopyResponse)(nil), // 19: volume_server_pb.VolumeIncrementalCopyResponse - (*VolumeMountRequest)(nil), // 20: volume_server_pb.VolumeMountRequest - (*VolumeMountResponse)(nil), // 21: volume_server_pb.VolumeMountResponse - (*VolumeUnmountRequest)(nil), // 22: volume_server_pb.VolumeUnmountRequest - (*VolumeUnmountResponse)(nil), // 23: volume_server_pb.VolumeUnmountResponse - (*VolumeDeleteRequest)(nil), // 24: volume_server_pb.VolumeDeleteRequest - (*VolumeDeleteResponse)(nil), // 25: volume_server_pb.VolumeDeleteResponse - (*VolumeMarkReadonlyRequest)(nil), // 26: volume_server_pb.VolumeMarkReadonlyRequest - (*VolumeMarkReadonlyResponse)(nil), // 27: volume_server_pb.VolumeMarkReadonlyResponse - (*VolumeMarkWritableRequest)(nil), // 28: volume_server_pb.VolumeMarkWritableRequest - (*VolumeMarkWritableResponse)(nil), // 29: volume_server_pb.VolumeMarkWritableResponse - (*VolumeConfigureRequest)(nil), // 30: volume_server_pb.VolumeConfigureRequest - (*VolumeConfigureResponse)(nil), // 31: volume_server_pb.VolumeConfigureResponse - (*VolumeStatusRequest)(nil), // 32: volume_server_pb.VolumeStatusRequest - (*VolumeStatusResponse)(nil), // 33: volume_server_pb.VolumeStatusResponse - (*VolumeCopyRequest)(nil), // 34: 
volume_server_pb.VolumeCopyRequest - (*VolumeCopyResponse)(nil), // 35: volume_server_pb.VolumeCopyResponse - (*CopyFileRequest)(nil), // 36: volume_server_pb.CopyFileRequest - (*CopyFileResponse)(nil), // 37: volume_server_pb.CopyFileResponse - (*ReceiveFileRequest)(nil), // 38: volume_server_pb.ReceiveFileRequest - (*ReceiveFileInfo)(nil), // 39: volume_server_pb.ReceiveFileInfo - (*ReceiveFileResponse)(nil), // 40: volume_server_pb.ReceiveFileResponse - (*ReadNeedleBlobRequest)(nil), // 41: volume_server_pb.ReadNeedleBlobRequest - (*ReadNeedleBlobResponse)(nil), // 42: volume_server_pb.ReadNeedleBlobResponse - (*ReadNeedleMetaRequest)(nil), // 43: volume_server_pb.ReadNeedleMetaRequest - (*ReadNeedleMetaResponse)(nil), // 44: volume_server_pb.ReadNeedleMetaResponse - (*WriteNeedleBlobRequest)(nil), // 45: volume_server_pb.WriteNeedleBlobRequest - (*WriteNeedleBlobResponse)(nil), // 46: volume_server_pb.WriteNeedleBlobResponse - (*ReadAllNeedlesRequest)(nil), // 47: volume_server_pb.ReadAllNeedlesRequest - (*ReadAllNeedlesResponse)(nil), // 48: volume_server_pb.ReadAllNeedlesResponse - (*VolumeTailSenderRequest)(nil), // 49: volume_server_pb.VolumeTailSenderRequest - (*VolumeTailSenderResponse)(nil), // 50: volume_server_pb.VolumeTailSenderResponse - (*VolumeTailReceiverRequest)(nil), // 51: volume_server_pb.VolumeTailReceiverRequest - (*VolumeTailReceiverResponse)(nil), // 52: volume_server_pb.VolumeTailReceiverResponse - (*VolumeEcShardsGenerateRequest)(nil), // 53: volume_server_pb.VolumeEcShardsGenerateRequest - (*VolumeEcShardsGenerateResponse)(nil), // 54: volume_server_pb.VolumeEcShardsGenerateResponse - (*VolumeEcShardsRebuildRequest)(nil), // 55: volume_server_pb.VolumeEcShardsRebuildRequest - (*VolumeEcShardsRebuildResponse)(nil), // 56: volume_server_pb.VolumeEcShardsRebuildResponse - (*VolumeEcShardsCopyRequest)(nil), // 57: volume_server_pb.VolumeEcShardsCopyRequest - (*VolumeEcShardsCopyResponse)(nil), // 58: 
volume_server_pb.VolumeEcShardsCopyResponse - (*VolumeEcShardsDeleteRequest)(nil), // 59: volume_server_pb.VolumeEcShardsDeleteRequest - (*VolumeEcShardsDeleteResponse)(nil), // 60: volume_server_pb.VolumeEcShardsDeleteResponse - (*VolumeEcShardsMountRequest)(nil), // 61: volume_server_pb.VolumeEcShardsMountRequest - (*VolumeEcShardsMountResponse)(nil), // 62: volume_server_pb.VolumeEcShardsMountResponse - (*VolumeEcShardsUnmountRequest)(nil), // 63: volume_server_pb.VolumeEcShardsUnmountRequest - (*VolumeEcShardsUnmountResponse)(nil), // 64: volume_server_pb.VolumeEcShardsUnmountResponse - (*VolumeEcShardReadRequest)(nil), // 65: volume_server_pb.VolumeEcShardReadRequest - (*VolumeEcShardReadResponse)(nil), // 66: volume_server_pb.VolumeEcShardReadResponse - (*VolumeEcBlobDeleteRequest)(nil), // 67: volume_server_pb.VolumeEcBlobDeleteRequest - (*VolumeEcBlobDeleteResponse)(nil), // 68: volume_server_pb.VolumeEcBlobDeleteResponse - (*VolumeEcShardsToVolumeRequest)(nil), // 69: volume_server_pb.VolumeEcShardsToVolumeRequest - (*VolumeEcShardsToVolumeResponse)(nil), // 70: volume_server_pb.VolumeEcShardsToVolumeResponse - (*VolumeEcShardsInfoRequest)(nil), // 71: volume_server_pb.VolumeEcShardsInfoRequest - (*VolumeEcShardsInfoResponse)(nil), // 72: volume_server_pb.VolumeEcShardsInfoResponse - (*EcShardInfo)(nil), // 73: volume_server_pb.EcShardInfo - (*ReadVolumeFileStatusRequest)(nil), // 74: volume_server_pb.ReadVolumeFileStatusRequest - (*ReadVolumeFileStatusResponse)(nil), // 75: volume_server_pb.ReadVolumeFileStatusResponse - (*DiskStatus)(nil), // 76: volume_server_pb.DiskStatus - (*MemStatus)(nil), // 77: volume_server_pb.MemStatus - (*RemoteFile)(nil), // 78: volume_server_pb.RemoteFile - (*VolumeInfo)(nil), // 79: volume_server_pb.VolumeInfo - (*EcShardConfig)(nil), // 80: volume_server_pb.EcShardConfig - (*OldVersionVolumeInfo)(nil), // 81: volume_server_pb.OldVersionVolumeInfo - (*VolumeTierMoveDatToRemoteRequest)(nil), // 82: 
volume_server_pb.VolumeTierMoveDatToRemoteRequest - (*VolumeTierMoveDatToRemoteResponse)(nil), // 83: volume_server_pb.VolumeTierMoveDatToRemoteResponse - (*VolumeTierMoveDatFromRemoteRequest)(nil), // 84: volume_server_pb.VolumeTierMoveDatFromRemoteRequest - (*VolumeTierMoveDatFromRemoteResponse)(nil), // 85: volume_server_pb.VolumeTierMoveDatFromRemoteResponse - (*VolumeServerStatusRequest)(nil), // 86: volume_server_pb.VolumeServerStatusRequest - (*VolumeServerStatusResponse)(nil), // 87: volume_server_pb.VolumeServerStatusResponse - (*VolumeServerLeaveRequest)(nil), // 88: volume_server_pb.VolumeServerLeaveRequest - (*VolumeServerLeaveResponse)(nil), // 89: volume_server_pb.VolumeServerLeaveResponse - (*FetchAndWriteNeedleRequest)(nil), // 90: volume_server_pb.FetchAndWriteNeedleRequest - (*FetchAndWriteNeedleResponse)(nil), // 91: volume_server_pb.FetchAndWriteNeedleResponse - (*QueryRequest)(nil), // 92: volume_server_pb.QueryRequest - (*QueriedStripe)(nil), // 93: volume_server_pb.QueriedStripe - (*VolumeNeedleStatusRequest)(nil), // 94: volume_server_pb.VolumeNeedleStatusRequest - (*VolumeNeedleStatusResponse)(nil), // 95: volume_server_pb.VolumeNeedleStatusResponse - (*PingRequest)(nil), // 96: volume_server_pb.PingRequest - (*PingResponse)(nil), // 97: volume_server_pb.PingResponse - (*FetchAndWriteNeedleRequest_Replica)(nil), // 98: volume_server_pb.FetchAndWriteNeedleRequest.Replica - (*QueryRequest_Filter)(nil), // 99: volume_server_pb.QueryRequest.Filter - (*QueryRequest_InputSerialization)(nil), // 100: volume_server_pb.QueryRequest.InputSerialization - (*QueryRequest_OutputSerialization)(nil), // 101: volume_server_pb.QueryRequest.OutputSerialization - (*QueryRequest_InputSerialization_CSVInput)(nil), // 102: volume_server_pb.QueryRequest.InputSerialization.CSVInput - (*QueryRequest_InputSerialization_JSONInput)(nil), // 103: volume_server_pb.QueryRequest.InputSerialization.JSONInput - (*QueryRequest_InputSerialization_ParquetInput)(nil), // 104: 
volume_server_pb.QueryRequest.InputSerialization.ParquetInput - (*QueryRequest_OutputSerialization_CSVOutput)(nil), // 105: volume_server_pb.QueryRequest.OutputSerialization.CSVOutput - (*QueryRequest_OutputSerialization_JSONOutput)(nil), // 106: volume_server_pb.QueryRequest.OutputSerialization.JSONOutput - (*remote_pb.RemoteConf)(nil), // 107: remote_pb.RemoteConf - (*remote_pb.RemoteStorageLocation)(nil), // 108: remote_pb.RemoteStorageLocation + (*VolumeServerState)(nil), // 0: volume_server_pb.VolumeServerState + (*BatchDeleteRequest)(nil), // 1: volume_server_pb.BatchDeleteRequest + (*BatchDeleteResponse)(nil), // 2: volume_server_pb.BatchDeleteResponse + (*DeleteResult)(nil), // 3: volume_server_pb.DeleteResult + (*Empty)(nil), // 4: volume_server_pb.Empty + (*VacuumVolumeCheckRequest)(nil), // 5: volume_server_pb.VacuumVolumeCheckRequest + (*VacuumVolumeCheckResponse)(nil), // 6: volume_server_pb.VacuumVolumeCheckResponse + (*VacuumVolumeCompactRequest)(nil), // 7: volume_server_pb.VacuumVolumeCompactRequest + (*VacuumVolumeCompactResponse)(nil), // 8: volume_server_pb.VacuumVolumeCompactResponse + (*VacuumVolumeCommitRequest)(nil), // 9: volume_server_pb.VacuumVolumeCommitRequest + (*VacuumVolumeCommitResponse)(nil), // 10: volume_server_pb.VacuumVolumeCommitResponse + (*VacuumVolumeCleanupRequest)(nil), // 11: volume_server_pb.VacuumVolumeCleanupRequest + (*VacuumVolumeCleanupResponse)(nil), // 12: volume_server_pb.VacuumVolumeCleanupResponse + (*DeleteCollectionRequest)(nil), // 13: volume_server_pb.DeleteCollectionRequest + (*DeleteCollectionResponse)(nil), // 14: volume_server_pb.DeleteCollectionResponse + (*AllocateVolumeRequest)(nil), // 15: volume_server_pb.AllocateVolumeRequest + (*AllocateVolumeResponse)(nil), // 16: volume_server_pb.AllocateVolumeResponse + (*VolumeSyncStatusRequest)(nil), // 17: volume_server_pb.VolumeSyncStatusRequest + (*VolumeSyncStatusResponse)(nil), // 18: volume_server_pb.VolumeSyncStatusResponse + 
(*VolumeIncrementalCopyRequest)(nil), // 19: volume_server_pb.VolumeIncrementalCopyRequest + (*VolumeIncrementalCopyResponse)(nil), // 20: volume_server_pb.VolumeIncrementalCopyResponse + (*VolumeMountRequest)(nil), // 21: volume_server_pb.VolumeMountRequest + (*VolumeMountResponse)(nil), // 22: volume_server_pb.VolumeMountResponse + (*VolumeUnmountRequest)(nil), // 23: volume_server_pb.VolumeUnmountRequest + (*VolumeUnmountResponse)(nil), // 24: volume_server_pb.VolumeUnmountResponse + (*VolumeDeleteRequest)(nil), // 25: volume_server_pb.VolumeDeleteRequest + (*VolumeDeleteResponse)(nil), // 26: volume_server_pb.VolumeDeleteResponse + (*VolumeMarkReadonlyRequest)(nil), // 27: volume_server_pb.VolumeMarkReadonlyRequest + (*VolumeMarkReadonlyResponse)(nil), // 28: volume_server_pb.VolumeMarkReadonlyResponse + (*VolumeMarkWritableRequest)(nil), // 29: volume_server_pb.VolumeMarkWritableRequest + (*VolumeMarkWritableResponse)(nil), // 30: volume_server_pb.VolumeMarkWritableResponse + (*VolumeConfigureRequest)(nil), // 31: volume_server_pb.VolumeConfigureRequest + (*VolumeConfigureResponse)(nil), // 32: volume_server_pb.VolumeConfigureResponse + (*VolumeStatusRequest)(nil), // 33: volume_server_pb.VolumeStatusRequest + (*VolumeStatusResponse)(nil), // 34: volume_server_pb.VolumeStatusResponse + (*VolumeCopyRequest)(nil), // 35: volume_server_pb.VolumeCopyRequest + (*VolumeCopyResponse)(nil), // 36: volume_server_pb.VolumeCopyResponse + (*CopyFileRequest)(nil), // 37: volume_server_pb.CopyFileRequest + (*CopyFileResponse)(nil), // 38: volume_server_pb.CopyFileResponse + (*ReceiveFileRequest)(nil), // 39: volume_server_pb.ReceiveFileRequest + (*ReceiveFileInfo)(nil), // 40: volume_server_pb.ReceiveFileInfo + (*ReceiveFileResponse)(nil), // 41: volume_server_pb.ReceiveFileResponse + (*ReadNeedleBlobRequest)(nil), // 42: volume_server_pb.ReadNeedleBlobRequest + (*ReadNeedleBlobResponse)(nil), // 43: volume_server_pb.ReadNeedleBlobResponse + (*ReadNeedleMetaRequest)(nil), 
// 44: volume_server_pb.ReadNeedleMetaRequest + (*ReadNeedleMetaResponse)(nil), // 45: volume_server_pb.ReadNeedleMetaResponse + (*WriteNeedleBlobRequest)(nil), // 46: volume_server_pb.WriteNeedleBlobRequest + (*WriteNeedleBlobResponse)(nil), // 47: volume_server_pb.WriteNeedleBlobResponse + (*ReadAllNeedlesRequest)(nil), // 48: volume_server_pb.ReadAllNeedlesRequest + (*ReadAllNeedlesResponse)(nil), // 49: volume_server_pb.ReadAllNeedlesResponse + (*VolumeTailSenderRequest)(nil), // 50: volume_server_pb.VolumeTailSenderRequest + (*VolumeTailSenderResponse)(nil), // 51: volume_server_pb.VolumeTailSenderResponse + (*VolumeTailReceiverRequest)(nil), // 52: volume_server_pb.VolumeTailReceiverRequest + (*VolumeTailReceiverResponse)(nil), // 53: volume_server_pb.VolumeTailReceiverResponse + (*VolumeEcShardsGenerateRequest)(nil), // 54: volume_server_pb.VolumeEcShardsGenerateRequest + (*VolumeEcShardsGenerateResponse)(nil), // 55: volume_server_pb.VolumeEcShardsGenerateResponse + (*VolumeEcShardsRebuildRequest)(nil), // 56: volume_server_pb.VolumeEcShardsRebuildRequest + (*VolumeEcShardsRebuildResponse)(nil), // 57: volume_server_pb.VolumeEcShardsRebuildResponse + (*VolumeEcShardsCopyRequest)(nil), // 58: volume_server_pb.VolumeEcShardsCopyRequest + (*VolumeEcShardsCopyResponse)(nil), // 59: volume_server_pb.VolumeEcShardsCopyResponse + (*VolumeEcShardsDeleteRequest)(nil), // 60: volume_server_pb.VolumeEcShardsDeleteRequest + (*VolumeEcShardsDeleteResponse)(nil), // 61: volume_server_pb.VolumeEcShardsDeleteResponse + (*VolumeEcShardsMountRequest)(nil), // 62: volume_server_pb.VolumeEcShardsMountRequest + (*VolumeEcShardsMountResponse)(nil), // 63: volume_server_pb.VolumeEcShardsMountResponse + (*VolumeEcShardsUnmountRequest)(nil), // 64: volume_server_pb.VolumeEcShardsUnmountRequest + (*VolumeEcShardsUnmountResponse)(nil), // 65: volume_server_pb.VolumeEcShardsUnmountResponse + (*VolumeEcShardReadRequest)(nil), // 66: volume_server_pb.VolumeEcShardReadRequest + 
(*VolumeEcShardReadResponse)(nil), // 67: volume_server_pb.VolumeEcShardReadResponse + (*VolumeEcBlobDeleteRequest)(nil), // 68: volume_server_pb.VolumeEcBlobDeleteRequest + (*VolumeEcBlobDeleteResponse)(nil), // 69: volume_server_pb.VolumeEcBlobDeleteResponse + (*VolumeEcShardsToVolumeRequest)(nil), // 70: volume_server_pb.VolumeEcShardsToVolumeRequest + (*VolumeEcShardsToVolumeResponse)(nil), // 71: volume_server_pb.VolumeEcShardsToVolumeResponse + (*VolumeEcShardsInfoRequest)(nil), // 72: volume_server_pb.VolumeEcShardsInfoRequest + (*VolumeEcShardsInfoResponse)(nil), // 73: volume_server_pb.VolumeEcShardsInfoResponse + (*EcShardInfo)(nil), // 74: volume_server_pb.EcShardInfo + (*ReadVolumeFileStatusRequest)(nil), // 75: volume_server_pb.ReadVolumeFileStatusRequest + (*ReadVolumeFileStatusResponse)(nil), // 76: volume_server_pb.ReadVolumeFileStatusResponse + (*DiskStatus)(nil), // 77: volume_server_pb.DiskStatus + (*MemStatus)(nil), // 78: volume_server_pb.MemStatus + (*RemoteFile)(nil), // 79: volume_server_pb.RemoteFile + (*VolumeInfo)(nil), // 80: volume_server_pb.VolumeInfo + (*EcShardConfig)(nil), // 81: volume_server_pb.EcShardConfig + (*OldVersionVolumeInfo)(nil), // 82: volume_server_pb.OldVersionVolumeInfo + (*VolumeTierMoveDatToRemoteRequest)(nil), // 83: volume_server_pb.VolumeTierMoveDatToRemoteRequest + (*VolumeTierMoveDatToRemoteResponse)(nil), // 84: volume_server_pb.VolumeTierMoveDatToRemoteResponse + (*VolumeTierMoveDatFromRemoteRequest)(nil), // 85: volume_server_pb.VolumeTierMoveDatFromRemoteRequest + (*VolumeTierMoveDatFromRemoteResponse)(nil), // 86: volume_server_pb.VolumeTierMoveDatFromRemoteResponse + (*VolumeServerStatusRequest)(nil), // 87: volume_server_pb.VolumeServerStatusRequest + (*VolumeServerStatusResponse)(nil), // 88: volume_server_pb.VolumeServerStatusResponse + (*VolumeServerLeaveRequest)(nil), // 89: volume_server_pb.VolumeServerLeaveRequest + (*VolumeServerLeaveResponse)(nil), // 90: 
volume_server_pb.VolumeServerLeaveResponse + (*FetchAndWriteNeedleRequest)(nil), // 91: volume_server_pb.FetchAndWriteNeedleRequest + (*FetchAndWriteNeedleResponse)(nil), // 92: volume_server_pb.FetchAndWriteNeedleResponse + (*QueryRequest)(nil), // 93: volume_server_pb.QueryRequest + (*QueriedStripe)(nil), // 94: volume_server_pb.QueriedStripe + (*VolumeNeedleStatusRequest)(nil), // 95: volume_server_pb.VolumeNeedleStatusRequest + (*VolumeNeedleStatusResponse)(nil), // 96: volume_server_pb.VolumeNeedleStatusResponse + (*PingRequest)(nil), // 97: volume_server_pb.PingRequest + (*PingResponse)(nil), // 98: volume_server_pb.PingResponse + (*FetchAndWriteNeedleRequest_Replica)(nil), // 99: volume_server_pb.FetchAndWriteNeedleRequest.Replica + (*QueryRequest_Filter)(nil), // 100: volume_server_pb.QueryRequest.Filter + (*QueryRequest_InputSerialization)(nil), // 101: volume_server_pb.QueryRequest.InputSerialization + (*QueryRequest_OutputSerialization)(nil), // 102: volume_server_pb.QueryRequest.OutputSerialization + (*QueryRequest_InputSerialization_CSVInput)(nil), // 103: volume_server_pb.QueryRequest.InputSerialization.CSVInput + (*QueryRequest_InputSerialization_JSONInput)(nil), // 104: volume_server_pb.QueryRequest.InputSerialization.JSONInput + (*QueryRequest_InputSerialization_ParquetInput)(nil), // 105: volume_server_pb.QueryRequest.InputSerialization.ParquetInput + (*QueryRequest_OutputSerialization_CSVOutput)(nil), // 106: volume_server_pb.QueryRequest.OutputSerialization.CSVOutput + (*QueryRequest_OutputSerialization_JSONOutput)(nil), // 107: volume_server_pb.QueryRequest.OutputSerialization.JSONOutput + (*remote_pb.RemoteConf)(nil), // 108: remote_pb.RemoteConf + (*remote_pb.RemoteStorageLocation)(nil), // 109: remote_pb.RemoteStorageLocation } var file_volume_server_proto_depIdxs = []int32{ - 2, // 0: volume_server_pb.BatchDeleteResponse.results:type_name -> volume_server_pb.DeleteResult - 39, // 1: volume_server_pb.ReceiveFileRequest.info:type_name -> 
volume_server_pb.ReceiveFileInfo - 73, // 2: volume_server_pb.VolumeEcShardsInfoResponse.ec_shard_infos:type_name -> volume_server_pb.EcShardInfo - 79, // 3: volume_server_pb.ReadVolumeFileStatusResponse.volume_info:type_name -> volume_server_pb.VolumeInfo - 78, // 4: volume_server_pb.VolumeInfo.files:type_name -> volume_server_pb.RemoteFile - 80, // 5: volume_server_pb.VolumeInfo.ec_shard_config:type_name -> volume_server_pb.EcShardConfig - 78, // 6: volume_server_pb.OldVersionVolumeInfo.files:type_name -> volume_server_pb.RemoteFile - 76, // 7: volume_server_pb.VolumeServerStatusResponse.disk_statuses:type_name -> volume_server_pb.DiskStatus - 77, // 8: volume_server_pb.VolumeServerStatusResponse.memory_status:type_name -> volume_server_pb.MemStatus - 98, // 9: volume_server_pb.FetchAndWriteNeedleRequest.replicas:type_name -> volume_server_pb.FetchAndWriteNeedleRequest.Replica - 107, // 10: volume_server_pb.FetchAndWriteNeedleRequest.remote_conf:type_name -> remote_pb.RemoteConf - 108, // 11: volume_server_pb.FetchAndWriteNeedleRequest.remote_location:type_name -> remote_pb.RemoteStorageLocation - 99, // 12: volume_server_pb.QueryRequest.filter:type_name -> volume_server_pb.QueryRequest.Filter - 100, // 13: volume_server_pb.QueryRequest.input_serialization:type_name -> volume_server_pb.QueryRequest.InputSerialization - 101, // 14: volume_server_pb.QueryRequest.output_serialization:type_name -> volume_server_pb.QueryRequest.OutputSerialization - 102, // 15: volume_server_pb.QueryRequest.InputSerialization.csv_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.CSVInput - 103, // 16: volume_server_pb.QueryRequest.InputSerialization.json_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.JSONInput - 104, // 17: volume_server_pb.QueryRequest.InputSerialization.parquet_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.ParquetInput - 105, // 18: volume_server_pb.QueryRequest.OutputSerialization.csv_output:type_name -> 
volume_server_pb.QueryRequest.OutputSerialization.CSVOutput - 106, // 19: volume_server_pb.QueryRequest.OutputSerialization.json_output:type_name -> volume_server_pb.QueryRequest.OutputSerialization.JSONOutput - 0, // 20: volume_server_pb.VolumeServer.BatchDelete:input_type -> volume_server_pb.BatchDeleteRequest - 4, // 21: volume_server_pb.VolumeServer.VacuumVolumeCheck:input_type -> volume_server_pb.VacuumVolumeCheckRequest - 6, // 22: volume_server_pb.VolumeServer.VacuumVolumeCompact:input_type -> volume_server_pb.VacuumVolumeCompactRequest - 8, // 23: volume_server_pb.VolumeServer.VacuumVolumeCommit:input_type -> volume_server_pb.VacuumVolumeCommitRequest - 10, // 24: volume_server_pb.VolumeServer.VacuumVolumeCleanup:input_type -> volume_server_pb.VacuumVolumeCleanupRequest - 12, // 25: volume_server_pb.VolumeServer.DeleteCollection:input_type -> volume_server_pb.DeleteCollectionRequest - 14, // 26: volume_server_pb.VolumeServer.AllocateVolume:input_type -> volume_server_pb.AllocateVolumeRequest - 16, // 27: volume_server_pb.VolumeServer.VolumeSyncStatus:input_type -> volume_server_pb.VolumeSyncStatusRequest - 18, // 28: volume_server_pb.VolumeServer.VolumeIncrementalCopy:input_type -> volume_server_pb.VolumeIncrementalCopyRequest - 20, // 29: volume_server_pb.VolumeServer.VolumeMount:input_type -> volume_server_pb.VolumeMountRequest - 22, // 30: volume_server_pb.VolumeServer.VolumeUnmount:input_type -> volume_server_pb.VolumeUnmountRequest - 24, // 31: volume_server_pb.VolumeServer.VolumeDelete:input_type -> volume_server_pb.VolumeDeleteRequest - 26, // 32: volume_server_pb.VolumeServer.VolumeMarkReadonly:input_type -> volume_server_pb.VolumeMarkReadonlyRequest - 28, // 33: volume_server_pb.VolumeServer.VolumeMarkWritable:input_type -> volume_server_pb.VolumeMarkWritableRequest - 30, // 34: volume_server_pb.VolumeServer.VolumeConfigure:input_type -> volume_server_pb.VolumeConfigureRequest - 32, // 35: volume_server_pb.VolumeServer.VolumeStatus:input_type -> 
volume_server_pb.VolumeStatusRequest - 34, // 36: volume_server_pb.VolumeServer.VolumeCopy:input_type -> volume_server_pb.VolumeCopyRequest - 74, // 37: volume_server_pb.VolumeServer.ReadVolumeFileStatus:input_type -> volume_server_pb.ReadVolumeFileStatusRequest - 36, // 38: volume_server_pb.VolumeServer.CopyFile:input_type -> volume_server_pb.CopyFileRequest - 38, // 39: volume_server_pb.VolumeServer.ReceiveFile:input_type -> volume_server_pb.ReceiveFileRequest - 41, // 40: volume_server_pb.VolumeServer.ReadNeedleBlob:input_type -> volume_server_pb.ReadNeedleBlobRequest - 43, // 41: volume_server_pb.VolumeServer.ReadNeedleMeta:input_type -> volume_server_pb.ReadNeedleMetaRequest - 45, // 42: volume_server_pb.VolumeServer.WriteNeedleBlob:input_type -> volume_server_pb.WriteNeedleBlobRequest - 47, // 43: volume_server_pb.VolumeServer.ReadAllNeedles:input_type -> volume_server_pb.ReadAllNeedlesRequest - 49, // 44: volume_server_pb.VolumeServer.VolumeTailSender:input_type -> volume_server_pb.VolumeTailSenderRequest - 51, // 45: volume_server_pb.VolumeServer.VolumeTailReceiver:input_type -> volume_server_pb.VolumeTailReceiverRequest - 53, // 46: volume_server_pb.VolumeServer.VolumeEcShardsGenerate:input_type -> volume_server_pb.VolumeEcShardsGenerateRequest - 55, // 47: volume_server_pb.VolumeServer.VolumeEcShardsRebuild:input_type -> volume_server_pb.VolumeEcShardsRebuildRequest - 57, // 48: volume_server_pb.VolumeServer.VolumeEcShardsCopy:input_type -> volume_server_pb.VolumeEcShardsCopyRequest - 59, // 49: volume_server_pb.VolumeServer.VolumeEcShardsDelete:input_type -> volume_server_pb.VolumeEcShardsDeleteRequest - 61, // 50: volume_server_pb.VolumeServer.VolumeEcShardsMount:input_type -> volume_server_pb.VolumeEcShardsMountRequest - 63, // 51: volume_server_pb.VolumeServer.VolumeEcShardsUnmount:input_type -> volume_server_pb.VolumeEcShardsUnmountRequest - 65, // 52: volume_server_pb.VolumeServer.VolumeEcShardRead:input_type -> 
volume_server_pb.VolumeEcShardReadRequest - 67, // 53: volume_server_pb.VolumeServer.VolumeEcBlobDelete:input_type -> volume_server_pb.VolumeEcBlobDeleteRequest - 69, // 54: volume_server_pb.VolumeServer.VolumeEcShardsToVolume:input_type -> volume_server_pb.VolumeEcShardsToVolumeRequest - 71, // 55: volume_server_pb.VolumeServer.VolumeEcShardsInfo:input_type -> volume_server_pb.VolumeEcShardsInfoRequest - 82, // 56: volume_server_pb.VolumeServer.VolumeTierMoveDatToRemote:input_type -> volume_server_pb.VolumeTierMoveDatToRemoteRequest - 84, // 57: volume_server_pb.VolumeServer.VolumeTierMoveDatFromRemote:input_type -> volume_server_pb.VolumeTierMoveDatFromRemoteRequest - 86, // 58: volume_server_pb.VolumeServer.VolumeServerStatus:input_type -> volume_server_pb.VolumeServerStatusRequest - 88, // 59: volume_server_pb.VolumeServer.VolumeServerLeave:input_type -> volume_server_pb.VolumeServerLeaveRequest - 90, // 60: volume_server_pb.VolumeServer.FetchAndWriteNeedle:input_type -> volume_server_pb.FetchAndWriteNeedleRequest - 92, // 61: volume_server_pb.VolumeServer.Query:input_type -> volume_server_pb.QueryRequest - 94, // 62: volume_server_pb.VolumeServer.VolumeNeedleStatus:input_type -> volume_server_pb.VolumeNeedleStatusRequest - 96, // 63: volume_server_pb.VolumeServer.Ping:input_type -> volume_server_pb.PingRequest - 1, // 64: volume_server_pb.VolumeServer.BatchDelete:output_type -> volume_server_pb.BatchDeleteResponse - 5, // 65: volume_server_pb.VolumeServer.VacuumVolumeCheck:output_type -> volume_server_pb.VacuumVolumeCheckResponse - 7, // 66: volume_server_pb.VolumeServer.VacuumVolumeCompact:output_type -> volume_server_pb.VacuumVolumeCompactResponse - 9, // 67: volume_server_pb.VolumeServer.VacuumVolumeCommit:output_type -> volume_server_pb.VacuumVolumeCommitResponse - 11, // 68: volume_server_pb.VolumeServer.VacuumVolumeCleanup:output_type -> volume_server_pb.VacuumVolumeCleanupResponse - 13, // 69: volume_server_pb.VolumeServer.DeleteCollection:output_type 
-> volume_server_pb.DeleteCollectionResponse - 15, // 70: volume_server_pb.VolumeServer.AllocateVolume:output_type -> volume_server_pb.AllocateVolumeResponse - 17, // 71: volume_server_pb.VolumeServer.VolumeSyncStatus:output_type -> volume_server_pb.VolumeSyncStatusResponse - 19, // 72: volume_server_pb.VolumeServer.VolumeIncrementalCopy:output_type -> volume_server_pb.VolumeIncrementalCopyResponse - 21, // 73: volume_server_pb.VolumeServer.VolumeMount:output_type -> volume_server_pb.VolumeMountResponse - 23, // 74: volume_server_pb.VolumeServer.VolumeUnmount:output_type -> volume_server_pb.VolumeUnmountResponse - 25, // 75: volume_server_pb.VolumeServer.VolumeDelete:output_type -> volume_server_pb.VolumeDeleteResponse - 27, // 76: volume_server_pb.VolumeServer.VolumeMarkReadonly:output_type -> volume_server_pb.VolumeMarkReadonlyResponse - 29, // 77: volume_server_pb.VolumeServer.VolumeMarkWritable:output_type -> volume_server_pb.VolumeMarkWritableResponse - 31, // 78: volume_server_pb.VolumeServer.VolumeConfigure:output_type -> volume_server_pb.VolumeConfigureResponse - 33, // 79: volume_server_pb.VolumeServer.VolumeStatus:output_type -> volume_server_pb.VolumeStatusResponse - 35, // 80: volume_server_pb.VolumeServer.VolumeCopy:output_type -> volume_server_pb.VolumeCopyResponse - 75, // 81: volume_server_pb.VolumeServer.ReadVolumeFileStatus:output_type -> volume_server_pb.ReadVolumeFileStatusResponse - 37, // 82: volume_server_pb.VolumeServer.CopyFile:output_type -> volume_server_pb.CopyFileResponse - 40, // 83: volume_server_pb.VolumeServer.ReceiveFile:output_type -> volume_server_pb.ReceiveFileResponse - 42, // 84: volume_server_pb.VolumeServer.ReadNeedleBlob:output_type -> volume_server_pb.ReadNeedleBlobResponse - 44, // 85: volume_server_pb.VolumeServer.ReadNeedleMeta:output_type -> volume_server_pb.ReadNeedleMetaResponse - 46, // 86: volume_server_pb.VolumeServer.WriteNeedleBlob:output_type -> volume_server_pb.WriteNeedleBlobResponse - 48, // 87: 
volume_server_pb.VolumeServer.ReadAllNeedles:output_type -> volume_server_pb.ReadAllNeedlesResponse - 50, // 88: volume_server_pb.VolumeServer.VolumeTailSender:output_type -> volume_server_pb.VolumeTailSenderResponse - 52, // 89: volume_server_pb.VolumeServer.VolumeTailReceiver:output_type -> volume_server_pb.VolumeTailReceiverResponse - 54, // 90: volume_server_pb.VolumeServer.VolumeEcShardsGenerate:output_type -> volume_server_pb.VolumeEcShardsGenerateResponse - 56, // 91: volume_server_pb.VolumeServer.VolumeEcShardsRebuild:output_type -> volume_server_pb.VolumeEcShardsRebuildResponse - 58, // 92: volume_server_pb.VolumeServer.VolumeEcShardsCopy:output_type -> volume_server_pb.VolumeEcShardsCopyResponse - 60, // 93: volume_server_pb.VolumeServer.VolumeEcShardsDelete:output_type -> volume_server_pb.VolumeEcShardsDeleteResponse - 62, // 94: volume_server_pb.VolumeServer.VolumeEcShardsMount:output_type -> volume_server_pb.VolumeEcShardsMountResponse - 64, // 95: volume_server_pb.VolumeServer.VolumeEcShardsUnmount:output_type -> volume_server_pb.VolumeEcShardsUnmountResponse - 66, // 96: volume_server_pb.VolumeServer.VolumeEcShardRead:output_type -> volume_server_pb.VolumeEcShardReadResponse - 68, // 97: volume_server_pb.VolumeServer.VolumeEcBlobDelete:output_type -> volume_server_pb.VolumeEcBlobDeleteResponse - 70, // 98: volume_server_pb.VolumeServer.VolumeEcShardsToVolume:output_type -> volume_server_pb.VolumeEcShardsToVolumeResponse - 72, // 99: volume_server_pb.VolumeServer.VolumeEcShardsInfo:output_type -> volume_server_pb.VolumeEcShardsInfoResponse - 83, // 100: volume_server_pb.VolumeServer.VolumeTierMoveDatToRemote:output_type -> volume_server_pb.VolumeTierMoveDatToRemoteResponse - 85, // 101: volume_server_pb.VolumeServer.VolumeTierMoveDatFromRemote:output_type -> volume_server_pb.VolumeTierMoveDatFromRemoteResponse - 87, // 102: volume_server_pb.VolumeServer.VolumeServerStatus:output_type -> volume_server_pb.VolumeServerStatusResponse - 89, // 103: 
volume_server_pb.VolumeServer.VolumeServerLeave:output_type -> volume_server_pb.VolumeServerLeaveResponse - 91, // 104: volume_server_pb.VolumeServer.FetchAndWriteNeedle:output_type -> volume_server_pb.FetchAndWriteNeedleResponse - 93, // 105: volume_server_pb.VolumeServer.Query:output_type -> volume_server_pb.QueriedStripe - 95, // 106: volume_server_pb.VolumeServer.VolumeNeedleStatus:output_type -> volume_server_pb.VolumeNeedleStatusResponse - 97, // 107: volume_server_pb.VolumeServer.Ping:output_type -> volume_server_pb.PingResponse + 3, // 0: volume_server_pb.BatchDeleteResponse.results:type_name -> volume_server_pb.DeleteResult + 40, // 1: volume_server_pb.ReceiveFileRequest.info:type_name -> volume_server_pb.ReceiveFileInfo + 74, // 2: volume_server_pb.VolumeEcShardsInfoResponse.ec_shard_infos:type_name -> volume_server_pb.EcShardInfo + 80, // 3: volume_server_pb.ReadVolumeFileStatusResponse.volume_info:type_name -> volume_server_pb.VolumeInfo + 79, // 4: volume_server_pb.VolumeInfo.files:type_name -> volume_server_pb.RemoteFile + 81, // 5: volume_server_pb.VolumeInfo.ec_shard_config:type_name -> volume_server_pb.EcShardConfig + 79, // 6: volume_server_pb.OldVersionVolumeInfo.files:type_name -> volume_server_pb.RemoteFile + 77, // 7: volume_server_pb.VolumeServerStatusResponse.disk_statuses:type_name -> volume_server_pb.DiskStatus + 78, // 8: volume_server_pb.VolumeServerStatusResponse.memory_status:type_name -> volume_server_pb.MemStatus + 99, // 9: volume_server_pb.FetchAndWriteNeedleRequest.replicas:type_name -> volume_server_pb.FetchAndWriteNeedleRequest.Replica + 108, // 10: volume_server_pb.FetchAndWriteNeedleRequest.remote_conf:type_name -> remote_pb.RemoteConf + 109, // 11: volume_server_pb.FetchAndWriteNeedleRequest.remote_location:type_name -> remote_pb.RemoteStorageLocation + 100, // 12: volume_server_pb.QueryRequest.filter:type_name -> volume_server_pb.QueryRequest.Filter + 101, // 13: volume_server_pb.QueryRequest.input_serialization:type_name -> 
volume_server_pb.QueryRequest.InputSerialization + 102, // 14: volume_server_pb.QueryRequest.output_serialization:type_name -> volume_server_pb.QueryRequest.OutputSerialization + 103, // 15: volume_server_pb.QueryRequest.InputSerialization.csv_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.CSVInput + 104, // 16: volume_server_pb.QueryRequest.InputSerialization.json_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.JSONInput + 105, // 17: volume_server_pb.QueryRequest.InputSerialization.parquet_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.ParquetInput + 106, // 18: volume_server_pb.QueryRequest.OutputSerialization.csv_output:type_name -> volume_server_pb.QueryRequest.OutputSerialization.CSVOutput + 107, // 19: volume_server_pb.QueryRequest.OutputSerialization.json_output:type_name -> volume_server_pb.QueryRequest.OutputSerialization.JSONOutput + 1, // 20: volume_server_pb.VolumeServer.BatchDelete:input_type -> volume_server_pb.BatchDeleteRequest + 5, // 21: volume_server_pb.VolumeServer.VacuumVolumeCheck:input_type -> volume_server_pb.VacuumVolumeCheckRequest + 7, // 22: volume_server_pb.VolumeServer.VacuumVolumeCompact:input_type -> volume_server_pb.VacuumVolumeCompactRequest + 9, // 23: volume_server_pb.VolumeServer.VacuumVolumeCommit:input_type -> volume_server_pb.VacuumVolumeCommitRequest + 11, // 24: volume_server_pb.VolumeServer.VacuumVolumeCleanup:input_type -> volume_server_pb.VacuumVolumeCleanupRequest + 13, // 25: volume_server_pb.VolumeServer.DeleteCollection:input_type -> volume_server_pb.DeleteCollectionRequest + 15, // 26: volume_server_pb.VolumeServer.AllocateVolume:input_type -> volume_server_pb.AllocateVolumeRequest + 17, // 27: volume_server_pb.VolumeServer.VolumeSyncStatus:input_type -> volume_server_pb.VolumeSyncStatusRequest + 19, // 28: volume_server_pb.VolumeServer.VolumeIncrementalCopy:input_type -> volume_server_pb.VolumeIncrementalCopyRequest + 21, // 29: 
volume_server_pb.VolumeServer.VolumeMount:input_type -> volume_server_pb.VolumeMountRequest + 23, // 30: volume_server_pb.VolumeServer.VolumeUnmount:input_type -> volume_server_pb.VolumeUnmountRequest + 25, // 31: volume_server_pb.VolumeServer.VolumeDelete:input_type -> volume_server_pb.VolumeDeleteRequest + 27, // 32: volume_server_pb.VolumeServer.VolumeMarkReadonly:input_type -> volume_server_pb.VolumeMarkReadonlyRequest + 29, // 33: volume_server_pb.VolumeServer.VolumeMarkWritable:input_type -> volume_server_pb.VolumeMarkWritableRequest + 31, // 34: volume_server_pb.VolumeServer.VolumeConfigure:input_type -> volume_server_pb.VolumeConfigureRequest + 33, // 35: volume_server_pb.VolumeServer.VolumeStatus:input_type -> volume_server_pb.VolumeStatusRequest + 35, // 36: volume_server_pb.VolumeServer.VolumeCopy:input_type -> volume_server_pb.VolumeCopyRequest + 75, // 37: volume_server_pb.VolumeServer.ReadVolumeFileStatus:input_type -> volume_server_pb.ReadVolumeFileStatusRequest + 37, // 38: volume_server_pb.VolumeServer.CopyFile:input_type -> volume_server_pb.CopyFileRequest + 39, // 39: volume_server_pb.VolumeServer.ReceiveFile:input_type -> volume_server_pb.ReceiveFileRequest + 42, // 40: volume_server_pb.VolumeServer.ReadNeedleBlob:input_type -> volume_server_pb.ReadNeedleBlobRequest + 44, // 41: volume_server_pb.VolumeServer.ReadNeedleMeta:input_type -> volume_server_pb.ReadNeedleMetaRequest + 46, // 42: volume_server_pb.VolumeServer.WriteNeedleBlob:input_type -> volume_server_pb.WriteNeedleBlobRequest + 48, // 43: volume_server_pb.VolumeServer.ReadAllNeedles:input_type -> volume_server_pb.ReadAllNeedlesRequest + 50, // 44: volume_server_pb.VolumeServer.VolumeTailSender:input_type -> volume_server_pb.VolumeTailSenderRequest + 52, // 45: volume_server_pb.VolumeServer.VolumeTailReceiver:input_type -> volume_server_pb.VolumeTailReceiverRequest + 54, // 46: volume_server_pb.VolumeServer.VolumeEcShardsGenerate:input_type -> 
volume_server_pb.VolumeEcShardsGenerateRequest + 56, // 47: volume_server_pb.VolumeServer.VolumeEcShardsRebuild:input_type -> volume_server_pb.VolumeEcShardsRebuildRequest + 58, // 48: volume_server_pb.VolumeServer.VolumeEcShardsCopy:input_type -> volume_server_pb.VolumeEcShardsCopyRequest + 60, // 49: volume_server_pb.VolumeServer.VolumeEcShardsDelete:input_type -> volume_server_pb.VolumeEcShardsDeleteRequest + 62, // 50: volume_server_pb.VolumeServer.VolumeEcShardsMount:input_type -> volume_server_pb.VolumeEcShardsMountRequest + 64, // 51: volume_server_pb.VolumeServer.VolumeEcShardsUnmount:input_type -> volume_server_pb.VolumeEcShardsUnmountRequest + 66, // 52: volume_server_pb.VolumeServer.VolumeEcShardRead:input_type -> volume_server_pb.VolumeEcShardReadRequest + 68, // 53: volume_server_pb.VolumeServer.VolumeEcBlobDelete:input_type -> volume_server_pb.VolumeEcBlobDeleteRequest + 70, // 54: volume_server_pb.VolumeServer.VolumeEcShardsToVolume:input_type -> volume_server_pb.VolumeEcShardsToVolumeRequest + 72, // 55: volume_server_pb.VolumeServer.VolumeEcShardsInfo:input_type -> volume_server_pb.VolumeEcShardsInfoRequest + 83, // 56: volume_server_pb.VolumeServer.VolumeTierMoveDatToRemote:input_type -> volume_server_pb.VolumeTierMoveDatToRemoteRequest + 85, // 57: volume_server_pb.VolumeServer.VolumeTierMoveDatFromRemote:input_type -> volume_server_pb.VolumeTierMoveDatFromRemoteRequest + 87, // 58: volume_server_pb.VolumeServer.VolumeServerStatus:input_type -> volume_server_pb.VolumeServerStatusRequest + 89, // 59: volume_server_pb.VolumeServer.VolumeServerLeave:input_type -> volume_server_pb.VolumeServerLeaveRequest + 91, // 60: volume_server_pb.VolumeServer.FetchAndWriteNeedle:input_type -> volume_server_pb.FetchAndWriteNeedleRequest + 93, // 61: volume_server_pb.VolumeServer.Query:input_type -> volume_server_pb.QueryRequest + 95, // 62: volume_server_pb.VolumeServer.VolumeNeedleStatus:input_type -> volume_server_pb.VolumeNeedleStatusRequest + 97, // 63: 
volume_server_pb.VolumeServer.Ping:input_type -> volume_server_pb.PingRequest + 2, // 64: volume_server_pb.VolumeServer.BatchDelete:output_type -> volume_server_pb.BatchDeleteResponse + 6, // 65: volume_server_pb.VolumeServer.VacuumVolumeCheck:output_type -> volume_server_pb.VacuumVolumeCheckResponse + 8, // 66: volume_server_pb.VolumeServer.VacuumVolumeCompact:output_type -> volume_server_pb.VacuumVolumeCompactResponse + 10, // 67: volume_server_pb.VolumeServer.VacuumVolumeCommit:output_type -> volume_server_pb.VacuumVolumeCommitResponse + 12, // 68: volume_server_pb.VolumeServer.VacuumVolumeCleanup:output_type -> volume_server_pb.VacuumVolumeCleanupResponse + 14, // 69: volume_server_pb.VolumeServer.DeleteCollection:output_type -> volume_server_pb.DeleteCollectionResponse + 16, // 70: volume_server_pb.VolumeServer.AllocateVolume:output_type -> volume_server_pb.AllocateVolumeResponse + 18, // 71: volume_server_pb.VolumeServer.VolumeSyncStatus:output_type -> volume_server_pb.VolumeSyncStatusResponse + 20, // 72: volume_server_pb.VolumeServer.VolumeIncrementalCopy:output_type -> volume_server_pb.VolumeIncrementalCopyResponse + 22, // 73: volume_server_pb.VolumeServer.VolumeMount:output_type -> volume_server_pb.VolumeMountResponse + 24, // 74: volume_server_pb.VolumeServer.VolumeUnmount:output_type -> volume_server_pb.VolumeUnmountResponse + 26, // 75: volume_server_pb.VolumeServer.VolumeDelete:output_type -> volume_server_pb.VolumeDeleteResponse + 28, // 76: volume_server_pb.VolumeServer.VolumeMarkReadonly:output_type -> volume_server_pb.VolumeMarkReadonlyResponse + 30, // 77: volume_server_pb.VolumeServer.VolumeMarkWritable:output_type -> volume_server_pb.VolumeMarkWritableResponse + 32, // 78: volume_server_pb.VolumeServer.VolumeConfigure:output_type -> volume_server_pb.VolumeConfigureResponse + 34, // 79: volume_server_pb.VolumeServer.VolumeStatus:output_type -> volume_server_pb.VolumeStatusResponse + 36, // 80: 
volume_server_pb.VolumeServer.VolumeCopy:output_type -> volume_server_pb.VolumeCopyResponse + 76, // 81: volume_server_pb.VolumeServer.ReadVolumeFileStatus:output_type -> volume_server_pb.ReadVolumeFileStatusResponse + 38, // 82: volume_server_pb.VolumeServer.CopyFile:output_type -> volume_server_pb.CopyFileResponse + 41, // 83: volume_server_pb.VolumeServer.ReceiveFile:output_type -> volume_server_pb.ReceiveFileResponse + 43, // 84: volume_server_pb.VolumeServer.ReadNeedleBlob:output_type -> volume_server_pb.ReadNeedleBlobResponse + 45, // 85: volume_server_pb.VolumeServer.ReadNeedleMeta:output_type -> volume_server_pb.ReadNeedleMetaResponse + 47, // 86: volume_server_pb.VolumeServer.WriteNeedleBlob:output_type -> volume_server_pb.WriteNeedleBlobResponse + 49, // 87: volume_server_pb.VolumeServer.ReadAllNeedles:output_type -> volume_server_pb.ReadAllNeedlesResponse + 51, // 88: volume_server_pb.VolumeServer.VolumeTailSender:output_type -> volume_server_pb.VolumeTailSenderResponse + 53, // 89: volume_server_pb.VolumeServer.VolumeTailReceiver:output_type -> volume_server_pb.VolumeTailReceiverResponse + 55, // 90: volume_server_pb.VolumeServer.VolumeEcShardsGenerate:output_type -> volume_server_pb.VolumeEcShardsGenerateResponse + 57, // 91: volume_server_pb.VolumeServer.VolumeEcShardsRebuild:output_type -> volume_server_pb.VolumeEcShardsRebuildResponse + 59, // 92: volume_server_pb.VolumeServer.VolumeEcShardsCopy:output_type -> volume_server_pb.VolumeEcShardsCopyResponse + 61, // 93: volume_server_pb.VolumeServer.VolumeEcShardsDelete:output_type -> volume_server_pb.VolumeEcShardsDeleteResponse + 63, // 94: volume_server_pb.VolumeServer.VolumeEcShardsMount:output_type -> volume_server_pb.VolumeEcShardsMountResponse + 65, // 95: volume_server_pb.VolumeServer.VolumeEcShardsUnmount:output_type -> volume_server_pb.VolumeEcShardsUnmountResponse + 67, // 96: volume_server_pb.VolumeServer.VolumeEcShardRead:output_type -> volume_server_pb.VolumeEcShardReadResponse + 69, // 
97: volume_server_pb.VolumeServer.VolumeEcBlobDelete:output_type -> volume_server_pb.VolumeEcBlobDeleteResponse + 71, // 98: volume_server_pb.VolumeServer.VolumeEcShardsToVolume:output_type -> volume_server_pb.VolumeEcShardsToVolumeResponse + 73, // 99: volume_server_pb.VolumeServer.VolumeEcShardsInfo:output_type -> volume_server_pb.VolumeEcShardsInfoResponse + 84, // 100: volume_server_pb.VolumeServer.VolumeTierMoveDatToRemote:output_type -> volume_server_pb.VolumeTierMoveDatToRemoteResponse + 86, // 101: volume_server_pb.VolumeServer.VolumeTierMoveDatFromRemote:output_type -> volume_server_pb.VolumeTierMoveDatFromRemoteResponse + 88, // 102: volume_server_pb.VolumeServer.VolumeServerStatus:output_type -> volume_server_pb.VolumeServerStatusResponse + 90, // 103: volume_server_pb.VolumeServer.VolumeServerLeave:output_type -> volume_server_pb.VolumeServerLeaveResponse + 92, // 104: volume_server_pb.VolumeServer.FetchAndWriteNeedle:output_type -> volume_server_pb.FetchAndWriteNeedleResponse + 94, // 105: volume_server_pb.VolumeServer.Query:output_type -> volume_server_pb.QueriedStripe + 96, // 106: volume_server_pb.VolumeServer.VolumeNeedleStatus:output_type -> volume_server_pb.VolumeNeedleStatusResponse + 98, // 107: volume_server_pb.VolumeServer.Ping:output_type -> volume_server_pb.PingResponse 64, // [64:108] is the sub-list for method output_type 20, // [20:64] is the sub-list for method input_type 20, // [20:20] is the sub-list for extension type_name @@ -6911,7 +6960,7 @@ func file_volume_server_proto_init() { if File_volume_server_proto != nil { return } - file_volume_server_proto_msgTypes[38].OneofWrappers = []any{ + file_volume_server_proto_msgTypes[39].OneofWrappers = []any{ (*ReceiveFileRequest_Info)(nil), (*ReceiveFileRequest_FileContent)(nil), } @@ -6921,7 +6970,7 @@ func file_volume_server_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_volume_server_proto_rawDesc), 
len(file_volume_server_proto_rawDesc)), NumEnums: 0, - NumMessages: 107, + NumMessages: 108, NumExtensions: 0, NumServices: 1, }, diff --git a/weed/pb/volume_server_pb/volume_server_grpc.pb.go b/weed/pb/volume_server_pb/volume_server_grpc.pb.go index 690ee99fc..fff1715c7 100644 --- a/weed/pb/volume_server_pb/volume_server_grpc.pb.go +++ b/weed/pb/volume_server_pb/volume_server_grpc.pb.go @@ -16,55 +16,7 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.64.0 or later. -const _ = grpc.SupportPackageIsVersion9 - -const ( - VolumeServer_BatchDelete_FullMethodName = "/volume_server_pb.VolumeServer/BatchDelete" - VolumeServer_VacuumVolumeCheck_FullMethodName = "/volume_server_pb.VolumeServer/VacuumVolumeCheck" - VolumeServer_VacuumVolumeCompact_FullMethodName = "/volume_server_pb.VolumeServer/VacuumVolumeCompact" - VolumeServer_VacuumVolumeCommit_FullMethodName = "/volume_server_pb.VolumeServer/VacuumVolumeCommit" - VolumeServer_VacuumVolumeCleanup_FullMethodName = "/volume_server_pb.VolumeServer/VacuumVolumeCleanup" - VolumeServer_DeleteCollection_FullMethodName = "/volume_server_pb.VolumeServer/DeleteCollection" - VolumeServer_AllocateVolume_FullMethodName = "/volume_server_pb.VolumeServer/AllocateVolume" - VolumeServer_VolumeSyncStatus_FullMethodName = "/volume_server_pb.VolumeServer/VolumeSyncStatus" - VolumeServer_VolumeIncrementalCopy_FullMethodName = "/volume_server_pb.VolumeServer/VolumeIncrementalCopy" - VolumeServer_VolumeMount_FullMethodName = "/volume_server_pb.VolumeServer/VolumeMount" - VolumeServer_VolumeUnmount_FullMethodName = "/volume_server_pb.VolumeServer/VolumeUnmount" - VolumeServer_VolumeDelete_FullMethodName = "/volume_server_pb.VolumeServer/VolumeDelete" - VolumeServer_VolumeMarkReadonly_FullMethodName = "/volume_server_pb.VolumeServer/VolumeMarkReadonly" - VolumeServer_VolumeMarkWritable_FullMethodName = 
"/volume_server_pb.VolumeServer/VolumeMarkWritable" - VolumeServer_VolumeConfigure_FullMethodName = "/volume_server_pb.VolumeServer/VolumeConfigure" - VolumeServer_VolumeStatus_FullMethodName = "/volume_server_pb.VolumeServer/VolumeStatus" - VolumeServer_VolumeCopy_FullMethodName = "/volume_server_pb.VolumeServer/VolumeCopy" - VolumeServer_ReadVolumeFileStatus_FullMethodName = "/volume_server_pb.VolumeServer/ReadVolumeFileStatus" - VolumeServer_CopyFile_FullMethodName = "/volume_server_pb.VolumeServer/CopyFile" - VolumeServer_ReceiveFile_FullMethodName = "/volume_server_pb.VolumeServer/ReceiveFile" - VolumeServer_ReadNeedleBlob_FullMethodName = "/volume_server_pb.VolumeServer/ReadNeedleBlob" - VolumeServer_ReadNeedleMeta_FullMethodName = "/volume_server_pb.VolumeServer/ReadNeedleMeta" - VolumeServer_WriteNeedleBlob_FullMethodName = "/volume_server_pb.VolumeServer/WriteNeedleBlob" - VolumeServer_ReadAllNeedles_FullMethodName = "/volume_server_pb.VolumeServer/ReadAllNeedles" - VolumeServer_VolumeTailSender_FullMethodName = "/volume_server_pb.VolumeServer/VolumeTailSender" - VolumeServer_VolumeTailReceiver_FullMethodName = "/volume_server_pb.VolumeServer/VolumeTailReceiver" - VolumeServer_VolumeEcShardsGenerate_FullMethodName = "/volume_server_pb.VolumeServer/VolumeEcShardsGenerate" - VolumeServer_VolumeEcShardsRebuild_FullMethodName = "/volume_server_pb.VolumeServer/VolumeEcShardsRebuild" - VolumeServer_VolumeEcShardsCopy_FullMethodName = "/volume_server_pb.VolumeServer/VolumeEcShardsCopy" - VolumeServer_VolumeEcShardsDelete_FullMethodName = "/volume_server_pb.VolumeServer/VolumeEcShardsDelete" - VolumeServer_VolumeEcShardsMount_FullMethodName = "/volume_server_pb.VolumeServer/VolumeEcShardsMount" - VolumeServer_VolumeEcShardsUnmount_FullMethodName = "/volume_server_pb.VolumeServer/VolumeEcShardsUnmount" - VolumeServer_VolumeEcShardRead_FullMethodName = "/volume_server_pb.VolumeServer/VolumeEcShardRead" - VolumeServer_VolumeEcBlobDelete_FullMethodName = 
"/volume_server_pb.VolumeServer/VolumeEcBlobDelete" - VolumeServer_VolumeEcShardsToVolume_FullMethodName = "/volume_server_pb.VolumeServer/VolumeEcShardsToVolume" - VolumeServer_VolumeEcShardsInfo_FullMethodName = "/volume_server_pb.VolumeServer/VolumeEcShardsInfo" - VolumeServer_VolumeTierMoveDatToRemote_FullMethodName = "/volume_server_pb.VolumeServer/VolumeTierMoveDatToRemote" - VolumeServer_VolumeTierMoveDatFromRemote_FullMethodName = "/volume_server_pb.VolumeServer/VolumeTierMoveDatFromRemote" - VolumeServer_VolumeServerStatus_FullMethodName = "/volume_server_pb.VolumeServer/VolumeServerStatus" - VolumeServer_VolumeServerLeave_FullMethodName = "/volume_server_pb.VolumeServer/VolumeServerLeave" - VolumeServer_FetchAndWriteNeedle_FullMethodName = "/volume_server_pb.VolumeServer/FetchAndWriteNeedle" - VolumeServer_Query_FullMethodName = "/volume_server_pb.VolumeServer/Query" - VolumeServer_VolumeNeedleStatus_FullMethodName = "/volume_server_pb.VolumeServer/VolumeNeedleStatus" - VolumeServer_Ping_FullMethodName = "/volume_server_pb.VolumeServer/Ping" -) +const _ = grpc.SupportPackageIsVersion7 // VolumeServerClient is the client API for VolumeServer service. // @@ -73,13 +25,13 @@ type VolumeServerClient interface { // Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas. 
BatchDelete(ctx context.Context, in *BatchDeleteRequest, opts ...grpc.CallOption) (*BatchDeleteResponse, error) VacuumVolumeCheck(ctx context.Context, in *VacuumVolumeCheckRequest, opts ...grpc.CallOption) (*VacuumVolumeCheckResponse, error) - VacuumVolumeCompact(ctx context.Context, in *VacuumVolumeCompactRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[VacuumVolumeCompactResponse], error) + VacuumVolumeCompact(ctx context.Context, in *VacuumVolumeCompactRequest, opts ...grpc.CallOption) (VolumeServer_VacuumVolumeCompactClient, error) VacuumVolumeCommit(ctx context.Context, in *VacuumVolumeCommitRequest, opts ...grpc.CallOption) (*VacuumVolumeCommitResponse, error) VacuumVolumeCleanup(ctx context.Context, in *VacuumVolumeCleanupRequest, opts ...grpc.CallOption) (*VacuumVolumeCleanupResponse, error) DeleteCollection(ctx context.Context, in *DeleteCollectionRequest, opts ...grpc.CallOption) (*DeleteCollectionResponse, error) AllocateVolume(ctx context.Context, in *AllocateVolumeRequest, opts ...grpc.CallOption) (*AllocateVolumeResponse, error) VolumeSyncStatus(ctx context.Context, in *VolumeSyncStatusRequest, opts ...grpc.CallOption) (*VolumeSyncStatusResponse, error) - VolumeIncrementalCopy(ctx context.Context, in *VolumeIncrementalCopyRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[VolumeIncrementalCopyResponse], error) + VolumeIncrementalCopy(ctx context.Context, in *VolumeIncrementalCopyRequest, opts ...grpc.CallOption) (VolumeServer_VolumeIncrementalCopyClient, error) VolumeMount(ctx context.Context, in *VolumeMountRequest, opts ...grpc.CallOption) (*VolumeMountResponse, error) VolumeUnmount(ctx context.Context, in *VolumeUnmountRequest, opts ...grpc.CallOption) (*VolumeUnmountResponse, error) VolumeDelete(ctx context.Context, in *VolumeDeleteRequest, opts ...grpc.CallOption) (*VolumeDeleteResponse, error) @@ -88,15 +40,15 @@ type VolumeServerClient interface { VolumeConfigure(ctx context.Context, in *VolumeConfigureRequest, opts 
...grpc.CallOption) (*VolumeConfigureResponse, error) VolumeStatus(ctx context.Context, in *VolumeStatusRequest, opts ...grpc.CallOption) (*VolumeStatusResponse, error) // copy the .idx .dat files, and mount this volume - VolumeCopy(ctx context.Context, in *VolumeCopyRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[VolumeCopyResponse], error) + VolumeCopy(ctx context.Context, in *VolumeCopyRequest, opts ...grpc.CallOption) (VolumeServer_VolumeCopyClient, error) ReadVolumeFileStatus(ctx context.Context, in *ReadVolumeFileStatusRequest, opts ...grpc.CallOption) (*ReadVolumeFileStatusResponse, error) - CopyFile(ctx context.Context, in *CopyFileRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[CopyFileResponse], error) - ReceiveFile(ctx context.Context, opts ...grpc.CallOption) (grpc.ClientStreamingClient[ReceiveFileRequest, ReceiveFileResponse], error) + CopyFile(ctx context.Context, in *CopyFileRequest, opts ...grpc.CallOption) (VolumeServer_CopyFileClient, error) + ReceiveFile(ctx context.Context, opts ...grpc.CallOption) (VolumeServer_ReceiveFileClient, error) ReadNeedleBlob(ctx context.Context, in *ReadNeedleBlobRequest, opts ...grpc.CallOption) (*ReadNeedleBlobResponse, error) ReadNeedleMeta(ctx context.Context, in *ReadNeedleMetaRequest, opts ...grpc.CallOption) (*ReadNeedleMetaResponse, error) WriteNeedleBlob(ctx context.Context, in *WriteNeedleBlobRequest, opts ...grpc.CallOption) (*WriteNeedleBlobResponse, error) - ReadAllNeedles(ctx context.Context, in *ReadAllNeedlesRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[ReadAllNeedlesResponse], error) - VolumeTailSender(ctx context.Context, in *VolumeTailSenderRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[VolumeTailSenderResponse], error) + ReadAllNeedles(ctx context.Context, in *ReadAllNeedlesRequest, opts ...grpc.CallOption) (VolumeServer_ReadAllNeedlesClient, error) + VolumeTailSender(ctx context.Context, in *VolumeTailSenderRequest, opts 
...grpc.CallOption) (VolumeServer_VolumeTailSenderClient, error) VolumeTailReceiver(ctx context.Context, in *VolumeTailReceiverRequest, opts ...grpc.CallOption) (*VolumeTailReceiverResponse, error) // erasure coding VolumeEcShardsGenerate(ctx context.Context, in *VolumeEcShardsGenerateRequest, opts ...grpc.CallOption) (*VolumeEcShardsGenerateResponse, error) @@ -105,19 +57,19 @@ type VolumeServerClient interface { VolumeEcShardsDelete(ctx context.Context, in *VolumeEcShardsDeleteRequest, opts ...grpc.CallOption) (*VolumeEcShardsDeleteResponse, error) VolumeEcShardsMount(ctx context.Context, in *VolumeEcShardsMountRequest, opts ...grpc.CallOption) (*VolumeEcShardsMountResponse, error) VolumeEcShardsUnmount(ctx context.Context, in *VolumeEcShardsUnmountRequest, opts ...grpc.CallOption) (*VolumeEcShardsUnmountResponse, error) - VolumeEcShardRead(ctx context.Context, in *VolumeEcShardReadRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[VolumeEcShardReadResponse], error) + VolumeEcShardRead(ctx context.Context, in *VolumeEcShardReadRequest, opts ...grpc.CallOption) (VolumeServer_VolumeEcShardReadClient, error) VolumeEcBlobDelete(ctx context.Context, in *VolumeEcBlobDeleteRequest, opts ...grpc.CallOption) (*VolumeEcBlobDeleteResponse, error) VolumeEcShardsToVolume(ctx context.Context, in *VolumeEcShardsToVolumeRequest, opts ...grpc.CallOption) (*VolumeEcShardsToVolumeResponse, error) VolumeEcShardsInfo(ctx context.Context, in *VolumeEcShardsInfoRequest, opts ...grpc.CallOption) (*VolumeEcShardsInfoResponse, error) // tiered storage - VolumeTierMoveDatToRemote(ctx context.Context, in *VolumeTierMoveDatToRemoteRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[VolumeTierMoveDatToRemoteResponse], error) - VolumeTierMoveDatFromRemote(ctx context.Context, in *VolumeTierMoveDatFromRemoteRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[VolumeTierMoveDatFromRemoteResponse], error) + VolumeTierMoveDatToRemote(ctx context.Context, in 
*VolumeTierMoveDatToRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatToRemoteClient, error) + VolumeTierMoveDatFromRemote(ctx context.Context, in *VolumeTierMoveDatFromRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatFromRemoteClient, error) VolumeServerStatus(ctx context.Context, in *VolumeServerStatusRequest, opts ...grpc.CallOption) (*VolumeServerStatusResponse, error) VolumeServerLeave(ctx context.Context, in *VolumeServerLeaveRequest, opts ...grpc.CallOption) (*VolumeServerLeaveResponse, error) // remote storage FetchAndWriteNeedle(ctx context.Context, in *FetchAndWriteNeedleRequest, opts ...grpc.CallOption) (*FetchAndWriteNeedleResponse, error) // query - Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[QueriedStripe], error) + Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (VolumeServer_QueryClient, error) VolumeNeedleStatus(ctx context.Context, in *VolumeNeedleStatusRequest, opts ...grpc.CallOption) (*VolumeNeedleStatusResponse, error) Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PingResponse, error) } @@ -131,9 +83,8 @@ func NewVolumeServerClient(cc grpc.ClientConnInterface) VolumeServerClient { } func (c *volumeServerClient) BatchDelete(ctx context.Context, in *BatchDeleteRequest, opts ...grpc.CallOption) (*BatchDeleteResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(BatchDeleteResponse) - err := c.cc.Invoke(ctx, VolumeServer_BatchDelete_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/BatchDelete", in, out, opts...) 
if err != nil { return nil, err } @@ -141,22 +92,20 @@ func (c *volumeServerClient) BatchDelete(ctx context.Context, in *BatchDeleteReq } func (c *volumeServerClient) VacuumVolumeCheck(ctx context.Context, in *VacuumVolumeCheckRequest, opts ...grpc.CallOption) (*VacuumVolumeCheckResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VacuumVolumeCheckResponse) - err := c.cc.Invoke(ctx, VolumeServer_VacuumVolumeCheck_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCheck", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *volumeServerClient) VacuumVolumeCompact(ctx context.Context, in *VacuumVolumeCompactRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[VacuumVolumeCompactResponse], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[0], VolumeServer_VacuumVolumeCompact_FullMethodName, cOpts...) +func (c *volumeServerClient) VacuumVolumeCompact(ctx context.Context, in *VacuumVolumeCompactRequest, opts ...grpc.CallOption) (VolumeServer_VacuumVolumeCompactClient, error) { + stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[0], "/volume_server_pb.VolumeServer/VacuumVolumeCompact", opts...) if err != nil { return nil, err } - x := &grpc.GenericClientStream[VacuumVolumeCompactRequest, VacuumVolumeCompactResponse]{ClientStream: stream} + x := &volumeServerVacuumVolumeCompactClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -166,13 +115,26 @@ func (c *volumeServerClient) VacuumVolumeCompact(ctx context.Context, in *Vacuum return x, nil } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
-type VolumeServer_VacuumVolumeCompactClient = grpc.ServerStreamingClient[VacuumVolumeCompactResponse] +type VolumeServer_VacuumVolumeCompactClient interface { + Recv() (*VacuumVolumeCompactResponse, error) + grpc.ClientStream +} + +type volumeServerVacuumVolumeCompactClient struct { + grpc.ClientStream +} + +func (x *volumeServerVacuumVolumeCompactClient) Recv() (*VacuumVolumeCompactResponse, error) { + m := new(VacuumVolumeCompactResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} func (c *volumeServerClient) VacuumVolumeCommit(ctx context.Context, in *VacuumVolumeCommitRequest, opts ...grpc.CallOption) (*VacuumVolumeCommitResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VacuumVolumeCommitResponse) - err := c.cc.Invoke(ctx, VolumeServer_VacuumVolumeCommit_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCommit", in, out, opts...) if err != nil { return nil, err } @@ -180,9 +142,8 @@ func (c *volumeServerClient) VacuumVolumeCommit(ctx context.Context, in *VacuumV } func (c *volumeServerClient) VacuumVolumeCleanup(ctx context.Context, in *VacuumVolumeCleanupRequest, opts ...grpc.CallOption) (*VacuumVolumeCleanupResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VacuumVolumeCleanupResponse) - err := c.cc.Invoke(ctx, VolumeServer_VacuumVolumeCleanup_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCleanup", in, out, opts...) if err != nil { return nil, err } @@ -190,9 +151,8 @@ func (c *volumeServerClient) VacuumVolumeCleanup(ctx context.Context, in *Vacuum } func (c *volumeServerClient) DeleteCollection(ctx context.Context, in *DeleteCollectionRequest, opts ...grpc.CallOption) (*DeleteCollectionResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
out := new(DeleteCollectionResponse) - err := c.cc.Invoke(ctx, VolumeServer_DeleteCollection_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/DeleteCollection", in, out, opts...) if err != nil { return nil, err } @@ -200,9 +160,8 @@ func (c *volumeServerClient) DeleteCollection(ctx context.Context, in *DeleteCol } func (c *volumeServerClient) AllocateVolume(ctx context.Context, in *AllocateVolumeRequest, opts ...grpc.CallOption) (*AllocateVolumeResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(AllocateVolumeResponse) - err := c.cc.Invoke(ctx, VolumeServer_AllocateVolume_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/AllocateVolume", in, out, opts...) if err != nil { return nil, err } @@ -210,22 +169,20 @@ func (c *volumeServerClient) AllocateVolume(ctx context.Context, in *AllocateVol } func (c *volumeServerClient) VolumeSyncStatus(ctx context.Context, in *VolumeSyncStatusRequest, opts ...grpc.CallOption) (*VolumeSyncStatusResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VolumeSyncStatusResponse) - err := c.cc.Invoke(ctx, VolumeServer_VolumeSyncStatus_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeSyncStatus", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *volumeServerClient) VolumeIncrementalCopy(ctx context.Context, in *VolumeIncrementalCopyRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[VolumeIncrementalCopyResponse], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[1], VolumeServer_VolumeIncrementalCopy_FullMethodName, cOpts...) 
+func (c *volumeServerClient) VolumeIncrementalCopy(ctx context.Context, in *VolumeIncrementalCopyRequest, opts ...grpc.CallOption) (VolumeServer_VolumeIncrementalCopyClient, error) { + stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[1], "/volume_server_pb.VolumeServer/VolumeIncrementalCopy", opts...) if err != nil { return nil, err } - x := &grpc.GenericClientStream[VolumeIncrementalCopyRequest, VolumeIncrementalCopyResponse]{ClientStream: stream} + x := &volumeServerVolumeIncrementalCopyClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -235,13 +192,26 @@ func (c *volumeServerClient) VolumeIncrementalCopy(ctx context.Context, in *Volu return x, nil } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type VolumeServer_VolumeIncrementalCopyClient = grpc.ServerStreamingClient[VolumeIncrementalCopyResponse] +type VolumeServer_VolumeIncrementalCopyClient interface { + Recv() (*VolumeIncrementalCopyResponse, error) + grpc.ClientStream +} + +type volumeServerVolumeIncrementalCopyClient struct { + grpc.ClientStream +} + +func (x *volumeServerVolumeIncrementalCopyClient) Recv() (*VolumeIncrementalCopyResponse, error) { + m := new(VolumeIncrementalCopyResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} func (c *volumeServerClient) VolumeMount(ctx context.Context, in *VolumeMountRequest, opts ...grpc.CallOption) (*VolumeMountResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VolumeMountResponse) - err := c.cc.Invoke(ctx, VolumeServer_VolumeMount_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeMount", in, out, opts...) 
if err != nil { return nil, err } @@ -249,9 +219,8 @@ func (c *volumeServerClient) VolumeMount(ctx context.Context, in *VolumeMountReq } func (c *volumeServerClient) VolumeUnmount(ctx context.Context, in *VolumeUnmountRequest, opts ...grpc.CallOption) (*VolumeUnmountResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VolumeUnmountResponse) - err := c.cc.Invoke(ctx, VolumeServer_VolumeUnmount_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeUnmount", in, out, opts...) if err != nil { return nil, err } @@ -259,9 +228,8 @@ func (c *volumeServerClient) VolumeUnmount(ctx context.Context, in *VolumeUnmoun } func (c *volumeServerClient) VolumeDelete(ctx context.Context, in *VolumeDeleteRequest, opts ...grpc.CallOption) (*VolumeDeleteResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VolumeDeleteResponse) - err := c.cc.Invoke(ctx, VolumeServer_VolumeDelete_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeDelete", in, out, opts...) if err != nil { return nil, err } @@ -269,9 +237,8 @@ func (c *volumeServerClient) VolumeDelete(ctx context.Context, in *VolumeDeleteR } func (c *volumeServerClient) VolumeMarkReadonly(ctx context.Context, in *VolumeMarkReadonlyRequest, opts ...grpc.CallOption) (*VolumeMarkReadonlyResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VolumeMarkReadonlyResponse) - err := c.cc.Invoke(ctx, VolumeServer_VolumeMarkReadonly_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeMarkReadonly", in, out, opts...) 
if err != nil { return nil, err } @@ -279,9 +246,8 @@ func (c *volumeServerClient) VolumeMarkReadonly(ctx context.Context, in *VolumeM } func (c *volumeServerClient) VolumeMarkWritable(ctx context.Context, in *VolumeMarkWritableRequest, opts ...grpc.CallOption) (*VolumeMarkWritableResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VolumeMarkWritableResponse) - err := c.cc.Invoke(ctx, VolumeServer_VolumeMarkWritable_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeMarkWritable", in, out, opts...) if err != nil { return nil, err } @@ -289,9 +255,8 @@ func (c *volumeServerClient) VolumeMarkWritable(ctx context.Context, in *VolumeM } func (c *volumeServerClient) VolumeConfigure(ctx context.Context, in *VolumeConfigureRequest, opts ...grpc.CallOption) (*VolumeConfigureResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VolumeConfigureResponse) - err := c.cc.Invoke(ctx, VolumeServer_VolumeConfigure_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeConfigure", in, out, opts...) if err != nil { return nil, err } @@ -299,22 +264,20 @@ func (c *volumeServerClient) VolumeConfigure(ctx context.Context, in *VolumeConf } func (c *volumeServerClient) VolumeStatus(ctx context.Context, in *VolumeStatusRequest, opts ...grpc.CallOption) (*VolumeStatusResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VolumeStatusResponse) - err := c.cc.Invoke(ctx, VolumeServer_VolumeStatus_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeStatus", in, out, opts...) 
if err != nil { return nil, err } return out, nil } -func (c *volumeServerClient) VolumeCopy(ctx context.Context, in *VolumeCopyRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[VolumeCopyResponse], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[2], VolumeServer_VolumeCopy_FullMethodName, cOpts...) +func (c *volumeServerClient) VolumeCopy(ctx context.Context, in *VolumeCopyRequest, opts ...grpc.CallOption) (VolumeServer_VolumeCopyClient, error) { + stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[2], "/volume_server_pb.VolumeServer/VolumeCopy", opts...) if err != nil { return nil, err } - x := &grpc.GenericClientStream[VolumeCopyRequest, VolumeCopyResponse]{ClientStream: stream} + x := &volumeServerVolumeCopyClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -324,26 +287,38 @@ func (c *volumeServerClient) VolumeCopy(ctx context.Context, in *VolumeCopyReque return x, nil } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type VolumeServer_VolumeCopyClient = grpc.ServerStreamingClient[VolumeCopyResponse] +type VolumeServer_VolumeCopyClient interface { + Recv() (*VolumeCopyResponse, error) + grpc.ClientStream +} + +type volumeServerVolumeCopyClient struct { + grpc.ClientStream +} + +func (x *volumeServerVolumeCopyClient) Recv() (*VolumeCopyResponse, error) { + m := new(VolumeCopyResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} func (c *volumeServerClient) ReadVolumeFileStatus(ctx context.Context, in *ReadVolumeFileStatusRequest, opts ...grpc.CallOption) (*ReadVolumeFileStatusResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
out := new(ReadVolumeFileStatusResponse) - err := c.cc.Invoke(ctx, VolumeServer_ReadVolumeFileStatus_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/ReadVolumeFileStatus", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *volumeServerClient) CopyFile(ctx context.Context, in *CopyFileRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[CopyFileResponse], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[3], VolumeServer_CopyFile_FullMethodName, cOpts...) +func (c *volumeServerClient) CopyFile(ctx context.Context, in *CopyFileRequest, opts ...grpc.CallOption) (VolumeServer_CopyFileClient, error) { + stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[3], "/volume_server_pb.VolumeServer/CopyFile", opts...) if err != nil { return nil, err } - x := &grpc.GenericClientStream[CopyFileRequest, CopyFileResponse]{ClientStream: stream} + x := &volumeServerCopyFileClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -353,26 +328,60 @@ func (c *volumeServerClient) CopyFile(ctx context.Context, in *CopyFileRequest, return x, nil } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
-type VolumeServer_CopyFileClient = grpc.ServerStreamingClient[CopyFileResponse] +type VolumeServer_CopyFileClient interface { + Recv() (*CopyFileResponse, error) + grpc.ClientStream +} + +type volumeServerCopyFileClient struct { + grpc.ClientStream +} + +func (x *volumeServerCopyFileClient) Recv() (*CopyFileResponse, error) { + m := new(CopyFileResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} -func (c *volumeServerClient) ReceiveFile(ctx context.Context, opts ...grpc.CallOption) (grpc.ClientStreamingClient[ReceiveFileRequest, ReceiveFileResponse], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[4], VolumeServer_ReceiveFile_FullMethodName, cOpts...) +func (c *volumeServerClient) ReceiveFile(ctx context.Context, opts ...grpc.CallOption) (VolumeServer_ReceiveFileClient, error) { + stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[4], "/volume_server_pb.VolumeServer/ReceiveFile", opts...) if err != nil { return nil, err } - x := &grpc.GenericClientStream[ReceiveFileRequest, ReceiveFileResponse]{ClientStream: stream} + x := &volumeServerReceiveFileClient{stream} return x, nil } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
-type VolumeServer_ReceiveFileClient = grpc.ClientStreamingClient[ReceiveFileRequest, ReceiveFileResponse] +type VolumeServer_ReceiveFileClient interface { + Send(*ReceiveFileRequest) error + CloseAndRecv() (*ReceiveFileResponse, error) + grpc.ClientStream +} + +type volumeServerReceiveFileClient struct { + grpc.ClientStream +} + +func (x *volumeServerReceiveFileClient) Send(m *ReceiveFileRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *volumeServerReceiveFileClient) CloseAndRecv() (*ReceiveFileResponse, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(ReceiveFileResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} func (c *volumeServerClient) ReadNeedleBlob(ctx context.Context, in *ReadNeedleBlobRequest, opts ...grpc.CallOption) (*ReadNeedleBlobResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(ReadNeedleBlobResponse) - err := c.cc.Invoke(ctx, VolumeServer_ReadNeedleBlob_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/ReadNeedleBlob", in, out, opts...) if err != nil { return nil, err } @@ -380,9 +389,8 @@ func (c *volumeServerClient) ReadNeedleBlob(ctx context.Context, in *ReadNeedleB } func (c *volumeServerClient) ReadNeedleMeta(ctx context.Context, in *ReadNeedleMetaRequest, opts ...grpc.CallOption) (*ReadNeedleMetaResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(ReadNeedleMetaResponse) - err := c.cc.Invoke(ctx, VolumeServer_ReadNeedleMeta_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/ReadNeedleMeta", in, out, opts...) 
if err != nil { return nil, err } @@ -390,22 +398,20 @@ func (c *volumeServerClient) ReadNeedleMeta(ctx context.Context, in *ReadNeedleM } func (c *volumeServerClient) WriteNeedleBlob(ctx context.Context, in *WriteNeedleBlobRequest, opts ...grpc.CallOption) (*WriteNeedleBlobResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(WriteNeedleBlobResponse) - err := c.cc.Invoke(ctx, VolumeServer_WriteNeedleBlob_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/WriteNeedleBlob", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *volumeServerClient) ReadAllNeedles(ctx context.Context, in *ReadAllNeedlesRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[ReadAllNeedlesResponse], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[5], VolumeServer_ReadAllNeedles_FullMethodName, cOpts...) +func (c *volumeServerClient) ReadAllNeedles(ctx context.Context, in *ReadAllNeedlesRequest, opts ...grpc.CallOption) (VolumeServer_ReadAllNeedlesClient, error) { + stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[5], "/volume_server_pb.VolumeServer/ReadAllNeedles", opts...) if err != nil { return nil, err } - x := &grpc.GenericClientStream[ReadAllNeedlesRequest, ReadAllNeedlesResponse]{ClientStream: stream} + x := &volumeServerReadAllNeedlesClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -415,16 +421,29 @@ func (c *volumeServerClient) ReadAllNeedles(ctx context.Context, in *ReadAllNeed return x, nil } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
-type VolumeServer_ReadAllNeedlesClient = grpc.ServerStreamingClient[ReadAllNeedlesResponse] +type VolumeServer_ReadAllNeedlesClient interface { + Recv() (*ReadAllNeedlesResponse, error) + grpc.ClientStream +} + +type volumeServerReadAllNeedlesClient struct { + grpc.ClientStream +} + +func (x *volumeServerReadAllNeedlesClient) Recv() (*ReadAllNeedlesResponse, error) { + m := new(ReadAllNeedlesResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} -func (c *volumeServerClient) VolumeTailSender(ctx context.Context, in *VolumeTailSenderRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[VolumeTailSenderResponse], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[6], VolumeServer_VolumeTailSender_FullMethodName, cOpts...) +func (c *volumeServerClient) VolumeTailSender(ctx context.Context, in *VolumeTailSenderRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTailSenderClient, error) { + stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[6], "/volume_server_pb.VolumeServer/VolumeTailSender", opts...) if err != nil { return nil, err } - x := &grpc.GenericClientStream[VolumeTailSenderRequest, VolumeTailSenderResponse]{ClientStream: stream} + x := &volumeServerVolumeTailSenderClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -434,13 +453,26 @@ func (c *volumeServerClient) VolumeTailSender(ctx context.Context, in *VolumeTai return x, nil } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
-type VolumeServer_VolumeTailSenderClient = grpc.ServerStreamingClient[VolumeTailSenderResponse] +type VolumeServer_VolumeTailSenderClient interface { + Recv() (*VolumeTailSenderResponse, error) + grpc.ClientStream +} + +type volumeServerVolumeTailSenderClient struct { + grpc.ClientStream +} + +func (x *volumeServerVolumeTailSenderClient) Recv() (*VolumeTailSenderResponse, error) { + m := new(VolumeTailSenderResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} func (c *volumeServerClient) VolumeTailReceiver(ctx context.Context, in *VolumeTailReceiverRequest, opts ...grpc.CallOption) (*VolumeTailReceiverResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VolumeTailReceiverResponse) - err := c.cc.Invoke(ctx, VolumeServer_VolumeTailReceiver_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeTailReceiver", in, out, opts...) if err != nil { return nil, err } @@ -448,9 +480,8 @@ func (c *volumeServerClient) VolumeTailReceiver(ctx context.Context, in *VolumeT } func (c *volumeServerClient) VolumeEcShardsGenerate(ctx context.Context, in *VolumeEcShardsGenerateRequest, opts ...grpc.CallOption) (*VolumeEcShardsGenerateResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VolumeEcShardsGenerateResponse) - err := c.cc.Invoke(ctx, VolumeServer_VolumeEcShardsGenerate_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsGenerate", in, out, opts...) if err != nil { return nil, err } @@ -458,9 +489,8 @@ func (c *volumeServerClient) VolumeEcShardsGenerate(ctx context.Context, in *Vol } func (c *volumeServerClient) VolumeEcShardsRebuild(ctx context.Context, in *VolumeEcShardsRebuildRequest, opts ...grpc.CallOption) (*VolumeEcShardsRebuildResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
out := new(VolumeEcShardsRebuildResponse) - err := c.cc.Invoke(ctx, VolumeServer_VolumeEcShardsRebuild_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsRebuild", in, out, opts...) if err != nil { return nil, err } @@ -468,9 +498,8 @@ func (c *volumeServerClient) VolumeEcShardsRebuild(ctx context.Context, in *Volu } func (c *volumeServerClient) VolumeEcShardsCopy(ctx context.Context, in *VolumeEcShardsCopyRequest, opts ...grpc.CallOption) (*VolumeEcShardsCopyResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VolumeEcShardsCopyResponse) - err := c.cc.Invoke(ctx, VolumeServer_VolumeEcShardsCopy_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsCopy", in, out, opts...) if err != nil { return nil, err } @@ -478,9 +507,8 @@ func (c *volumeServerClient) VolumeEcShardsCopy(ctx context.Context, in *VolumeE } func (c *volumeServerClient) VolumeEcShardsDelete(ctx context.Context, in *VolumeEcShardsDeleteRequest, opts ...grpc.CallOption) (*VolumeEcShardsDeleteResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VolumeEcShardsDeleteResponse) - err := c.cc.Invoke(ctx, VolumeServer_VolumeEcShardsDelete_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsDelete", in, out, opts...) if err != nil { return nil, err } @@ -488,9 +516,8 @@ func (c *volumeServerClient) VolumeEcShardsDelete(ctx context.Context, in *Volum } func (c *volumeServerClient) VolumeEcShardsMount(ctx context.Context, in *VolumeEcShardsMountRequest, opts ...grpc.CallOption) (*VolumeEcShardsMountResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VolumeEcShardsMountResponse) - err := c.cc.Invoke(ctx, VolumeServer_VolumeEcShardsMount_FullMethodName, in, out, cOpts...) 
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsMount", in, out, opts...) if err != nil { return nil, err } @@ -498,22 +525,20 @@ func (c *volumeServerClient) VolumeEcShardsMount(ctx context.Context, in *Volume } func (c *volumeServerClient) VolumeEcShardsUnmount(ctx context.Context, in *VolumeEcShardsUnmountRequest, opts ...grpc.CallOption) (*VolumeEcShardsUnmountResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VolumeEcShardsUnmountResponse) - err := c.cc.Invoke(ctx, VolumeServer_VolumeEcShardsUnmount_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsUnmount", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *volumeServerClient) VolumeEcShardRead(ctx context.Context, in *VolumeEcShardReadRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[VolumeEcShardReadResponse], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[7], VolumeServer_VolumeEcShardRead_FullMethodName, cOpts...) +func (c *volumeServerClient) VolumeEcShardRead(ctx context.Context, in *VolumeEcShardReadRequest, opts ...grpc.CallOption) (VolumeServer_VolumeEcShardReadClient, error) { + stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[7], "/volume_server_pb.VolumeServer/VolumeEcShardRead", opts...) if err != nil { return nil, err } - x := &grpc.GenericClientStream[VolumeEcShardReadRequest, VolumeEcShardReadResponse]{ClientStream: stream} + x := &volumeServerVolumeEcShardReadClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -523,13 +548,26 @@ func (c *volumeServerClient) VolumeEcShardRead(ctx context.Context, in *VolumeEc return x, nil } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
-type VolumeServer_VolumeEcShardReadClient = grpc.ServerStreamingClient[VolumeEcShardReadResponse] +type VolumeServer_VolumeEcShardReadClient interface { + Recv() (*VolumeEcShardReadResponse, error) + grpc.ClientStream +} + +type volumeServerVolumeEcShardReadClient struct { + grpc.ClientStream +} + +func (x *volumeServerVolumeEcShardReadClient) Recv() (*VolumeEcShardReadResponse, error) { + m := new(VolumeEcShardReadResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} func (c *volumeServerClient) VolumeEcBlobDelete(ctx context.Context, in *VolumeEcBlobDeleteRequest, opts ...grpc.CallOption) (*VolumeEcBlobDeleteResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VolumeEcBlobDeleteResponse) - err := c.cc.Invoke(ctx, VolumeServer_VolumeEcBlobDelete_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcBlobDelete", in, out, opts...) if err != nil { return nil, err } @@ -537,9 +575,8 @@ func (c *volumeServerClient) VolumeEcBlobDelete(ctx context.Context, in *VolumeE } func (c *volumeServerClient) VolumeEcShardsToVolume(ctx context.Context, in *VolumeEcShardsToVolumeRequest, opts ...grpc.CallOption) (*VolumeEcShardsToVolumeResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VolumeEcShardsToVolumeResponse) - err := c.cc.Invoke(ctx, VolumeServer_VolumeEcShardsToVolume_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsToVolume", in, out, opts...) if err != nil { return nil, err } @@ -547,22 +584,20 @@ func (c *volumeServerClient) VolumeEcShardsToVolume(ctx context.Context, in *Vol } func (c *volumeServerClient) VolumeEcShardsInfo(ctx context.Context, in *VolumeEcShardsInfoRequest, opts ...grpc.CallOption) (*VolumeEcShardsInfoResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
out := new(VolumeEcShardsInfoResponse) - err := c.cc.Invoke(ctx, VolumeServer_VolumeEcShardsInfo_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsInfo", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *volumeServerClient) VolumeTierMoveDatToRemote(ctx context.Context, in *VolumeTierMoveDatToRemoteRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[VolumeTierMoveDatToRemoteResponse], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[8], VolumeServer_VolumeTierMoveDatToRemote_FullMethodName, cOpts...) +func (c *volumeServerClient) VolumeTierMoveDatToRemote(ctx context.Context, in *VolumeTierMoveDatToRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatToRemoteClient, error) { + stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[8], "/volume_server_pb.VolumeServer/VolumeTierMoveDatToRemote", opts...) if err != nil { return nil, err } - x := &grpc.GenericClientStream[VolumeTierMoveDatToRemoteRequest, VolumeTierMoveDatToRemoteResponse]{ClientStream: stream} + x := &volumeServerVolumeTierMoveDatToRemoteClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -572,16 +607,29 @@ func (c *volumeServerClient) VolumeTierMoveDatToRemote(ctx context.Context, in * return x, nil } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
-type VolumeServer_VolumeTierMoveDatToRemoteClient = grpc.ServerStreamingClient[VolumeTierMoveDatToRemoteResponse] +type VolumeServer_VolumeTierMoveDatToRemoteClient interface { + Recv() (*VolumeTierMoveDatToRemoteResponse, error) + grpc.ClientStream +} + +type volumeServerVolumeTierMoveDatToRemoteClient struct { + grpc.ClientStream +} + +func (x *volumeServerVolumeTierMoveDatToRemoteClient) Recv() (*VolumeTierMoveDatToRemoteResponse, error) { + m := new(VolumeTierMoveDatToRemoteResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} -func (c *volumeServerClient) VolumeTierMoveDatFromRemote(ctx context.Context, in *VolumeTierMoveDatFromRemoteRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[VolumeTierMoveDatFromRemoteResponse], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[9], VolumeServer_VolumeTierMoveDatFromRemote_FullMethodName, cOpts...) +func (c *volumeServerClient) VolumeTierMoveDatFromRemote(ctx context.Context, in *VolumeTierMoveDatFromRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatFromRemoteClient, error) { + stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[9], "/volume_server_pb.VolumeServer/VolumeTierMoveDatFromRemote", opts...) if err != nil { return nil, err } - x := &grpc.GenericClientStream[VolumeTierMoveDatFromRemoteRequest, VolumeTierMoveDatFromRemoteResponse]{ClientStream: stream} + x := &volumeServerVolumeTierMoveDatFromRemoteClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -591,13 +639,26 @@ func (c *volumeServerClient) VolumeTierMoveDatFromRemote(ctx context.Context, in return x, nil } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
-type VolumeServer_VolumeTierMoveDatFromRemoteClient = grpc.ServerStreamingClient[VolumeTierMoveDatFromRemoteResponse] +type VolumeServer_VolumeTierMoveDatFromRemoteClient interface { + Recv() (*VolumeTierMoveDatFromRemoteResponse, error) + grpc.ClientStream +} + +type volumeServerVolumeTierMoveDatFromRemoteClient struct { + grpc.ClientStream +} + +func (x *volumeServerVolumeTierMoveDatFromRemoteClient) Recv() (*VolumeTierMoveDatFromRemoteResponse, error) { + m := new(VolumeTierMoveDatFromRemoteResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} func (c *volumeServerClient) VolumeServerStatus(ctx context.Context, in *VolumeServerStatusRequest, opts ...grpc.CallOption) (*VolumeServerStatusResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VolumeServerStatusResponse) - err := c.cc.Invoke(ctx, VolumeServer_VolumeServerStatus_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeServerStatus", in, out, opts...) if err != nil { return nil, err } @@ -605,9 +666,8 @@ func (c *volumeServerClient) VolumeServerStatus(ctx context.Context, in *VolumeS } func (c *volumeServerClient) VolumeServerLeave(ctx context.Context, in *VolumeServerLeaveRequest, opts ...grpc.CallOption) (*VolumeServerLeaveResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VolumeServerLeaveResponse) - err := c.cc.Invoke(ctx, VolumeServer_VolumeServerLeave_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeServerLeave", in, out, opts...) 
if err != nil { return nil, err } @@ -615,22 +675,20 @@ func (c *volumeServerClient) VolumeServerLeave(ctx context.Context, in *VolumeSe } func (c *volumeServerClient) FetchAndWriteNeedle(ctx context.Context, in *FetchAndWriteNeedleRequest, opts ...grpc.CallOption) (*FetchAndWriteNeedleResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(FetchAndWriteNeedleResponse) - err := c.cc.Invoke(ctx, VolumeServer_FetchAndWriteNeedle_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/FetchAndWriteNeedle", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *volumeServerClient) Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[QueriedStripe], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[10], VolumeServer_Query_FullMethodName, cOpts...) +func (c *volumeServerClient) Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (VolumeServer_QueryClient, error) { + stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[10], "/volume_server_pb.VolumeServer/Query", opts...) if err != nil { return nil, err } - x := &grpc.GenericClientStream[QueryRequest, QueriedStripe]{ClientStream: stream} + x := &volumeServerQueryClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -640,13 +698,26 @@ func (c *volumeServerClient) Query(ctx context.Context, in *QueryRequest, opts . return x, nil } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
-type VolumeServer_QueryClient = grpc.ServerStreamingClient[QueriedStripe] +type VolumeServer_QueryClient interface { + Recv() (*QueriedStripe, error) + grpc.ClientStream +} + +type volumeServerQueryClient struct { + grpc.ClientStream +} + +func (x *volumeServerQueryClient) Recv() (*QueriedStripe, error) { + m := new(QueriedStripe) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} func (c *volumeServerClient) VolumeNeedleStatus(ctx context.Context, in *VolumeNeedleStatusRequest, opts ...grpc.CallOption) (*VolumeNeedleStatusResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VolumeNeedleStatusResponse) - err := c.cc.Invoke(ctx, VolumeServer_VolumeNeedleStatus_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeNeedleStatus", in, out, opts...) if err != nil { return nil, err } @@ -654,9 +725,8 @@ func (c *volumeServerClient) VolumeNeedleStatus(ctx context.Context, in *VolumeN } func (c *volumeServerClient) Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PingResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(PingResponse) - err := c.cc.Invoke(ctx, VolumeServer_Ping_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/Ping", in, out, opts...) if err != nil { return nil, err } @@ -665,18 +735,18 @@ func (c *volumeServerClient) Ping(ctx context.Context, in *PingRequest, opts ... // VolumeServerServer is the server API for VolumeServer service. // All implementations must embed UnimplementedVolumeServerServer -// for forward compatibility. +// for forward compatibility type VolumeServerServer interface { // Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas. 
BatchDelete(context.Context, *BatchDeleteRequest) (*BatchDeleteResponse, error) VacuumVolumeCheck(context.Context, *VacuumVolumeCheckRequest) (*VacuumVolumeCheckResponse, error) - VacuumVolumeCompact(*VacuumVolumeCompactRequest, grpc.ServerStreamingServer[VacuumVolumeCompactResponse]) error + VacuumVolumeCompact(*VacuumVolumeCompactRequest, VolumeServer_VacuumVolumeCompactServer) error VacuumVolumeCommit(context.Context, *VacuumVolumeCommitRequest) (*VacuumVolumeCommitResponse, error) VacuumVolumeCleanup(context.Context, *VacuumVolumeCleanupRequest) (*VacuumVolumeCleanupResponse, error) DeleteCollection(context.Context, *DeleteCollectionRequest) (*DeleteCollectionResponse, error) AllocateVolume(context.Context, *AllocateVolumeRequest) (*AllocateVolumeResponse, error) VolumeSyncStatus(context.Context, *VolumeSyncStatusRequest) (*VolumeSyncStatusResponse, error) - VolumeIncrementalCopy(*VolumeIncrementalCopyRequest, grpc.ServerStreamingServer[VolumeIncrementalCopyResponse]) error + VolumeIncrementalCopy(*VolumeIncrementalCopyRequest, VolumeServer_VolumeIncrementalCopyServer) error VolumeMount(context.Context, *VolumeMountRequest) (*VolumeMountResponse, error) VolumeUnmount(context.Context, *VolumeUnmountRequest) (*VolumeUnmountResponse, error) VolumeDelete(context.Context, *VolumeDeleteRequest) (*VolumeDeleteResponse, error) @@ -685,15 +755,15 @@ type VolumeServerServer interface { VolumeConfigure(context.Context, *VolumeConfigureRequest) (*VolumeConfigureResponse, error) VolumeStatus(context.Context, *VolumeStatusRequest) (*VolumeStatusResponse, error) // copy the .idx .dat files, and mount this volume - VolumeCopy(*VolumeCopyRequest, grpc.ServerStreamingServer[VolumeCopyResponse]) error + VolumeCopy(*VolumeCopyRequest, VolumeServer_VolumeCopyServer) error ReadVolumeFileStatus(context.Context, *ReadVolumeFileStatusRequest) (*ReadVolumeFileStatusResponse, error) - CopyFile(*CopyFileRequest, grpc.ServerStreamingServer[CopyFileResponse]) error - 
ReceiveFile(grpc.ClientStreamingServer[ReceiveFileRequest, ReceiveFileResponse]) error + CopyFile(*CopyFileRequest, VolumeServer_CopyFileServer) error + ReceiveFile(VolumeServer_ReceiveFileServer) error ReadNeedleBlob(context.Context, *ReadNeedleBlobRequest) (*ReadNeedleBlobResponse, error) ReadNeedleMeta(context.Context, *ReadNeedleMetaRequest) (*ReadNeedleMetaResponse, error) WriteNeedleBlob(context.Context, *WriteNeedleBlobRequest) (*WriteNeedleBlobResponse, error) - ReadAllNeedles(*ReadAllNeedlesRequest, grpc.ServerStreamingServer[ReadAllNeedlesResponse]) error - VolumeTailSender(*VolumeTailSenderRequest, grpc.ServerStreamingServer[VolumeTailSenderResponse]) error + ReadAllNeedles(*ReadAllNeedlesRequest, VolumeServer_ReadAllNeedlesServer) error + VolumeTailSender(*VolumeTailSenderRequest, VolumeServer_VolumeTailSenderServer) error VolumeTailReceiver(context.Context, *VolumeTailReceiverRequest) (*VolumeTailReceiverResponse, error) // erasure coding VolumeEcShardsGenerate(context.Context, *VolumeEcShardsGenerateRequest) (*VolumeEcShardsGenerateResponse, error) @@ -702,30 +772,27 @@ type VolumeServerServer interface { VolumeEcShardsDelete(context.Context, *VolumeEcShardsDeleteRequest) (*VolumeEcShardsDeleteResponse, error) VolumeEcShardsMount(context.Context, *VolumeEcShardsMountRequest) (*VolumeEcShardsMountResponse, error) VolumeEcShardsUnmount(context.Context, *VolumeEcShardsUnmountRequest) (*VolumeEcShardsUnmountResponse, error) - VolumeEcShardRead(*VolumeEcShardReadRequest, grpc.ServerStreamingServer[VolumeEcShardReadResponse]) error + VolumeEcShardRead(*VolumeEcShardReadRequest, VolumeServer_VolumeEcShardReadServer) error VolumeEcBlobDelete(context.Context, *VolumeEcBlobDeleteRequest) (*VolumeEcBlobDeleteResponse, error) VolumeEcShardsToVolume(context.Context, *VolumeEcShardsToVolumeRequest) (*VolumeEcShardsToVolumeResponse, error) VolumeEcShardsInfo(context.Context, *VolumeEcShardsInfoRequest) (*VolumeEcShardsInfoResponse, error) // tiered storage - 
VolumeTierMoveDatToRemote(*VolumeTierMoveDatToRemoteRequest, grpc.ServerStreamingServer[VolumeTierMoveDatToRemoteResponse]) error - VolumeTierMoveDatFromRemote(*VolumeTierMoveDatFromRemoteRequest, grpc.ServerStreamingServer[VolumeTierMoveDatFromRemoteResponse]) error + VolumeTierMoveDatToRemote(*VolumeTierMoveDatToRemoteRequest, VolumeServer_VolumeTierMoveDatToRemoteServer) error + VolumeTierMoveDatFromRemote(*VolumeTierMoveDatFromRemoteRequest, VolumeServer_VolumeTierMoveDatFromRemoteServer) error VolumeServerStatus(context.Context, *VolumeServerStatusRequest) (*VolumeServerStatusResponse, error) VolumeServerLeave(context.Context, *VolumeServerLeaveRequest) (*VolumeServerLeaveResponse, error) // remote storage FetchAndWriteNeedle(context.Context, *FetchAndWriteNeedleRequest) (*FetchAndWriteNeedleResponse, error) // query - Query(*QueryRequest, grpc.ServerStreamingServer[QueriedStripe]) error + Query(*QueryRequest, VolumeServer_QueryServer) error VolumeNeedleStatus(context.Context, *VolumeNeedleStatusRequest) (*VolumeNeedleStatusResponse, error) Ping(context.Context, *PingRequest) (*PingResponse, error) mustEmbedUnimplementedVolumeServerServer() } -// UnimplementedVolumeServerServer must be embedded to have -// forward compatible implementations. -// -// NOTE: this should be embedded by value instead of pointer to avoid a nil -// pointer dereference when methods are called. -type UnimplementedVolumeServerServer struct{} +// UnimplementedVolumeServerServer must be embedded to have forward compatible implementations. 
+type UnimplementedVolumeServerServer struct { +} func (UnimplementedVolumeServerServer) BatchDelete(context.Context, *BatchDeleteRequest) (*BatchDeleteResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method BatchDelete not implemented") @@ -733,7 +800,7 @@ func (UnimplementedVolumeServerServer) BatchDelete(context.Context, *BatchDelete func (UnimplementedVolumeServerServer) VacuumVolumeCheck(context.Context, *VacuumVolumeCheckRequest) (*VacuumVolumeCheckResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VacuumVolumeCheck not implemented") } -func (UnimplementedVolumeServerServer) VacuumVolumeCompact(*VacuumVolumeCompactRequest, grpc.ServerStreamingServer[VacuumVolumeCompactResponse]) error { +func (UnimplementedVolumeServerServer) VacuumVolumeCompact(*VacuumVolumeCompactRequest, VolumeServer_VacuumVolumeCompactServer) error { return status.Errorf(codes.Unimplemented, "method VacuumVolumeCompact not implemented") } func (UnimplementedVolumeServerServer) VacuumVolumeCommit(context.Context, *VacuumVolumeCommitRequest) (*VacuumVolumeCommitResponse, error) { @@ -751,7 +818,7 @@ func (UnimplementedVolumeServerServer) AllocateVolume(context.Context, *Allocate func (UnimplementedVolumeServerServer) VolumeSyncStatus(context.Context, *VolumeSyncStatusRequest) (*VolumeSyncStatusResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VolumeSyncStatus not implemented") } -func (UnimplementedVolumeServerServer) VolumeIncrementalCopy(*VolumeIncrementalCopyRequest, grpc.ServerStreamingServer[VolumeIncrementalCopyResponse]) error { +func (UnimplementedVolumeServerServer) VolumeIncrementalCopy(*VolumeIncrementalCopyRequest, VolumeServer_VolumeIncrementalCopyServer) error { return status.Errorf(codes.Unimplemented, "method VolumeIncrementalCopy not implemented") } func (UnimplementedVolumeServerServer) VolumeMount(context.Context, *VolumeMountRequest) (*VolumeMountResponse, error) { @@ -775,16 +842,16 @@ func 
(UnimplementedVolumeServerServer) VolumeConfigure(context.Context, *VolumeC func (UnimplementedVolumeServerServer) VolumeStatus(context.Context, *VolumeStatusRequest) (*VolumeStatusResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VolumeStatus not implemented") } -func (UnimplementedVolumeServerServer) VolumeCopy(*VolumeCopyRequest, grpc.ServerStreamingServer[VolumeCopyResponse]) error { +func (UnimplementedVolumeServerServer) VolumeCopy(*VolumeCopyRequest, VolumeServer_VolumeCopyServer) error { return status.Errorf(codes.Unimplemented, "method VolumeCopy not implemented") } func (UnimplementedVolumeServerServer) ReadVolumeFileStatus(context.Context, *ReadVolumeFileStatusRequest) (*ReadVolumeFileStatusResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ReadVolumeFileStatus not implemented") } -func (UnimplementedVolumeServerServer) CopyFile(*CopyFileRequest, grpc.ServerStreamingServer[CopyFileResponse]) error { +func (UnimplementedVolumeServerServer) CopyFile(*CopyFileRequest, VolumeServer_CopyFileServer) error { return status.Errorf(codes.Unimplemented, "method CopyFile not implemented") } -func (UnimplementedVolumeServerServer) ReceiveFile(grpc.ClientStreamingServer[ReceiveFileRequest, ReceiveFileResponse]) error { +func (UnimplementedVolumeServerServer) ReceiveFile(VolumeServer_ReceiveFileServer) error { return status.Errorf(codes.Unimplemented, "method ReceiveFile not implemented") } func (UnimplementedVolumeServerServer) ReadNeedleBlob(context.Context, *ReadNeedleBlobRequest) (*ReadNeedleBlobResponse, error) { @@ -796,10 +863,10 @@ func (UnimplementedVolumeServerServer) ReadNeedleMeta(context.Context, *ReadNeed func (UnimplementedVolumeServerServer) WriteNeedleBlob(context.Context, *WriteNeedleBlobRequest) (*WriteNeedleBlobResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method WriteNeedleBlob not implemented") } -func (UnimplementedVolumeServerServer) ReadAllNeedles(*ReadAllNeedlesRequest, 
grpc.ServerStreamingServer[ReadAllNeedlesResponse]) error { +func (UnimplementedVolumeServerServer) ReadAllNeedles(*ReadAllNeedlesRequest, VolumeServer_ReadAllNeedlesServer) error { return status.Errorf(codes.Unimplemented, "method ReadAllNeedles not implemented") } -func (UnimplementedVolumeServerServer) VolumeTailSender(*VolumeTailSenderRequest, grpc.ServerStreamingServer[VolumeTailSenderResponse]) error { +func (UnimplementedVolumeServerServer) VolumeTailSender(*VolumeTailSenderRequest, VolumeServer_VolumeTailSenderServer) error { return status.Errorf(codes.Unimplemented, "method VolumeTailSender not implemented") } func (UnimplementedVolumeServerServer) VolumeTailReceiver(context.Context, *VolumeTailReceiverRequest) (*VolumeTailReceiverResponse, error) { @@ -823,7 +890,7 @@ func (UnimplementedVolumeServerServer) VolumeEcShardsMount(context.Context, *Vol func (UnimplementedVolumeServerServer) VolumeEcShardsUnmount(context.Context, *VolumeEcShardsUnmountRequest) (*VolumeEcShardsUnmountResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsUnmount not implemented") } -func (UnimplementedVolumeServerServer) VolumeEcShardRead(*VolumeEcShardReadRequest, grpc.ServerStreamingServer[VolumeEcShardReadResponse]) error { +func (UnimplementedVolumeServerServer) VolumeEcShardRead(*VolumeEcShardReadRequest, VolumeServer_VolumeEcShardReadServer) error { return status.Errorf(codes.Unimplemented, "method VolumeEcShardRead not implemented") } func (UnimplementedVolumeServerServer) VolumeEcBlobDelete(context.Context, *VolumeEcBlobDeleteRequest) (*VolumeEcBlobDeleteResponse, error) { @@ -835,10 +902,10 @@ func (UnimplementedVolumeServerServer) VolumeEcShardsToVolume(context.Context, * func (UnimplementedVolumeServerServer) VolumeEcShardsInfo(context.Context, *VolumeEcShardsInfoRequest) (*VolumeEcShardsInfoResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsInfo not implemented") } -func 
(UnimplementedVolumeServerServer) VolumeTierMoveDatToRemote(*VolumeTierMoveDatToRemoteRequest, grpc.ServerStreamingServer[VolumeTierMoveDatToRemoteResponse]) error { +func (UnimplementedVolumeServerServer) VolumeTierMoveDatToRemote(*VolumeTierMoveDatToRemoteRequest, VolumeServer_VolumeTierMoveDatToRemoteServer) error { return status.Errorf(codes.Unimplemented, "method VolumeTierMoveDatToRemote not implemented") } -func (UnimplementedVolumeServerServer) VolumeTierMoveDatFromRemote(*VolumeTierMoveDatFromRemoteRequest, grpc.ServerStreamingServer[VolumeTierMoveDatFromRemoteResponse]) error { +func (UnimplementedVolumeServerServer) VolumeTierMoveDatFromRemote(*VolumeTierMoveDatFromRemoteRequest, VolumeServer_VolumeTierMoveDatFromRemoteServer) error { return status.Errorf(codes.Unimplemented, "method VolumeTierMoveDatFromRemote not implemented") } func (UnimplementedVolumeServerServer) VolumeServerStatus(context.Context, *VolumeServerStatusRequest) (*VolumeServerStatusResponse, error) { @@ -850,7 +917,7 @@ func (UnimplementedVolumeServerServer) VolumeServerLeave(context.Context, *Volum func (UnimplementedVolumeServerServer) FetchAndWriteNeedle(context.Context, *FetchAndWriteNeedleRequest) (*FetchAndWriteNeedleResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method FetchAndWriteNeedle not implemented") } -func (UnimplementedVolumeServerServer) Query(*QueryRequest, grpc.ServerStreamingServer[QueriedStripe]) error { +func (UnimplementedVolumeServerServer) Query(*QueryRequest, VolumeServer_QueryServer) error { return status.Errorf(codes.Unimplemented, "method Query not implemented") } func (UnimplementedVolumeServerServer) VolumeNeedleStatus(context.Context, *VolumeNeedleStatusRequest) (*VolumeNeedleStatusResponse, error) { @@ -860,7 +927,6 @@ func (UnimplementedVolumeServerServer) Ping(context.Context, *PingRequest) (*Pin return nil, status.Errorf(codes.Unimplemented, "method Ping not implemented") } func (UnimplementedVolumeServerServer) 
mustEmbedUnimplementedVolumeServerServer() {} -func (UnimplementedVolumeServerServer) testEmbeddedByValue() {} // UnsafeVolumeServerServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to VolumeServerServer will @@ -869,15 +935,8 @@ type UnsafeVolumeServerServer interface { mustEmbedUnimplementedVolumeServerServer() } -func RegisterVolumeServerServer(s grpc.ServiceRegistrar, srv VolumeServerServer) { - // If the following call pancis, it indicates UnimplementedVolumeServerServer was - // embedded by pointer and is nil. This will cause panics if an - // unimplemented method is ever invoked, so we test this at initialization - // time to prevent it from happening at runtime later due to I/O. - if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { - t.testEmbeddedByValue() - } - s.RegisterService(&VolumeServer_ServiceDesc, srv) +func RegisterVolumeServerServer(s *grpc.Server, srv VolumeServerServer) { + s.RegisterService(&_VolumeServer_serviceDesc, srv) } func _VolumeServer_BatchDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { @@ -890,7 +949,7 @@ func _VolumeServer_BatchDelete_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_BatchDelete_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/BatchDelete", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).BatchDelete(ctx, req.(*BatchDeleteRequest)) @@ -908,7 +967,7 @@ func _VolumeServer_VacuumVolumeCheck_Handler(srv interface{}, ctx context.Contex } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VacuumVolumeCheck_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VacuumVolumeCheck", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { 
return srv.(VolumeServerServer).VacuumVolumeCheck(ctx, req.(*VacuumVolumeCheckRequest)) @@ -921,11 +980,21 @@ func _VolumeServer_VacuumVolumeCompact_Handler(srv interface{}, stream grpc.Serv if err := stream.RecvMsg(m); err != nil { return err } - return srv.(VolumeServerServer).VacuumVolumeCompact(m, &grpc.GenericServerStream[VacuumVolumeCompactRequest, VacuumVolumeCompactResponse]{ServerStream: stream}) + return srv.(VolumeServerServer).VacuumVolumeCompact(m, &volumeServerVacuumVolumeCompactServer{stream}) } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type VolumeServer_VacuumVolumeCompactServer = grpc.ServerStreamingServer[VacuumVolumeCompactResponse] +type VolumeServer_VacuumVolumeCompactServer interface { + Send(*VacuumVolumeCompactResponse) error + grpc.ServerStream +} + +type volumeServerVacuumVolumeCompactServer struct { + grpc.ServerStream +} + +func (x *volumeServerVacuumVolumeCompactServer) Send(m *VacuumVolumeCompactResponse) error { + return x.ServerStream.SendMsg(m) +} func _VolumeServer_VacuumVolumeCommit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(VacuumVolumeCommitRequest) @@ -937,7 +1006,7 @@ func _VolumeServer_VacuumVolumeCommit_Handler(srv interface{}, ctx context.Conte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VacuumVolumeCommit_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VacuumVolumeCommit", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VacuumVolumeCommit(ctx, req.(*VacuumVolumeCommitRequest)) @@ -955,7 +1024,7 @@ func _VolumeServer_VacuumVolumeCleanup_Handler(srv interface{}, ctx context.Cont } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VacuumVolumeCleanup_FullMethodName, + FullMethod: 
"/volume_server_pb.VolumeServer/VacuumVolumeCleanup", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VacuumVolumeCleanup(ctx, req.(*VacuumVolumeCleanupRequest)) @@ -973,7 +1042,7 @@ func _VolumeServer_DeleteCollection_Handler(srv interface{}, ctx context.Context } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_DeleteCollection_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/DeleteCollection", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).DeleteCollection(ctx, req.(*DeleteCollectionRequest)) @@ -991,7 +1060,7 @@ func _VolumeServer_AllocateVolume_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_AllocateVolume_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/AllocateVolume", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).AllocateVolume(ctx, req.(*AllocateVolumeRequest)) @@ -1009,7 +1078,7 @@ func _VolumeServer_VolumeSyncStatus_Handler(srv interface{}, ctx context.Context } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VolumeSyncStatus_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VolumeSyncStatus", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeSyncStatus(ctx, req.(*VolumeSyncStatusRequest)) @@ -1022,11 +1091,21 @@ func _VolumeServer_VolumeIncrementalCopy_Handler(srv interface{}, stream grpc.Se if err := stream.RecvMsg(m); err != nil { return err } - return srv.(VolumeServerServer).VolumeIncrementalCopy(m, &grpc.GenericServerStream[VolumeIncrementalCopyRequest, VolumeIncrementalCopyResponse]{ServerStream: stream}) + return srv.(VolumeServerServer).VolumeIncrementalCopy(m, &volumeServerVolumeIncrementalCopyServer{stream}) } -// This type alias is 
provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type VolumeServer_VolumeIncrementalCopyServer = grpc.ServerStreamingServer[VolumeIncrementalCopyResponse] +type VolumeServer_VolumeIncrementalCopyServer interface { + Send(*VolumeIncrementalCopyResponse) error + grpc.ServerStream +} + +type volumeServerVolumeIncrementalCopyServer struct { + grpc.ServerStream +} + +func (x *volumeServerVolumeIncrementalCopyServer) Send(m *VolumeIncrementalCopyResponse) error { + return x.ServerStream.SendMsg(m) +} func _VolumeServer_VolumeMount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(VolumeMountRequest) @@ -1038,7 +1117,7 @@ func _VolumeServer_VolumeMount_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VolumeMount_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VolumeMount", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeMount(ctx, req.(*VolumeMountRequest)) @@ -1056,7 +1135,7 @@ func _VolumeServer_VolumeUnmount_Handler(srv interface{}, ctx context.Context, d } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VolumeUnmount_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VolumeUnmount", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeUnmount(ctx, req.(*VolumeUnmountRequest)) @@ -1074,7 +1153,7 @@ func _VolumeServer_VolumeDelete_Handler(srv interface{}, ctx context.Context, de } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VolumeDelete_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VolumeDelete", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return 
srv.(VolumeServerServer).VolumeDelete(ctx, req.(*VolumeDeleteRequest)) @@ -1092,7 +1171,7 @@ func _VolumeServer_VolumeMarkReadonly_Handler(srv interface{}, ctx context.Conte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VolumeMarkReadonly_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VolumeMarkReadonly", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeMarkReadonly(ctx, req.(*VolumeMarkReadonlyRequest)) @@ -1110,7 +1189,7 @@ func _VolumeServer_VolumeMarkWritable_Handler(srv interface{}, ctx context.Conte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VolumeMarkWritable_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VolumeMarkWritable", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeMarkWritable(ctx, req.(*VolumeMarkWritableRequest)) @@ -1128,7 +1207,7 @@ func _VolumeServer_VolumeConfigure_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VolumeConfigure_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VolumeConfigure", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeConfigure(ctx, req.(*VolumeConfigureRequest)) @@ -1146,7 +1225,7 @@ func _VolumeServer_VolumeStatus_Handler(srv interface{}, ctx context.Context, de } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VolumeStatus_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VolumeStatus", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeStatus(ctx, req.(*VolumeStatusRequest)) @@ -1159,11 +1238,21 @@ func _VolumeServer_VolumeCopy_Handler(srv interface{}, stream grpc.ServerStream) if err := stream.RecvMsg(m); err != nil { return err } - return 
srv.(VolumeServerServer).VolumeCopy(m, &grpc.GenericServerStream[VolumeCopyRequest, VolumeCopyResponse]{ServerStream: stream}) + return srv.(VolumeServerServer).VolumeCopy(m, &volumeServerVolumeCopyServer{stream}) +} + +type VolumeServer_VolumeCopyServer interface { + Send(*VolumeCopyResponse) error + grpc.ServerStream +} + +type volumeServerVolumeCopyServer struct { + grpc.ServerStream } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type VolumeServer_VolumeCopyServer = grpc.ServerStreamingServer[VolumeCopyResponse] +func (x *volumeServerVolumeCopyServer) Send(m *VolumeCopyResponse) error { + return x.ServerStream.SendMsg(m) +} func _VolumeServer_ReadVolumeFileStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ReadVolumeFileStatusRequest) @@ -1175,7 +1264,7 @@ func _VolumeServer_ReadVolumeFileStatus_Handler(srv interface{}, ctx context.Con } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_ReadVolumeFileStatus_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/ReadVolumeFileStatus", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).ReadVolumeFileStatus(ctx, req.(*ReadVolumeFileStatusRequest)) @@ -1188,18 +1277,47 @@ func _VolumeServer_CopyFile_Handler(srv interface{}, stream grpc.ServerStream) e if err := stream.RecvMsg(m); err != nil { return err } - return srv.(VolumeServerServer).CopyFile(m, &grpc.GenericServerStream[CopyFileRequest, CopyFileResponse]{ServerStream: stream}) + return srv.(VolumeServerServer).CopyFile(m, &volumeServerCopyFileServer{stream}) } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
-type VolumeServer_CopyFileServer = grpc.ServerStreamingServer[CopyFileResponse] +type VolumeServer_CopyFileServer interface { + Send(*CopyFileResponse) error + grpc.ServerStream +} + +type volumeServerCopyFileServer struct { + grpc.ServerStream +} + +func (x *volumeServerCopyFileServer) Send(m *CopyFileResponse) error { + return x.ServerStream.SendMsg(m) +} func _VolumeServer_ReceiveFile_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(VolumeServerServer).ReceiveFile(&grpc.GenericServerStream[ReceiveFileRequest, ReceiveFileResponse]{ServerStream: stream}) + return srv.(VolumeServerServer).ReceiveFile(&volumeServerReceiveFileServer{stream}) +} + +type VolumeServer_ReceiveFileServer interface { + SendAndClose(*ReceiveFileResponse) error + Recv() (*ReceiveFileRequest, error) + grpc.ServerStream +} + +type volumeServerReceiveFileServer struct { + grpc.ServerStream } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
-type VolumeServer_ReceiveFileServer = grpc.ClientStreamingServer[ReceiveFileRequest, ReceiveFileResponse] +func (x *volumeServerReceiveFileServer) SendAndClose(m *ReceiveFileResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *volumeServerReceiveFileServer) Recv() (*ReceiveFileRequest, error) { + m := new(ReceiveFileRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} func _VolumeServer_ReadNeedleBlob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ReadNeedleBlobRequest) @@ -1211,7 +1329,7 @@ func _VolumeServer_ReadNeedleBlob_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_ReadNeedleBlob_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/ReadNeedleBlob", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).ReadNeedleBlob(ctx, req.(*ReadNeedleBlobRequest)) @@ -1229,7 +1347,7 @@ func _VolumeServer_ReadNeedleMeta_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_ReadNeedleMeta_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/ReadNeedleMeta", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).ReadNeedleMeta(ctx, req.(*ReadNeedleMetaRequest)) @@ -1247,7 +1365,7 @@ func _VolumeServer_WriteNeedleBlob_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_WriteNeedleBlob_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/WriteNeedleBlob", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).WriteNeedleBlob(ctx, req.(*WriteNeedleBlobRequest)) @@ -1260,22 +1378,42 @@ func 
_VolumeServer_ReadAllNeedles_Handler(srv interface{}, stream grpc.ServerStr if err := stream.RecvMsg(m); err != nil { return err } - return srv.(VolumeServerServer).ReadAllNeedles(m, &grpc.GenericServerStream[ReadAllNeedlesRequest, ReadAllNeedlesResponse]{ServerStream: stream}) + return srv.(VolumeServerServer).ReadAllNeedles(m, &volumeServerReadAllNeedlesServer{stream}) +} + +type VolumeServer_ReadAllNeedlesServer interface { + Send(*ReadAllNeedlesResponse) error + grpc.ServerStream +} + +type volumeServerReadAllNeedlesServer struct { + grpc.ServerStream } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type VolumeServer_ReadAllNeedlesServer = grpc.ServerStreamingServer[ReadAllNeedlesResponse] +func (x *volumeServerReadAllNeedlesServer) Send(m *ReadAllNeedlesResponse) error { + return x.ServerStream.SendMsg(m) +} func _VolumeServer_VolumeTailSender_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(VolumeTailSenderRequest) if err := stream.RecvMsg(m); err != nil { return err } - return srv.(VolumeServerServer).VolumeTailSender(m, &grpc.GenericServerStream[VolumeTailSenderRequest, VolumeTailSenderResponse]{ServerStream: stream}) + return srv.(VolumeServerServer).VolumeTailSender(m, &volumeServerVolumeTailSenderServer{stream}) } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
-type VolumeServer_VolumeTailSenderServer = grpc.ServerStreamingServer[VolumeTailSenderResponse] +type VolumeServer_VolumeTailSenderServer interface { + Send(*VolumeTailSenderResponse) error + grpc.ServerStream +} + +type volumeServerVolumeTailSenderServer struct { + grpc.ServerStream +} + +func (x *volumeServerVolumeTailSenderServer) Send(m *VolumeTailSenderResponse) error { + return x.ServerStream.SendMsg(m) +} func _VolumeServer_VolumeTailReceiver_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(VolumeTailReceiverRequest) @@ -1287,7 +1425,7 @@ func _VolumeServer_VolumeTailReceiver_Handler(srv interface{}, ctx context.Conte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VolumeTailReceiver_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VolumeTailReceiver", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeTailReceiver(ctx, req.(*VolumeTailReceiverRequest)) @@ -1305,7 +1443,7 @@ func _VolumeServer_VolumeEcShardsGenerate_Handler(srv interface{}, ctx context.C } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VolumeEcShardsGenerate_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VolumeEcShardsGenerate", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeEcShardsGenerate(ctx, req.(*VolumeEcShardsGenerateRequest)) @@ -1323,7 +1461,7 @@ func _VolumeServer_VolumeEcShardsRebuild_Handler(srv interface{}, ctx context.Co } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VolumeEcShardsRebuild_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VolumeEcShardsRebuild", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeEcShardsRebuild(ctx, req.(*VolumeEcShardsRebuildRequest)) 
@@ -1341,7 +1479,7 @@ func _VolumeServer_VolumeEcShardsCopy_Handler(srv interface{}, ctx context.Conte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VolumeEcShardsCopy_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VolumeEcShardsCopy", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeEcShardsCopy(ctx, req.(*VolumeEcShardsCopyRequest)) @@ -1359,7 +1497,7 @@ func _VolumeServer_VolumeEcShardsDelete_Handler(srv interface{}, ctx context.Con } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VolumeEcShardsDelete_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VolumeEcShardsDelete", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeEcShardsDelete(ctx, req.(*VolumeEcShardsDeleteRequest)) @@ -1377,7 +1515,7 @@ func _VolumeServer_VolumeEcShardsMount_Handler(srv interface{}, ctx context.Cont } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VolumeEcShardsMount_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VolumeEcShardsMount", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeEcShardsMount(ctx, req.(*VolumeEcShardsMountRequest)) @@ -1395,7 +1533,7 @@ func _VolumeServer_VolumeEcShardsUnmount_Handler(srv interface{}, ctx context.Co } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VolumeEcShardsUnmount_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VolumeEcShardsUnmount", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeEcShardsUnmount(ctx, req.(*VolumeEcShardsUnmountRequest)) @@ -1408,11 +1546,21 @@ func _VolumeServer_VolumeEcShardRead_Handler(srv interface{}, stream grpc.Server if err := stream.RecvMsg(m); err != nil { return err } - return 
srv.(VolumeServerServer).VolumeEcShardRead(m, &grpc.GenericServerStream[VolumeEcShardReadRequest, VolumeEcShardReadResponse]{ServerStream: stream}) + return srv.(VolumeServerServer).VolumeEcShardRead(m, &volumeServerVolumeEcShardReadServer{stream}) +} + +type VolumeServer_VolumeEcShardReadServer interface { + Send(*VolumeEcShardReadResponse) error + grpc.ServerStream } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type VolumeServer_VolumeEcShardReadServer = grpc.ServerStreamingServer[VolumeEcShardReadResponse] +type volumeServerVolumeEcShardReadServer struct { + grpc.ServerStream +} + +func (x *volumeServerVolumeEcShardReadServer) Send(m *VolumeEcShardReadResponse) error { + return x.ServerStream.SendMsg(m) +} func _VolumeServer_VolumeEcBlobDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(VolumeEcBlobDeleteRequest) @@ -1424,7 +1572,7 @@ func _VolumeServer_VolumeEcBlobDelete_Handler(srv interface{}, ctx context.Conte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VolumeEcBlobDelete_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VolumeEcBlobDelete", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeEcBlobDelete(ctx, req.(*VolumeEcBlobDeleteRequest)) @@ -1442,7 +1590,7 @@ func _VolumeServer_VolumeEcShardsToVolume_Handler(srv interface{}, ctx context.C } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VolumeEcShardsToVolume_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VolumeEcShardsToVolume", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeEcShardsToVolume(ctx, req.(*VolumeEcShardsToVolumeRequest)) @@ -1460,7 +1608,7 @@ func 
_VolumeServer_VolumeEcShardsInfo_Handler(srv interface{}, ctx context.Conte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VolumeEcShardsInfo_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VolumeEcShardsInfo", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeEcShardsInfo(ctx, req.(*VolumeEcShardsInfoRequest)) @@ -1473,22 +1621,42 @@ func _VolumeServer_VolumeTierMoveDatToRemote_Handler(srv interface{}, stream grp if err := stream.RecvMsg(m); err != nil { return err } - return srv.(VolumeServerServer).VolumeTierMoveDatToRemote(m, &grpc.GenericServerStream[VolumeTierMoveDatToRemoteRequest, VolumeTierMoveDatToRemoteResponse]{ServerStream: stream}) + return srv.(VolumeServerServer).VolumeTierMoveDatToRemote(m, &volumeServerVolumeTierMoveDatToRemoteServer{stream}) } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
-type VolumeServer_VolumeTierMoveDatToRemoteServer = grpc.ServerStreamingServer[VolumeTierMoveDatToRemoteResponse] +type VolumeServer_VolumeTierMoveDatToRemoteServer interface { + Send(*VolumeTierMoveDatToRemoteResponse) error + grpc.ServerStream +} + +type volumeServerVolumeTierMoveDatToRemoteServer struct { + grpc.ServerStream +} + +func (x *volumeServerVolumeTierMoveDatToRemoteServer) Send(m *VolumeTierMoveDatToRemoteResponse) error { + return x.ServerStream.SendMsg(m) +} func _VolumeServer_VolumeTierMoveDatFromRemote_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(VolumeTierMoveDatFromRemoteRequest) if err := stream.RecvMsg(m); err != nil { return err } - return srv.(VolumeServerServer).VolumeTierMoveDatFromRemote(m, &grpc.GenericServerStream[VolumeTierMoveDatFromRemoteRequest, VolumeTierMoveDatFromRemoteResponse]{ServerStream: stream}) + return srv.(VolumeServerServer).VolumeTierMoveDatFromRemote(m, &volumeServerVolumeTierMoveDatFromRemoteServer{stream}) +} + +type VolumeServer_VolumeTierMoveDatFromRemoteServer interface { + Send(*VolumeTierMoveDatFromRemoteResponse) error + grpc.ServerStream } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
-type VolumeServer_VolumeTierMoveDatFromRemoteServer = grpc.ServerStreamingServer[VolumeTierMoveDatFromRemoteResponse] +type volumeServerVolumeTierMoveDatFromRemoteServer struct { + grpc.ServerStream +} + +func (x *volumeServerVolumeTierMoveDatFromRemoteServer) Send(m *VolumeTierMoveDatFromRemoteResponse) error { + return x.ServerStream.SendMsg(m) +} func _VolumeServer_VolumeServerStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(VolumeServerStatusRequest) @@ -1500,7 +1668,7 @@ func _VolumeServer_VolumeServerStatus_Handler(srv interface{}, ctx context.Conte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VolumeServerStatus_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VolumeServerStatus", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeServerStatus(ctx, req.(*VolumeServerStatusRequest)) @@ -1518,7 +1686,7 @@ func _VolumeServer_VolumeServerLeave_Handler(srv interface{}, ctx context.Contex } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VolumeServerLeave_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VolumeServerLeave", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeServerLeave(ctx, req.(*VolumeServerLeaveRequest)) @@ -1536,7 +1704,7 @@ func _VolumeServer_FetchAndWriteNeedle_Handler(srv interface{}, ctx context.Cont } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_FetchAndWriteNeedle_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/FetchAndWriteNeedle", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).FetchAndWriteNeedle(ctx, req.(*FetchAndWriteNeedleRequest)) @@ -1549,11 +1717,21 @@ func _VolumeServer_Query_Handler(srv interface{}, stream 
grpc.ServerStream) erro if err := stream.RecvMsg(m); err != nil { return err } - return srv.(VolumeServerServer).Query(m, &grpc.GenericServerStream[QueryRequest, QueriedStripe]{ServerStream: stream}) + return srv.(VolumeServerServer).Query(m, &volumeServerQueryServer{stream}) } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type VolumeServer_QueryServer = grpc.ServerStreamingServer[QueriedStripe] +type VolumeServer_QueryServer interface { + Send(*QueriedStripe) error + grpc.ServerStream +} + +type volumeServerQueryServer struct { + grpc.ServerStream +} + +func (x *volumeServerQueryServer) Send(m *QueriedStripe) error { + return x.ServerStream.SendMsg(m) +} func _VolumeServer_VolumeNeedleStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(VolumeNeedleStatusRequest) @@ -1565,7 +1743,7 @@ func _VolumeServer_VolumeNeedleStatus_Handler(srv interface{}, ctx context.Conte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VolumeNeedleStatus_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VolumeNeedleStatus", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeNeedleStatus(ctx, req.(*VolumeNeedleStatusRequest)) @@ -1583,7 +1761,7 @@ func _VolumeServer_Ping_Handler(srv interface{}, ctx context.Context, dec func(i } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_Ping_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/Ping", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).Ping(ctx, req.(*PingRequest)) @@ -1591,10 +1769,7 @@ func _VolumeServer_Ping_Handler(srv interface{}, ctx context.Context, dec func(i return interceptor(ctx, in, info, handler) } -// VolumeServer_ServiceDesc is the 
grpc.ServiceDesc for VolumeServer service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var VolumeServer_ServiceDesc = grpc.ServiceDesc{ +var _VolumeServer_serviceDesc = grpc.ServiceDesc{ ServiceName: "volume_server_pb.VolumeServer", HandlerType: (*VolumeServerServer)(nil), Methods: []grpc.MethodDesc{ diff --git a/weed/server/master_grpc_server.go b/weed/server/master_grpc_server.go index df6445327..255a0d7a7 100644 --- a/weed/server/master_grpc_server.go +++ b/weed/server/master_grpc_server.go @@ -165,6 +165,7 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ glog.V(4).Infof("master received heartbeat %s", heartbeat.String()) stats.MasterReceivedHeartbeatCounter.WithLabelValues("total").Inc() + // TODO(issues/7977): process status heartbeat updates from volume servers message := &master_pb.VolumeLocation{ Url: dn.Url(), diff --git a/weed/server/volume_grpc_client_to_master.go b/weed/server/volume_grpc_client_to_master.go index fecab4894..a4349771b 100644 --- a/weed/server/volume_grpc_client_to_master.go +++ b/weed/server/volume_grpc_client_to_master.go @@ -212,6 +212,19 @@ func (vs *VolumeServer) doHeartbeatWithRetry(masterAddress pb.ServerAddress, grp port := uint32(vs.store.Port) for { select { + case stateMessage := <-vs.store.StateUpdateChan: + stateBeat := &master_pb.Heartbeat{ + Ip: ip, + Port: port, + DataCenter: dataCenter, + Rack: rack, + State: stateMessage, + } + glog.V(0).Infof("volume server %s:%d updates state to %v", vs.store.Ip, vs.store.Port, stateMessage) + if err = stream.Send(stateBeat); err != nil { + glog.V(0).Infof("Volume Server Failed to update state to master %s: %v", masterAddress, err) + return "", err + } case volumeMessage := <-vs.store.NewVolumesChan: deltaBeat := &master_pb.Heartbeat{ Ip: ip, diff --git a/weed/storage/disk_location.go b/weed/storage/disk_location.go index 671933a1d..72b306ca5 100644 --- 
a/weed/storage/disk_location.go +++ b/weed/storage/disk_location.go @@ -19,6 +19,11 @@ import ( "github.com/seaweedfs/seaweedfs/weed/util" ) +const ( + UUIDFileName = "vol_dir.uuid" + UUIDFileMod = 0644 +) + type DiskLocation struct { Directory string DirectoryUuid string @@ -42,7 +47,7 @@ type DiskLocation struct { func GenerateDirUuid(dir string) (dirUuidString string, err error) { glog.V(1).Infof("Getting uuid of volume directory:%s", dir) - fileName := dir + "/vol_dir.uuid" + fileName := filepath.Join(dir, UUIDFileName) if !util.FileExists(fileName) { dirUuidString, err = writeNewUuid(fileName) } else { @@ -62,7 +67,7 @@ func GenerateDirUuid(dir string) (dirUuidString string, err error) { func writeNewUuid(fileName string) (string, error) { dirUuid, _ := uuid.NewRandom() dirUuidString := dirUuid.String() - if err := util.WriteFile(fileName, []byte(dirUuidString), 0644); err != nil { + if err := util.WriteFile(fileName, []byte(dirUuidString), UUIDFileMod); err != nil { return "", fmt.Errorf("failed to write uuid to %s : %v", fileName, err) } return dirUuidString, nil diff --git a/weed/storage/store.go b/weed/storage/store.go index 8c8571cc8..5343ec12d 100644 --- a/weed/storage/store.go +++ b/weed/storage/store.go @@ -16,6 +16,7 @@ import ( "github.com/seaweedfs/seaweedfs/weed/glog" "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" + "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" "github.com/seaweedfs/seaweedfs/weed/stats" "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding" "github.com/seaweedfs/seaweedfs/weed/storage/needle" @@ -25,6 +26,7 @@ import ( const ( MAX_TTL_VOLUME_REMOVAL_DELAY = 10 // 10 minutes + HEARTBEAT_CHAN_SIZE = 1024 ) type ReadOption struct { @@ -69,6 +71,8 @@ type Store struct { rack string // optional information, overwriting master setting if exists connected bool NeedleMapKind NeedleMapKind + State *State + StateUpdateChan chan *volume_server_pb.VolumeServerState NewVolumesChan chan 
master_pb.VolumeShortInformationMessage DeletedVolumesChan chan master_pb.VolumeShortInformationMessage NewEcShardsChan chan master_pb.VolumeEcShardInformationMessage @@ -81,16 +85,31 @@ func (s *Store) String() (str string) { return } -func NewStore(grpcDialOption grpc.DialOption, ip string, port int, grpcPort int, publicUrl string, id string, dirnames []string, maxVolumeCounts []int32, - minFreeSpaces []util.MinFreeSpace, idxFolder string, needleMapKind NeedleMapKind, diskTypes []DiskType, ldbTimeout int64) (s *Store) { - s = &Store{grpcDialOption: grpcDialOption, Port: port, Ip: ip, GrpcPort: grpcPort, PublicUrl: publicUrl, Id: id, NeedleMapKind: needleMapKind} - s.Locations = make([]*DiskLocation, 0) - - s.NewVolumesChan = make(chan master_pb.VolumeShortInformationMessage, 1024) - s.DeletedVolumesChan = make(chan master_pb.VolumeShortInformationMessage, 1024) - - s.NewEcShardsChan = make(chan master_pb.VolumeEcShardInformationMessage, 1024) - s.DeletedEcShardsChan = make(chan master_pb.VolumeEcShardInformationMessage, 1024) +func NewStore( + grpcDialOption grpc.DialOption, + ip string, port int, grpcPort int, publicUrl string, id string, + dirnames []string, maxVolumeCounts []int32, minFreeSpaces []util.MinFreeSpace, + idxFolder string, + needleMapKind NeedleMapKind, + diskTypes []DiskType, + ldbTimeout int64, +) (s *Store) { + s = &Store{ + grpcDialOption: grpcDialOption, + Port: port, + Ip: ip, + GrpcPort: grpcPort, + PublicUrl: publicUrl, + Id: id, + NeedleMapKind: needleMapKind, + Locations: make([]*DiskLocation, 0), + + StateUpdateChan: make(chan *volume_server_pb.VolumeServerState, HEARTBEAT_CHAN_SIZE), + NewVolumesChan: make(chan master_pb.VolumeShortInformationMessage, HEARTBEAT_CHAN_SIZE), + DeletedVolumesChan: make(chan master_pb.VolumeShortInformationMessage, HEARTBEAT_CHAN_SIZE), + NewEcShardsChan: make(chan master_pb.VolumeEcShardInformationMessage, HEARTBEAT_CHAN_SIZE), + DeletedEcShardsChan: make(chan master_pb.VolumeEcShardInformationMessage, 
HEARTBEAT_CHAN_SIZE), + } var wg sync.WaitGroup for i := 0; i < len(dirnames); i++ { @@ -130,8 +149,36 @@ func NewStore(grpcDialOption grpc.DialOption, ip string, port int, grpcPort int, } wg.Wait() + var err error + s.State, err = NewState(idxFolder) + if err != nil { + glog.Fatalf("failed to resolve state for volume %s: %v", id, err) + } + return } + +func (s *Store) LoadState() error { + err := s.State.Load() + if s.State.Pb != nil && err == nil { + s.StateUpdateChan <- s.State.Pb + } + return err +} + +func (s *Store) SaveState() error { + if s.State.Pb == nil { + glog.Warningf("tried to save empty state for store %s", s.Id) + return nil + } + + err := s.State.Save() + if s.State.Pb != nil && err == nil { + s.StateUpdateChan <- s.State.Pb + } + return err +} + func (s *Store) AddVolume(volumeId needle.VolumeId, collection string, needleMapKind NeedleMapKind, replicaPlacement string, ttlString string, preallocate int64, ver needle.Version, MemoryMapMaxSizeMb uint32, diskType DiskType, ldbTimeout int64) error { rt, e := super_block.NewReplicaPlacementFromString(replicaPlacement) if e != nil { @@ -144,6 +191,7 @@ func (s *Store) AddVolume(volumeId needle.VolumeId, collection string, needleMap e = s.addVolume(volumeId, collection, needleMapKind, rt, ttl, preallocate, ver, MemoryMapMaxSizeMb, diskType, ldbTimeout) return e } + func (s *Store) DeleteCollection(collection string) (e error) { for _, location := range s.Locations { e = location.DeleteCollectionFromDiskLocation(collection) diff --git a/weed/storage/store_state.go b/weed/storage/store_state.go new file mode 100644 index 000000000..39c2bfba8 --- /dev/null +++ b/weed/storage/store_state.go @@ -0,0 +1,71 @@ +package storage + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/golang/protobuf/proto" + "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" + "github.com/seaweedfs/seaweedfs/weed/util" +) + +const ( + StateFileName = "state.pb" + 
StateFileMode = 0644 +) + +type State struct { + FilePath string + Pb *volume_server_pb.VolumeServerState +} + +func NewState(dir string) (*State, error) { + state := &State{ + FilePath: filepath.Join(dir, StateFileName), + Pb: nil, + } + + err := state.Load() + return state, err +} + +func (st *State) Load() error { + st.Pb = &volume_server_pb.VolumeServerState{} + + if !util.FileExists(st.FilePath) { + glog.V(1).Infof("No preexisting store state at %s", st.FilePath) + return nil + } + + binPb, err := os.ReadFile(st.FilePath) + if err != nil { + st.Pb = nil + return fmt.Errorf("failed to read store state from %s : %v", st.FilePath, err) + } + if err := proto.Unmarshal(binPb, st.Pb); err != nil { + st.Pb = nil + return fmt.Errorf("failed to parse store state from %s : %v", st.FilePath, err) + } + + glog.V(1).Infof("Got store state from %s: %v", st.FilePath, st.Pb) + return nil +} + +func (st *State) Save() error { + if st.Pb == nil { + st.Pb = &volume_server_pb.VolumeServerState{} + } + + binPb, err := proto.Marshal(st.Pb) + if err != nil { + return fmt.Errorf("failed to serialize store state %v: %s", st.Pb, err) + } + if err := util.WriteFile(st.FilePath, binPb, StateFileMode); err != nil { + return fmt.Errorf("failed to write store state to %s : %v", st.FilePath, err) + } + + glog.V(1).Infof("Saved store state %v to %s", st.Pb, st.FilePath) + return nil +} From 587e782feb05b6c4ae40c552744977cfe2a24059 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 12 Jan 2026 10:49:26 -0800 Subject: [PATCH 04/17] storage: use non-blocking send to StateUpdateChan --- weed/storage/store.go | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/weed/storage/store.go b/weed/storage/store.go index 5343ec12d..389980667 100644 --- a/weed/storage/store.go +++ b/weed/storage/store.go @@ -161,7 +161,11 @@ func NewStore( func (s *Store) LoadState() error { err := s.State.Load() if s.State.Pb != nil && err == nil { - s.StateUpdateChan <- s.State.Pb + select { + 
case s.StateUpdateChan <- s.State.Pb: + default: + glog.V(2).Infof("StateUpdateChan full during LoadState, state will be reported in heartbeat") + } } return err } @@ -174,7 +178,11 @@ func (s *Store) SaveState() error { err := s.State.Save() if s.State.Pb != nil && err == nil { - s.StateUpdateChan <- s.State.Pb + select { + case s.StateUpdateChan <- s.State.Pb: + default: + glog.V(2).Infof("StateUpdateChan full during SaveState, state will be reported in heartbeat") + } } return err } From 6b0eade6d42824afa5c5f3c52d81cd315f420282 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 12 Jan 2026 10:49:27 -0800 Subject: [PATCH 05/17] storage: upgrade protobuf API in store_state.go --- weed/storage/store_state.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/storage/store_state.go b/weed/storage/store_state.go index 39c2bfba8..3959d4109 100644 --- a/weed/storage/store_state.go +++ b/weed/storage/store_state.go @@ -5,10 +5,10 @@ import ( "os" "path/filepath" - "github.com/golang/protobuf/proto" "github.com/seaweedfs/seaweedfs/weed/glog" "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" "github.com/seaweedfs/seaweedfs/weed/util" + "google.golang.org/protobuf/proto" ) const ( From d6417c9167784bb34edc81669ccc07a874afa54d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Jan 2026 11:56:56 -0800 Subject: [PATCH 06/17] chore(deps): bump github.com/parquet-go/parquet-go from 0.26.3 to 0.26.4 (#8008) Bumps [github.com/parquet-go/parquet-go](https://github.com/parquet-go/parquet-go) from 0.26.3 to 0.26.4. 
- [Release notes](https://github.com/parquet-go/parquet-go/releases) - [Changelog](https://github.com/parquet-go/parquet-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/parquet-go/parquet-go/compare/v0.26.3...v0.26.4) --- updated-dependencies: - dependency-name: github.com/parquet-go/parquet-go dependency-version: 0.26.4 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 6 +++--- go.sum | 6 ++++-- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index f90f3fb0d..8d608dd89 100644 --- a/go.mod +++ b/go.mod @@ -133,6 +133,7 @@ require ( github.com/getsentry/sentry-go v0.40.0 github.com/gin-contrib/sessions v1.0.4 github.com/gin-gonic/gin v1.11.0 + github.com/go-ldap/ldap/v3 v3.4.12 github.com/golang-jwt/jwt/v5 v5.3.0 github.com/google/flatbuffers/go v0.0.0-20230108230133-3b8644d32c50 github.com/hashicorp/raft v1.7.3 @@ -144,7 +145,7 @@ require ( github.com/mattn/go-sqlite3 v1.14.32 github.com/minio/crc64nvme v1.1.1 github.com/orcaman/concurrent-map/v2 v2.0.1 - github.com/parquet-go/parquet-go v0.26.3 + github.com/parquet-go/parquet-go v0.26.4 github.com/pkg/sftp v1.13.10 github.com/rabbitmq/amqp091-go v1.10.0 github.com/rclone/rclone v1.71.2 @@ -184,7 +185,6 @@ require ( github.com/cockroachdb/version v0.0.0-20250314144055-3860cd14adf2 // indirect github.com/dave/dst v0.27.2 // indirect github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 // indirect - github.com/go-ldap/ldap/v3 v3.4.12 // indirect github.com/goccy/go-yaml v1.18.0 // indirect github.com/golang/geo v0.0.0-20210211234256-740aa86cb551 // indirect github.com/google/go-cmp v0.7.0 // indirect @@ -369,7 +369,7 @@ require ( github.com/mattn/go-runewidth v0.0.16 // indirect github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect 
- github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 // indirect + github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 github.com/montanaflynn/stats v0.7.1 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/nats-io/nats.go v1.43.0 // indirect diff --git a/go.sum b/go.sum index 3fdbd2de6..bab0ea7ae 100644 --- a/go.sum +++ b/go.sum @@ -643,6 +643,8 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alexbrainman/sspi v0.0.0-20250919150558-7d374ff0d59e h1:4dAU9FXIyQktpoUAgOJK3OTFc/xug0PCXYCqU0FgDKI= +github.com/alexbrainman/sspi v0.0.0-20250919150558-7d374ff0d59e/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4= github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/andybalholm/brotli v1.2.0 h1:ukwgCxwYrmACq68yiUqwIWnGY0cTPox/M94sVwToPjQ= github.com/andybalholm/brotli v1.2.0/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUSvPlQ1pLaKY= @@ -1476,8 +1478,8 @@ github.com/parquet-go/bitpack v1.0.0 h1:AUqzlKzPPXf2bCdjfj4sTeacrUwsT7NlcYDMUQxP github.com/parquet-go/bitpack v1.0.0/go.mod h1:XnVk9TH+O40eOOmvpAVZ7K2ocQFrQwysLMnc6M/8lgs= github.com/parquet-go/jsonlite v1.0.0 h1:87QNdi56wOfsE5bdgas0vRzHPxfJgzrXGml1zZdd7VU= github.com/parquet-go/jsonlite v1.0.0/go.mod h1:nDjpkpL4EOtqs6NQugUsi0Rleq9sW/OtC1NnZEnxzF0= -github.com/parquet-go/parquet-go v0.26.3 h1:kJY+xmjcR7BH77tyHqasJpIl3kch/6EIO3TW4tFj69M= -github.com/parquet-go/parquet-go v0.26.3/go.mod h1:h9GcSt41Knf5qXI1tp1TfR8bDBUtvdUMzSKe26aZcHk= +github.com/parquet-go/parquet-go v0.26.4 
h1:zJ3l8ef5WJZE2m63pKwyEJ2BhyDlgS0PfOEhuCQQU2A= +github.com/parquet-go/parquet-go v0.26.4/go.mod h1:h9GcSt41Knf5qXI1tp1TfR8bDBUtvdUMzSKe26aZcHk= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= From 138371ce4a070f10f15c56785af9f919c655be3a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Jan 2026 12:20:10 -0800 Subject: [PATCH 07/17] chore(deps): bump google.golang.org/grpc from 1.77.0 to 1.78.0 (#8009) Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.77.0 to 1.78.0. - [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.77.0...v1.78.0) --- updated-dependencies: - dependency-name: google.golang.org/grpc dependency-version: 1.78.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 8d608dd89..094eeae66 100644 --- a/go.mod +++ b/go.mod @@ -105,7 +105,7 @@ require ( golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect google.golang.org/api v0.258.0 google.golang.org/genproto v0.0.0-20250715232539-7130f93afb79 // indirect - google.golang.org/grpc v1.77.0 + google.golang.org/grpc v1.78.0 google.golang.org/protobuf v1.36.11 gopkg.in/inf.v0 v0.9.1 // indirect modernc.org/b v1.0.0 // indirect @@ -450,7 +450,7 @@ require ( golang.org/x/arch v0.20.0 // indirect golang.org/x/term v0.38.0 // indirect golang.org/x/time v0.14.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20251213004720-97cd9d5aeac2 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/validator.v2 v2.0.1 // indirect diff --git a/go.sum b/go.sum index bab0ea7ae..2947e4389 100644 --- a/go.sum +++ b/go.sum @@ -2562,8 +2562,8 @@ google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= google.golang.org/genproto v0.0.0-20250715232539-7130f93afb79 h1:Nt6z9UHqSlIdIGJdz6KhTIs2VRx/iOsA5iE8bmQNcxs= google.golang.org/genproto v0.0.0-20250715232539-7130f93afb79/go.mod h1:kTmlBHMPqR5uCZPBvwa2B18mvubkjyY3CRLI0c6fj0s= -google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 h1:mepRgnBZa07I4TRuomDE4sTIYieg/osKmzIf4USdWS4= -google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo= 
+google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda h1:+2XxjfsAu6vqFxwGBRcHiMaDCuZiqXGDUDVWVtrFAnE= +google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo= google.golang.org/genproto/googleapis/rpc v0.0.0-20251213004720-97cd9d5aeac2 h1:2I6GHUeJ/4shcDpoUlLs/2WPnhg7yJwvXtqcMJt9liA= google.golang.org/genproto/googleapis/rpc v0.0.0-20251213004720-97cd9d5aeac2/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -2606,8 +2606,8 @@ google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsA google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= -google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM= -google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig= +google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc= +google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/grpc/examples v0.0.0-20250407062114-b368379ef8f6 h1:ExN12ndbJ608cboPYflpTny6mXSzPrDLh0iTaVrRrds= google.golang.org/grpc/examples v0.0.0-20250407062114-b368379ef8f6/go.mod h1:6ytKWczdvnpnO+m+JiG9NjEDzR1FJfsnmJdG7B8QVZ8= From 9ccc844df03b5396c4fa438db0e599623b382b89 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Jan 2026 12:20:23 -0800 Subject: [PATCH 08/17] chore(deps): bump github.com/klauspost/reedsolomon from 1.12.6 to 1.13.0 (#8010) Bumps 
[github.com/klauspost/reedsolomon](https://github.com/klauspost/reedsolomon) from 1.12.6 to 1.13.0. - [Release notes](https://github.com/klauspost/reedsolomon/releases) - [Commits](https://github.com/klauspost/reedsolomon/compare/v1.12.6...v1.13.0) --- updated-dependencies: - dependency-name: github.com/klauspost/reedsolomon dependency-version: 1.13.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 094eeae66..8eb6f7682 100644 --- a/go.mod +++ b/go.mod @@ -49,7 +49,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/karlseguin/ccache/v2 v2.0.8 github.com/klauspost/compress v1.18.2 - github.com/klauspost/reedsolomon v1.12.6 + github.com/klauspost/reedsolomon v1.13.0 github.com/kurin/blazer v0.5.3 github.com/linxGnu/grocksdb v1.10.3 github.com/mailru/easyjson v0.7.7 // indirect diff --git a/go.sum b/go.sum index 2947e4389..d5dfd683b 100644 --- a/go.sum +++ b/go.sum @@ -1334,8 +1334,8 @@ github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxh github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= -github.com/klauspost/reedsolomon v1.12.6 h1:8pqE9aECQG/ZFitiUD1xK/E83zwosBAZtE3UbuZM8TQ= -github.com/klauspost/reedsolomon v1.12.6/go.mod h1:ggJT9lc71Vu+cSOPBlxGvBN6TfAS77qB4fp8vJ05NSA= +github.com/klauspost/reedsolomon v1.13.0 h1:E0Cmgf2kMuhZTj6eefnvpKC4/Q4jhCi9YIjcZjK4arc= +github.com/klauspost/reedsolomon v1.13.0/go.mod h1:ggJT9lc71Vu+cSOPBlxGvBN6TfAS77qB4fp8vJ05NSA= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod 
h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= From 64a34ff69ba46f9d14a1f2f03c50869ae6707982 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Jan 2026 12:20:52 -0800 Subject: [PATCH 09/17] chore(deps): bump github.com/shirou/gopsutil/v4 from 4.25.11 to 4.25.12 (#8011) Bumps [github.com/shirou/gopsutil/v4](https://github.com/shirou/gopsutil) from 4.25.11 to 4.25.12. - [Release notes](https://github.com/shirou/gopsutil/releases) - [Commits](https://github.com/shirou/gopsutil/compare/v4.25.11...v4.25.12) --- updated-dependencies: - dependency-name: github.com/shirou/gopsutil/v4 dependency-version: 4.25.12 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 8eb6f7682..6bfc132d5 100644 --- a/go.mod +++ b/go.mod @@ -153,7 +153,7 @@ require ( github.com/redis/go-redis/v9 v9.17.2 github.com/schollz/progressbar/v3 v3.19.0 github.com/seaweedfs/go-fuse/v2 v2.9.1 - github.com/shirou/gopsutil/v4 v4.25.11 + github.com/shirou/gopsutil/v4 v4.25.12 github.com/tarantool/go-tarantool/v2 v2.4.1 github.com/tikv/client-go/v2 v2.0.7 github.com/xeipuuv/gojsonschema v1.2.0 diff --git a/go.sum b/go.sum index d5dfd683b..524d7c31e 100644 --- a/go.sum +++ b/go.sum @@ -1635,8 +1635,8 @@ github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAm github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= 
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/shirou/gopsutil/v4 v4.25.11 h1:X53gB7muL9Gnwwo2evPSE+SfOrltMoR6V3xJAXZILTY= -github.com/shirou/gopsutil/v4 v4.25.11/go.mod h1:EivAfP5x2EhLp2ovdpKSozecVXn1TmuG7SMzs/Wh4PU= +github.com/shirou/gopsutil/v4 v4.25.12 h1:e7PvW/0RmJ8p8vPGJH4jvNkOyLmbkXgXW4m6ZPic6CY= +github.com/shirou/gopsutil/v4 v4.25.12/go.mod h1:EivAfP5x2EhLp2ovdpKSozecVXn1TmuG7SMzs/Wh4PU= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= From 269092c8c3481c6e320a89abd95013c50bcd267c Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 12 Jan 2026 12:22:42 -0800 Subject: [PATCH 10/17] fix(gcs): resolve credential conflict in remote storage mount (#8013) * fix(gcs): resolve credential conflict in remote storage mount Manually handle GCS credentials to avoid conflict with automatic discovery. Fixes #8007 * fix(gcs): use %w for error wrapping in gcs_storage_client.go Address review feedback to use idiomatic error wrapping. --- weed/remote_storage/gcs/gcs_storage_client.go | 24 ++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/weed/remote_storage/gcs/gcs_storage_client.go b/weed/remote_storage/gcs/gcs_storage_client.go index 8e8a97a1c..b92f81b0f 100644 --- a/weed/remote_storage/gcs/gcs_storage_client.go +++ b/weed/remote_storage/gcs/gcs_storage_client.go @@ -14,6 +14,8 @@ import ( "github.com/seaweedfs/seaweedfs/weed/pb/remote_pb" "github.com/seaweedfs/seaweedfs/weed/remote_storage" "github.com/seaweedfs/seaweedfs/weed/util" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" "google.golang.org/api/iterator" "google.golang.org/api/option" ) @@ -54,7 +56,27 @@ func (s gcsRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_storage. 
googleApplicationCredentials = util.ResolvePath(googleApplicationCredentials) - c, err := storage.NewClient(context.Background(), option.WithCredentialsFile(googleApplicationCredentials)) + var clientOpts []option.ClientOption + if googleApplicationCredentials != "" { + var data []byte + var err error + if strings.HasPrefix(googleApplicationCredentials, "{") { + data = []byte(googleApplicationCredentials) + } else { + data, err = os.ReadFile(googleApplicationCredentials) + if err != nil { + return nil, fmt.Errorf("failed to read credentials file %s: %w", googleApplicationCredentials, err) + } + } + creds, err := google.CredentialsFromJSON(context.Background(), data, storage.ScopeFullControl) + if err != nil { + return nil, fmt.Errorf("failed to parse credentials: %w", err) + } + httpClient := oauth2.NewClient(context.Background(), creds.TokenSource) + clientOpts = append(clientOpts, option.WithHTTPClient(httpClient), option.WithoutAuthentication()) + } + + c, err := storage.NewClient(context.Background(), clientOpts...) if err != nil { return nil, fmt.Errorf("failed to create client: %w", err) } From 60f7dbec4db65cd287102edad4c1ce0cd429abc7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Jan 2026 12:40:42 -0800 Subject: [PATCH 11/17] chore(deps): bump github.com/mattn/go-sqlite3 from 1.14.32 to 1.14.33 (#8012) Bumps [github.com/mattn/go-sqlite3](https://github.com/mattn/go-sqlite3) from 1.14.32 to 1.14.33. - [Release notes](https://github.com/mattn/go-sqlite3/releases) - [Commits](https://github.com/mattn/go-sqlite3/compare/v1.14.32...v1.14.33) --- updated-dependencies: - dependency-name: github.com/mattn/go-sqlite3 dependency-version: 1.14.33 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 6bfc132d5..c83fa769f 100644 --- a/go.mod +++ b/go.mod @@ -142,7 +142,7 @@ require ( github.com/jhump/protoreflect v1.17.0 github.com/lib/pq v1.10.9 github.com/linkedin/goavro/v2 v2.14.1 - github.com/mattn/go-sqlite3 v1.14.32 + github.com/mattn/go-sqlite3 v1.14.33 github.com/minio/crc64nvme v1.1.1 github.com/orcaman/concurrent-map/v2 v2.0.1 github.com/parquet-go/parquet-go v0.26.4 diff --git a/go.sum b/go.sum index 524d7c31e..dccce8857 100644 --- a/go.sum +++ b/go.sum @@ -1396,8 +1396,8 @@ github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= -github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuErjs= -github.com/mattn/go-sqlite3 v1.14.32/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/mattn/go-sqlite3 v1.14.33 h1:A5blZ5ulQo2AtayQ9/limgHEkFreKj1Dv226a1K73s0= +github.com/mattn/go-sqlite3 v1.14.33/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= From 1046bd009a00841f22231af4df2cba29e799a29f Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 12 Jan 2026 13:21:48 -0800 Subject: [PATCH 12/17] feat: Optional path-prefix and method scoping for Filer HTTP JWT 
(#8014) * Implement optional path-prefix and method scoping for Filer HTTP JWT * Fix security vulnerability and improve test error handling * Address PR feedback: replace debug logging and improve tests * Use URL.Path in logs to avoid leaking query params --- weed/security/jwt.go | 4 +- weed/server/filer_jwt_test.go | 143 +++++++++++++++++++++++++++ weed/server/filer_server_handlers.go | 40 +++++++- 3 files changed, 182 insertions(+), 5 deletions(-) create mode 100644 weed/server/filer_jwt_test.go diff --git a/weed/security/jwt.go b/weed/security/jwt.go index d859e9ea8..abea0198d 100644 --- a/weed/security/jwt.go +++ b/weed/security/jwt.go @@ -24,6 +24,8 @@ type SeaweedFileIdClaims struct { // Right now, it only contains the standard claims; but this might be extended later // for more fine-grained permissions. type SeaweedFilerClaims struct { + AllowedPrefixes []string `json:"allowed_prefixes,omitempty"` + AllowedMethods []string `json:"allowed_methods,omitempty"` jwt.RegisteredClaims } @@ -56,7 +58,7 @@ func GenJwtForFilerServer(signingKey SigningKey, expiresAfterSec int) EncodedJwt } claims := SeaweedFilerClaims{ - jwt.RegisteredClaims{}, + RegisteredClaims: jwt.RegisteredClaims{}, } if expiresAfterSec > 0 { claims.ExpiresAt = jwt.NewNumericDate(time.Now().Add(time.Second * time.Duration(expiresAfterSec))) diff --git a/weed/server/filer_jwt_test.go b/weed/server/filer_jwt_test.go new file mode 100644 index 000000000..81539a124 --- /dev/null +++ b/weed/server/filer_jwt_test.go @@ -0,0 +1,143 @@ +package weed_server + +import ( + "net/http/httptest" + "testing" + "time" + + "github.com/golang-jwt/jwt/v5" + "github.com/seaweedfs/seaweedfs/weed/security" +) + +func TestFilerServer_maybeCheckJwtAuthorization_Scoped(t *testing.T) { + signingKey := "secret" + filerGuard := security.NewGuard(nil, signingKey, 0, signingKey, 0) + fs := &FilerServer{ + filerGuard: filerGuard, + } + + // Helper to generate token + genToken := func(allowedPrefixes []string, allowedMethods 
[]string) string { + claims := security.SeaweedFilerClaims{ + AllowedPrefixes: allowedPrefixes, + AllowedMethods: allowedMethods, + RegisteredClaims: jwt.RegisteredClaims{ + ExpiresAt: jwt.NewNumericDate(time.Now().Add(1 * time.Hour)), + }, + } + token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims) + str, err := token.SignedString([]byte(signingKey)) + if err != nil { + t.Fatalf("failed to sign token: %v", err) + } + return str + } + + tests := []struct { + name string + token string + method string + path string + isWrite bool + expectAuthorized bool + }{ + { + name: "no restrictions", + token: genToken(nil, nil), + method: "GET", + path: "/data/test", + isWrite: false, + expectAuthorized: true, + }, + { + name: "allowed prefix match", + token: genToken([]string{"/data"}, nil), + method: "GET", + path: "/data/test", + isWrite: false, + expectAuthorized: true, + }, + { + name: "allowed prefix mismatch", + token: genToken([]string{"/private"}, nil), + method: "GET", + path: "/data/test", + isWrite: false, + expectAuthorized: false, + }, + { + name: "allowed method match", + token: genToken(nil, []string{"GET"}), + method: "GET", + path: "/data/test", + isWrite: false, + expectAuthorized: true, + }, + { + name: "allowed method mismatch", + token: genToken(nil, []string{"POST"}), + method: "GET", + path: "/data/test", + isWrite: false, + expectAuthorized: false, + }, + { + name: "both match", + token: genToken([]string{"/data"}, []string{"GET"}), + method: "GET", + path: "/data/test", + isWrite: false, + expectAuthorized: true, + }, + { + name: "prefix match, method mismatch", + token: genToken([]string{"/data"}, []string{"POST"}), + method: "GET", + path: "/data/test", + isWrite: false, + expectAuthorized: false, + }, + { + name: "multiple prefixes match", + token: genToken([]string{"/other", "/data"}, nil), + method: "GET", + path: "/data/test", + isWrite: false, + expectAuthorized: true, + }, + { + name: "write operation with method restriction", + token: 
genToken(nil, []string{"POST", "PUT"}), + method: "POST", + path: "/data/upload", + isWrite: true, + expectAuthorized: true, + }, + { + name: "root path with prefix restriction", + token: genToken([]string{"/data"}, nil), + method: "GET", + path: "/", + isWrite: false, + expectAuthorized: false, + }, + { + name: "exact prefix match", + token: genToken([]string{"/data"}, nil), + method: "GET", + path: "/data", + isWrite: false, + expectAuthorized: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + req := httptest.NewRequest(tt.method, tt.path, nil) + req.Header.Set("Authorization", "Bearer "+tt.token) + if authorized := fs.maybeCheckJwtAuthorization(req, tt.isWrite); authorized != tt.expectAuthorized { + t.Errorf("expected authorized=%v, got %v", tt.expectAuthorized, authorized) + } + }) + } +} diff --git a/weed/server/filer_server_handlers.go b/weed/server/filer_server_handlers.go index 57d675740..45653be0f 100644 --- a/weed/server/filer_server_handlers.go +++ b/weed/server/filer_server_handlers.go @@ -4,7 +4,6 @@ import ( "context" "errors" "net/http" - "os" "strconv" "strings" "sync/atomic" @@ -148,7 +147,7 @@ func (fs *FilerServer) readonlyFilerHandler(w http.ResponseWriter, r *http.Reque statusRecorder := stats.NewStatusResponseWriter(w) w = statusRecorder - os.Stdout.WriteString("Request: " + r.Method + " " + r.URL.String() + "\n") + glog.V(4).Infof("Request: %s %s", r.Method, r.URL.Path) origin := r.Header.Get("Origin") if origin != "" { @@ -242,9 +241,42 @@ func (fs *FilerServer) maybeCheckJwtAuthorization(r *http.Request, isWrite bool) if !token.Valid { glog.V(1).Infof("jwt invalid from %s: %v", r.RemoteAddr, tokenStr) return false - } else { - return true } + + claims, ok := token.Claims.(*security.SeaweedFilerClaims) + if !ok { + glog.V(1).Infof("jwt claims not of type *SeaweedFilerClaims from %s", r.RemoteAddr) + return false + } + + if len(claims.AllowedPrefixes) > 0 { + hasPrefix := false + for _, prefix := range 
claims.AllowedPrefixes { + if strings.HasPrefix(r.URL.Path, prefix) { + hasPrefix = true + break + } + } + if !hasPrefix { + glog.V(1).Infof("jwt path not allowed from %s: %v", r.RemoteAddr, r.URL.Path) + return false + } + } + if len(claims.AllowedMethods) > 0 { + hasMethod := false + for _, method := range claims.AllowedMethods { + if method == r.Method { + hasMethod = true + break + } + } + if !hasMethod { + glog.V(1).Infof("jwt method not allowed from %s: %v", r.RemoteAddr, r.Method) + return false + } + } + + return true } func (fs *FilerServer) filerHealthzHandler(w http.ResponseWriter, r *http.Request) { From ba97f3cc8eddd2d3c1263741840bd68d8cc55d49 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 12 Jan 2026 15:53:27 -0800 Subject: [PATCH 13/17] Update README.md --- README.md | 76 +++++++++++++++++++++++++------------------------------ 1 file changed, 35 insertions(+), 41 deletions(-) diff --git a/README.md b/README.md index 26ddd4c86..f5ab2d698 100644 --- a/README.md +++ b/README.md @@ -122,7 +122,7 @@ SeaweedFS is a simple and highly scalable distributed file system. There are two 1. to store billions of files! 2. to serve the files fast! -SeaweedFS started as an Object Store to handle small files efficiently. +SeaweedFS started as a blob store to handle small files efficiently. Instead of managing all file metadata in a central master, the central master only manages volumes on volume servers, and these volume servers manage files and their metadata. @@ -134,16 +134,12 @@ It is so simple with O(1) disk reads that you are welcome to challenge the perfo SeaweedFS started by implementing [Facebook's Haystack design paper](http://www.usenix.org/event/osdi10/tech/full_papers/Beaver.pdf). 
Also, SeaweedFS implements erasure coding with ideas from -[f4: Facebook’s Warm BLOB Storage System](https://www.usenix.org/system/files/conference/osdi14/osdi14-paper-muralidhar.pdf), and has a lot of similarities with [Facebook’s Tectonic Filesystem](https://www.usenix.org/system/files/fast21-pan.pdf) +[f4: Facebook’s Warm BLOB Storage System](https://www.usenix.org/system/files/conference/osdi14/osdi14-paper-muralidhar.pdf), and has a lot of similarities with [Facebook’s Tectonic Filesystem](https://www.usenix.org/system/files/fast21-pan.pdf) and Google's Colossle -On top of the object store, optional [Filer] can support directories and POSIX attributes. +On top of the blob store, optional [Filer] can support directories and POSIX attributes. Filer is a separate linearly-scalable stateless server with customizable metadata stores, e.g., MySql, Postgres, Redis, Cassandra, HBase, Mongodb, Elastic Search, LevelDB, RocksDB, Sqlite, MemSql, TiDB, Etcd, CockroachDB, YDB, etc. -For any distributed key value stores, the large values can be offloaded to SeaweedFS. -With the fast access speed and linearly scalable capacity, -SeaweedFS can work as a distributed [Key-Large-Value store][KeyLargeValueStore]. - SeaweedFS can transparently integrate with the cloud. With hot data on local cluster, and warm data on the cloud with O(1) access time, SeaweedFS can achieve both fast local access time and elastic cloud storage capacity. @@ -153,13 +149,13 @@ Faster and cheaper than direct cloud storage! [Back to TOC](#table-of-contents) # Features # -## Additional Features ## -* Can choose no replication or different replication levels, rack and data center aware. +## Additional Blob Store Features ## +* Support different replication levels, with rack and data center aware. * Automatic master servers failover - no single point of failure (SPOF). -* Automatic Gzip compression depending on file MIME type. +* Automatic compression depending on file MIME type. 
* Automatic compaction to reclaim disk space after deletion or update. * [Automatic entry TTL expiration][VolumeServerTTL]. -* Any server with some disk space can add to the total storage space. +* Flexible Capacity Expansion: Any server with some disk space can add to the total storage space. * Adding/Removing servers does **not** cause any data re-balancing unless triggered by admin commands. * Optional picture resizing. * Support ETag, Accept-Range, Last-Modified, etc. @@ -167,7 +163,7 @@ Faster and cheaper than direct cloud storage! * Support rebalancing the writable and readonly volumes. * [Customizable Multiple Storage Tiers][TieredStorage]: Customizable storage disk types to balance performance and cost. * [Transparent cloud integration][CloudTier]: unlimited capacity via tiered cloud storage for warm data. -* [Erasure Coding for warm storage][ErasureCoding] Rack-Aware 10.4 erasure coding reduces storage cost and increases availability. +* [Erasure Coding for warm storage][ErasureCoding] Rack-Aware 10.4 erasure coding reduces storage cost and increases availability. Enterprise version can customize EC ratio. [Back to TOC](#table-of-contents) @@ -213,7 +209,7 @@ Faster and cheaper than direct cloud storage! [Back to TOC](#table-of-contents) -## Example: Using Seaweed Object Store ## +## Example: Using Seaweed Blob Store ## By default, the master node runs on port 9333, and the volume nodes run on port 8080. Let's start one master node, and two volume nodes on port 8080 and 8081. Ideally, they should be started from different machines. We'll use localhost as an example. @@ -233,23 +229,25 @@ SeaweedFS uses HTTP REST operations to read, write, and delete. The responses ar > weed volume -dir="/tmp/data2" -max=10 -master="localhost:9333" -port=8081 & ``` -### Write File ### +### Write A Blob ### + +A blob, also referred as a needle, a chunk, or mistakenly as a file, is just a byte array. It can have attributes, such as name, mime type, create or update time, etc. 
But basically it is just a byte array of a relatively small size, such as 2 MB ~ 64 MB. The size is not fixed. -To upload a file: first, send a HTTP POST, PUT, or GET request to `/dir/assign` to get an `fid` and a volume server URL: +To upload a blob: first, send a HTTP POST, PUT, or GET request to `/dir/assign` to get an `fid` and a volume server URL: ``` > curl http://localhost:9333/dir/assign {"count":1,"fid":"3,01637037d6","url":"127.0.0.1:8080","publicUrl":"localhost:8080"} ``` -Second, to store the file content, send a HTTP multi-part POST request to `url + '/' + fid` from the response: +Second, to store the blob content, send a HTTP multi-part POST request to `url + '/' + fid` from the response: ``` > curl -F file=@/home/chris/myphoto.jpg http://127.0.0.1:8080/3,01637037d6 {"name":"myphoto.jpg","size":43234,"eTag":"1cc0118e"} ``` -To update, send another POST request with updated file content. +To update, send another POST request with updated blob content. For deletion, send an HTTP DELETE request to the same `url + '/' + fid` URL: @@ -257,7 +255,7 @@ For deletion, send an HTTP DELETE request to the same `url + '/' + fid` URL: > curl -X DELETE http://127.0.0.1:8080/3,01637037d6 ``` -### Save File Id ### +### Save Blob Id ### Now, you can save the `fid`, 3,01637037d6 in this case, to a database field. @@ -269,9 +267,9 @@ The file key and file cookie are both coded in hex. You can store the . Of course, each map entry has its own space cost for the map. But usually the disk space runs out before the memory does. +All blob metadata stored on a volume server is readable from memory without disk access. Each file takes just a 16-byte map entry of <64bit key, 32bit offset, 32bit size>. Of course, each map entry has its own space cost for the map. But usually the disk space runs out before the memory does. 
### Tiered Storage to the cloud ### @@ -415,6 +403,12 @@ If the hot/warm data is split as 20/80, with 20 servers, you can achieve storage [Back to TOC](#table-of-contents) +## SeaweedFS Filer ## + +Built on top of the blob store, SeaweedFS Filer adds directory structure to create a file system. The directory sturcture is an interface that is implemented in many key-value stores or databases. + +The content of a file is mapped to one or many blobs, distributed to multiple volumes on multiple volume servers. + ## Compared to Other File Systems ## Most other distributed file systems seem more complicated than necessary. From da83a790c7aa40f10fab458e4bbd04fde7570358 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 12 Jan 2026 15:59:39 -0800 Subject: [PATCH 15/17] Fix link to Google's Colossus File System --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index f5ab2d698..581bdf773 100644 --- a/README.md +++ b/README.md @@ -134,7 +134,7 @@ It is so simple with O(1) disk reads that you are welcome to challenge the perfo SeaweedFS started by implementing [Facebook's Haystack design paper](http://www.usenix.org/event/osdi10/tech/full_papers/Beaver.pdf). 
Also, SeaweedFS implements erasure coding with ideas from -[f4: Facebook’s Warm BLOB Storage System](https://www.usenix.org/system/files/conference/osdi14/osdi14-paper-muralidhar.pdf), and has a lot of similarities with [Facebook’s Tectonic Filesystem](https://www.usenix.org/system/files/fast21-pan.pdf) and Google's Colossle +[f4: Facebook’s Warm BLOB Storage System](https://www.usenix.org/system/files/conference/osdi14/osdi14-paper-muralidhar.pdf), and has a lot of similarities with [Facebook’s Tectonic Filesystem](https://www.usenix.org/system/files/fast21-pan.pdf) and [Google's Colossus File System](https://cloud.google.com/blog/products/storage-data-transfer/a-peek-behind-colossus-googles-file-system) On top of the blob store, optional [Filer] can support directories and POSIX attributes. Filer is a separate linearly-scalable stateless server with customizable metadata stores, From c023eed8427e1c122534ecd1338798f9dfced27f Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 12 Jan 2026 16:00:57 -0800 Subject: [PATCH 16/17] Update Stargazers image link in README --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 581bdf773..76a9d2553 100644 --- a/README.md +++ b/README.md @@ -656,4 +656,4 @@ The text of this page is available for modification and reuse under the terms of ## Stargazers over time -[![Stargazers over time](https://starchart.cc/chrislusf/seaweedfs.svg)](https://starchart.cc/chrislusf/seaweedfs) +[![Stargazers over time](https://starchart.cc/seaweedfs/seaweedfs.svg)](https://starchart.cc/seaweedfs/seaweedfs) From 2388a2b03664f90f5fdc02983bea1404af11e09d Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 12 Jan 2026 16:02:04 -0800 Subject: [PATCH 17/17] Update Stargazers image link to adaptive variant --- README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README.md b/README.md index 76a9d2553..4859f3d09 100644 --- a/README.md +++ b/README.md @@ -655,5 +655,4 @@ The text 
of this page is available for modification and reuse under the terms of [Back to TOC](#table-of-contents) ## Stargazers over time - -[![Stargazers over time](https://starchart.cc/seaweedfs/seaweedfs.svg)](https://starchart.cc/seaweedfs/seaweedfs) +[![Stargazers over time](https://starchart.cc/seaweedfs/seaweedfs.svg?variant=adaptive)](https://starchart.cc/seaweedfs/seaweedfs)