Browse Source

Prune Unused Functions from weed/s3api (#8815)

* weed/s3api: prune calculatePartOffset()

* weed/s3api: prune clearCachedListMetadata()

* weed/s3api: prune S3ApiServer.isObjectRetentionActive()

* weed/s3api: prune S3ApiServer.ensureDirectoryAllEmpty()

* weed/s3api: prune s3ApiServer.getEncryptionTypeString()

* weed/s3api: prune newStreamError()

* weed/s3api: prune S3ApiServer.rotateSSECKey()

weed/s3api: prune S3ApiServer.rotateSSEKMSKey()

weed/s3api: prune S3ApiServer.rotateSSEKMSMetadataOnly()

weed/s3api: prune S3ApiServer.rotateSSECChunks()

weed/s3api: prune S3ApiServer.rotateSSEKMSChunks()

weed/s3api: prune S3ApiServer.rotateSSECChunk()

weed/s3api: prune S3ApiServer.rotateSSEKMSChunk()

* weed/s3api: prune addCounterToIV()

* weed/s3api: prune minInt()

* weed/s3api: prune isMethodActionMismatch()

* weed/s3api: prune hasSpecificQueryParameters()

* weed/s3api: prune handlePutToFilerError()

weed/s3api: prune handlePutToFilerInternalError()

weed/s3api: prune logErrorAndReturn()

weed/s3api: prune logInternalError

weed/s3api: prune handleSSEError()

weed/s3api: prune handleSSEInternalError()

* weed/s3api: prune encryptionConfigToProto()

* weed/s3api: prune S3ApiServer.touch()
pull/4306/merge
Lars Lehtonen 12 hours ago
committed by GitHub
parent
commit
b01a74c6bb
No known key found for this signature in database GPG Key ID: B5690EEEBB952194
  1. 6
      weed/s3api/filer_util.go
  2. 12
      weed/s3api/s3_bucket_encryption.go
  3. 35
      weed/s3api/s3_error_utils.go
  4. 87
      weed/s3api/s3_iam_middleware.go
  5. 13
      weed/s3api/s3_sse_c.go
  6. 263
      weed/s3api/s3api_key_rotation.go
  7. 5
      weed/s3api/s3api_object_handlers.go
  8. 12
      weed/s3api/s3api_object_handlers_copy.go
  9. 54
      weed/s3api/s3api_object_handlers_list.go
  10. 18
      weed/s3api/s3api_object_retention.go
  11. 10
      weed/s3api/s3api_object_versioning.go
  12. 14
      weed/s3api/s3api_put_handlers.go

6
weed/s3api/filer_util.go

@ -167,12 +167,6 @@ func (s3a *S3ApiServer) exists(parentDirectoryPath string, entryName string, isD
}
// touch updates the timestamp metadata of an existing entry via the filer.
func (s3a *S3ApiServer) touch(parentDirectoryPath string, entryName string, entry *filer_pb.Entry) (err error) {
	err = filer_pb.Touch(context.Background(), s3a, parentDirectoryPath, entryName, entry)
	return
}
func (s3a *S3ApiServer) getEntry(parentDirectoryPath, entryName string) (entry *filer_pb.Entry, err error) {
fullPath := util.NewFullPath(parentDirectoryPath, entryName)
return filer_pb.GetEntry(context.Background(), s3a, fullPath)

12
weed/s3api/s3_bucket_encryption.go

@ -36,18 +36,6 @@ type ApplyServerSideEncryptionByDefault struct {
KMSMasterKeyID string `xml:"KMSMasterKeyID,omitempty"`
}
// encryptionConfigToProto returns a field-by-field copy of the given
// protobuf EncryptionConfiguration (nil in, nil out).
func encryptionConfigToProto(config *s3_pb.EncryptionConfiguration) *s3_pb.EncryptionConfiguration {
	if config == nil {
		return nil
	}
	clone := &s3_pb.EncryptionConfiguration{}
	clone.SseAlgorithm = config.SseAlgorithm
	clone.KmsKeyId = config.KmsKeyId
	clone.BucketKeyEnabled = config.BucketKeyEnabled
	return clone
}
// encryptionConfigFromXML converts XML ServerSideEncryptionConfiguration to protobuf
func encryptionConfigFromXML(xmlConfig *ServerSideEncryptionConfiguration) *s3_pb.EncryptionConfiguration {
if xmlConfig == nil || len(xmlConfig.Rules) == 0 {

35
weed/s3api/s3_error_utils.go

@ -7,17 +7,6 @@ import (
// ErrorHandlers provide common error handling patterns for S3 API operations
// handlePutToFilerError logs the failed operation and produces the standard
// putToFiler error triple: empty etag, the given error code, empty extra value.
func handlePutToFilerError(operation string, err error, errorCode s3err.ErrorCode) (string, s3err.ErrorCode, string) {
	glog.Errorf("Failed to %s: %v", operation, err)
	var etag, extra string
	return etag, errorCode, extra
}
// handlePutToFilerInternalError reports err as s3err.ErrInternalError in the
// putToFiler error format.
func handlePutToFilerInternalError(operation string, err error) (string, s3err.ErrorCode, string) {
	etag, code, extra := handlePutToFilerError(operation, err, s3err.ErrInternalError)
	return etag, code, extra
}
// handleMultipartError logs an error and returns the standard multipart error format
func handleMultipartError(operation string, err error, errorCode s3err.ErrorCode) (interface{}, s3err.ErrorCode) {
glog.Errorf("Failed to %s: %v", operation, err)
@ -28,27 +17,3 @@ func handleMultipartError(operation string, err error, errorCode s3err.ErrorCode
// handleMultipartInternalError reports err as s3err.ErrInternalError in the
// multipart error format.
func handleMultipartInternalError(operation string, err error) (interface{}, s3err.ErrorCode) {
	result, code := handleMultipartError(operation, err, s3err.ErrInternalError)
	return result, code
}
// logErrorAndReturn logs err with its operation context and hands the caller
// back the supplied error code unchanged.
func logErrorAndReturn(operation string, err error, errorCode s3err.ErrorCode) s3err.ErrorCode {
	glog.Errorf("Failed to %s: %v", operation, err)
	code := errorCode
	return code
}
// logInternalError logs err and reports it as s3err.ErrInternalError.
func logInternalError(operation string, err error) s3err.ErrorCode {
	code := logErrorAndReturn(operation, err, s3err.ErrInternalError)
	return code
}
// SSE-specific error handlers
// handleSSEError logs an SSE-related failure, tagging the message with the
// encryption type (SSE-C/SSE-KMS/SSE-S3), and produces the standard
// putToFiler-style error triple.
func handleSSEError(sseType string, operation string, err error, errorCode s3err.ErrorCode) (string, s3err.ErrorCode, string) {
	glog.Errorf("Failed to %s for %s: %v", operation, sseType, err)
	var etag, extra string
	return etag, errorCode, extra
}
// handleSSEInternalError reports an SSE failure as s3err.ErrInternalError.
func handleSSEInternalError(sseType string, operation string, err error) (string, s3err.ErrorCode, string) {
	etag, code, extra := handleSSEError(sseType, operation, err, s3err.ErrInternalError)
	return etag, code, extra
}

87
weed/s3api/s3_iam_middleware.go

@ -5,7 +5,6 @@ import (
"fmt"
"net"
"net/http"
"net/url"
"strings"
"time"
@ -382,84 +381,6 @@ func buildS3ResourceArn(bucket string, objectKey string) string {
return "arn:aws:s3:::" + bucket + "/" + objectKey
}
// hasSpecificQueryParameters checks if the request has query parameters that indicate specific granular operations
func hasSpecificQueryParameters(query url.Values) bool {
// Check for object-level operation indicators
objectParams := []string{
"acl", // ACL operations
"tagging", // Tagging operations
"retention", // Object retention
"legal-hold", // Legal hold
"versions", // Versioning operations
}
// Check for multipart operation indicators
multipartParams := []string{
"uploads", // List/initiate multipart uploads
"uploadId", // Part operations, complete, abort
"partNumber", // Upload part
}
// Check for bucket-level operation indicators
bucketParams := []string{
"policy", // Bucket policy operations
"website", // Website configuration
"cors", // CORS configuration
"lifecycle", // Lifecycle configuration
"notification", // Event notification
"replication", // Cross-region replication
"encryption", // Server-side encryption
"accelerate", // Transfer acceleration
"requestPayment", // Request payment
"logging", // Access logging
"versioning", // Versioning configuration
"inventory", // Inventory configuration
"analytics", // Analytics configuration
"metrics", // CloudWatch metrics
"location", // Bucket location
}
// Check if any of these parameters are present
allParams := append(append(objectParams, multipartParams...), bucketParams...)
for _, param := range allParams {
if _, exists := query[param]; exists {
return true
}
}
return false
}
// isMethodActionMismatch reports whether the HTTP method is semantically at
// odds with the intended fallback S3 action, signalling that fallback action
// mapping should be applied instead.
func isMethodActionMismatch(method string, fallbackAction Action) bool {
	switch fallbackAction {
	case s3_constants.ACTION_WRITE:
		// Write actions arrive via PUT/POST/DELETE; GET or HEAD is read-oriented.
		return method == "GET" || method == "HEAD"
	case s3_constants.ACTION_READ, s3_constants.ACTION_LIST:
		// Read and list actions arrive via GET/HEAD; any mutating method mismatches.
		return method == "PUT" || method == "POST" || method == "DELETE"
	case s3_constants.ACTION_DELETE_BUCKET:
		// Bucket deletion must come in on DELETE.
		return method != "DELETE"
	}
	// Unknown actions (including already-granular s3:-prefixed ones): do not
	// assume a mismatch.
	return false
}
// mapLegacyActionToIAM provides fallback mapping for legacy actions
// This ensures backward compatibility while the system transitions to granular actions
func mapLegacyActionToIAM(legacyAction Action) string {
@ -616,14 +537,6 @@ func ParseUnverifiedJWTToken(tokenString string) (jwt.MapClaims, error) {
return nil, fmt.Errorf("invalid token claims")
}
// minInt returns the smaller of a and b.
func minInt(a, b int) int {
	if a > b {
		return b
	}
	return a
}
// SetIAMIntegration adds advanced IAM integration to the S3ApiServer
func (s3a *S3ApiServer) SetIAMIntegration(iamManager *integration.IAMManager) {
if s3a.iam != nil {

13
weed/s3api/s3_sse_c.go

@ -272,19 +272,6 @@ func createCTRStreamWithOffset(block cipher.Block, iv []byte, counterOffset uint
return stream
}
// addCounterToIV adds counter to the 64-bit big-endian counter held in the
// last 8 bytes of iv, propagating carries through all 8 bytes (overflow past
// the top byte wraps, as CTR-mode counters do).
//
// The previous implementation was broken in two ways: the carry-out of a byte
// addition was never added into the next more-significant byte, and the loop
// stopped as soon as a single byte addition did not overflow — so any counter
// with more than one significant byte (e.g. 0x100) was silently dropped.
func addCounterToIV(iv []byte, counter uint64) {
	base := len(iv) - 8
	carry := uint64(0)
	for i := 7; i >= 0; i-- {
		// Add the matching counter byte plus the carry from the byte below.
		sum := uint64(iv[base+i]) + (counter>>(uint(7-i)*8))&0xff + carry
		iv[base+i] = byte(sum)
		carry = sum >> 8
	}
}
// GetSourceSSECInfo extracts SSE-C information from source object metadata
func GetSourceSSECInfo(metadata map[string][]byte) (algorithm string, keyMD5 string, isEncrypted bool) {
if alg, exists := metadata[s3_constants.AmzServerSideEncryptionCustomerAlgorithm]; exists {

263
weed/s3api/s3api_key_rotation.go

@ -1,275 +1,12 @@
package s3api
import (
"bytes"
"crypto/rand"
"fmt"
"io"
"net/http"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
)
// rotateSSECKey re-keys a same-object copy encrypted with SSE-C. Both the
// copy-source key and the destination key must be supplied in the request
// headers; when their MD5s match, the existing chunks are reused unchanged,
// otherwise every chunk is re-encrypted under the destination key.
func (s3a *S3ApiServer) rotateSSECKey(entry *filer_pb.Entry, r *http.Request) ([]*filer_pb.FileChunk, error) {
	sourceKey, err := ParseSSECCopySourceHeaders(r)
	if err != nil {
		return nil, fmt.Errorf("parse SSE-C copy source headers: %w", err)
	}
	destKey, err := ParseSSECHeaders(r)
	if err != nil {
		return nil, fmt.Errorf("parse SSE-C destination headers: %w", err)
	}
	switch {
	case sourceKey == nil:
		return nil, fmt.Errorf("source SSE-C key required for key rotation")
	case destKey == nil:
		return nil, fmt.Errorf("destination SSE-C key required for key rotation")
	case sourceKey.KeyMD5 == destKey.KeyMD5:
		// Same key on both sides: nothing to rotate, keep the chunks as-is.
		glog.V(2).Infof("SSE-C key rotation: keys are identical, using direct copy")
		return entry.GetChunks(), nil
	}
	glog.V(2).Infof("SSE-C key rotation: rotating from key %s to key %s",
		sourceKey.KeyMD5[:8], destKey.KeyMD5[:8])
	// The encryption key itself changes, so a metadata-only rotation is
	// impossible: every chunk must be decrypted and re-encrypted.
	return s3a.rotateSSECChunks(entry, sourceKey, destKey)
}
// rotateSSEKMSKey re-keys a same-object copy encrypted with SSE-KMS.
// Identical source and destination key IDs reuse the existing chunks; a
// metadata-only re-wrap is attempted first when supported, with full chunk
// re-encryption as the fallback.
func (s3a *S3ApiServer) rotateSSEKMSKey(entry *filer_pb.Entry, r *http.Request) ([]*filer_pb.FileChunk, error) {
	srcKeyID, srcEncrypted := GetSourceSSEKMSInfo(entry.Extended)
	if !srcEncrypted {
		return nil, fmt.Errorf("source object is not SSE-KMS encrypted")
	}
	dstKeyID := r.Header.Get(s3_constants.AmzServerSideEncryptionAwsKmsKeyId)
	if dstKeyID == "" {
		// No explicit key requested: fall back to the default KMS key.
		dstKeyID = "default"
	}
	if srcKeyID == dstKeyID {
		glog.V(2).Infof("SSE-KMS key rotation: keys are identical, using direct copy")
		return entry.GetChunks(), nil
	}
	glog.V(2).Infof("SSE-KMS key rotation: rotating from key %s to key %s", srcKeyID, dstKeyID)
	// Prefer re-wrapping just the data encryption key when the KMS setup
	// allows it; otherwise re-encrypt every chunk.
	if s3a.CanDoMetadataOnlyKMSRotation(srcKeyID, dstKeyID) {
		return s3a.rotateSSEKMSMetadataOnly(entry, srcKeyID, dstKeyID)
	}
	return s3a.rotateSSEKMSChunks(entry, srcKeyID, dstKeyID, r)
}
// CanDoMetadataOnlyKMSRotation reports whether rotating from srcKeyID to
// dstKeyID can be done by re-wrapping the data key alone, without touching
// chunk data. It is deliberately conservative and always answers false; a
// full implementation would need to verify that both keys live in the same
// KMS instance, that the KMS supports re-wrapping, and that the caller holds
// permissions on both keys.
func (s3a *S3ApiServer) CanDoMetadataOnlyKMSRotation(srcKeyID, dstKeyID string) bool {
	return false
}
// rotateSSEKMSMetadataOnly would re-wrap the data encryption key under the
// new KMS key without rewriting any chunk data. Not implemented yet, so it
// always fails.
func (s3a *S3ApiServer) rotateSSEKMSMetadataOnly(entry *filer_pb.Entry, srcKeyID, dstKeyID string) ([]*filer_pb.FileChunk, error) {
	return nil, fmt.Errorf("metadata-only KMS key rotation not yet implemented")
}
// rotateSSECChunks re-encrypts every chunk of entry from sourceKey to destKey
// and rewrites the entry's SSE-C metadata (IV, algorithm, destination key
// MD5). It returns the replacement chunk list; entry.Extended is mutated in
// place.
func (s3a *S3ApiServer) rotateSSECChunks(entry *filer_pb.Entry, sourceKey, destKey *SSECustomerKey) ([]*filer_pb.FileChunk, error) {
	// The object-level IV stored on the entry is required to decrypt the
	// existing chunks.
	iv, err := GetSSECIVFromMetadata(entry.Extended)
	if err != nil {
		return nil, fmt.Errorf("get SSE-C IV from metadata: %w", err)
	}
	var rotatedChunks []*filer_pb.FileChunk
	for _, chunk := range entry.GetChunks() {
		rotatedChunk, err := s3a.rotateSSECChunk(chunk, sourceKey, destKey, iv)
		if err != nil {
			return nil, fmt.Errorf("rotate SSE-C chunk: %w", err)
		}
		rotatedChunks = append(rotatedChunks, rotatedChunk)
	}
	// Generate new IV for the destination and store it in entry metadata.
	// NOTE(review): this IV is random and independent of whatever IVs were
	// used when re-encrypting the chunks above (rotateSSECChunk discards the
	// IV returned by CreateSSECEncryptedReader) — confirm readers of rotated
	// objects use the correct IV.
	newIV := make([]byte, s3_constants.AESBlockSize)
	if _, err := io.ReadFull(rand.Reader, newIV); err != nil {
		return nil, fmt.Errorf("generate new IV: %w", err)
	}
	// Update entry metadata with new IV and SSE-C headers.
	if entry.Extended == nil {
		entry.Extended = make(map[string][]byte)
	}
	StoreSSECIVInMetadata(entry.Extended, newIV)
	entry.Extended[s3_constants.AmzServerSideEncryptionCustomerAlgorithm] = []byte("AES256")
	entry.Extended[s3_constants.AmzServerSideEncryptionCustomerKeyMD5] = []byte(destKey.KeyMD5)
	return rotatedChunks, nil
}
// rotateSSEKMSChunks rewrites every chunk of entry under the destination KMS
// key and returns the replacement chunk list. The encryption context and
// bucket-key flag are taken from the copy-request headers.
func (s3a *S3ApiServer) rotateSSEKMSChunks(entry *filer_pb.Entry, srcKeyID, dstKeyID string, r *http.Request) ([]*filer_pb.FileChunk, error) {
	_, encryptionContext, bucketKeyEnabled, err := ParseSSEKMSCopyHeaders(r)
	if err != nil {
		return nil, fmt.Errorf("parse SSE-KMS copy headers: %w", err)
	}
	var rotated []*filer_pb.FileChunk
	for _, sourceChunk := range entry.GetChunks() {
		replacement, chunkErr := s3a.rotateSSEKMSChunk(sourceChunk, srcKeyID, dstKeyID, encryptionContext, bucketKeyEnabled)
		if chunkErr != nil {
			return nil, fmt.Errorf("rotate SSE-KMS chunk: %w", chunkErr)
		}
		rotated = append(rotated, replacement)
	}
	return rotated, nil
}
// rotateSSECChunk rotates a single SSE-C encrypted chunk: it downloads the
// chunk's data, decrypts it with sourceKey using the entry-level iv,
// re-encrypts it with destKey, and uploads the result to a freshly assigned
// volume. The original chunk is left untouched; the returned chunk replaces
// it.
func (s3a *S3ApiServer) rotateSSECChunk(chunk *filer_pb.FileChunk, sourceKey, destKey *SSECustomerKey, iv []byte) (*filer_pb.FileChunk, error) {
	// Create new chunk with same properties as the source chunk.
	newChunk := &filer_pb.FileChunk{
		Offset:       chunk.Offset,
		Size:         chunk.Size,
		ModifiedTsNs: chunk.ModifiedTsNs,
		ETag:         chunk.ETag,
	}
	// Assign new volume for the rotated chunk.
	assignResult, err := s3a.assignNewVolume("")
	if err != nil {
		return nil, fmt.Errorf("assign new volume: %w", err)
	}
	// Set file ID on new chunk.
	if err := s3a.setChunkFileId(newChunk, assignResult); err != nil {
		return nil, err
	}
	// Locate the source chunk's volume.
	fileId := chunk.GetFileIdString()
	srcUrl, err := s3a.lookupVolumeUrl(fileId)
	if err != nil {
		return nil, fmt.Errorf("lookup source volume: %w", err)
	}
	// Download the encrypted chunk bytes.
	encryptedData, err := s3a.downloadChunkData(srcUrl, fileId, 0, int64(chunk.Size), chunk.CipherKey)
	if err != nil {
		return nil, fmt.Errorf("download chunk data: %w", err)
	}
	// Decrypt with the source key using the provided entry-level IV.
	decryptedReader, err := CreateSSECDecryptedReader(bytes.NewReader(encryptedData), sourceKey, iv)
	if err != nil {
		return nil, fmt.Errorf("create decrypted reader: %w", err)
	}
	decryptedData, err := io.ReadAll(decryptedReader)
	if err != nil {
		return nil, fmt.Errorf("decrypt data: %w", err)
	}
	// Re-encrypt with the destination key.
	// NOTE(review): the IV produced by CreateSSECEncryptedReader is discarded
	// here, while the caller stores an unrelated random IV on the entry —
	// confirm decryption of rotated chunks uses the correct IV.
	encryptedReader, _, err := CreateSSECEncryptedReader(bytes.NewReader(decryptedData), destKey)
	if err != nil {
		return nil, fmt.Errorf("create encrypted reader: %w", err)
	}
	// Note: IV will be handled at the entry level by the calling function.
	reencryptedData, err := io.ReadAll(encryptedReader)
	if err != nil {
		return nil, fmt.Errorf("re-encrypt data: %w", err)
	}
	// The ciphertext length may differ from the source chunk's size, so the
	// new chunk's size is taken from the re-encrypted byte count.
	newChunk.Size = uint64(len(reencryptedData))
	// Upload re-encrypted data to the newly assigned volume.
	if err := s3a.uploadChunkData(reencryptedData, assignResult, false); err != nil {
		return nil, fmt.Errorf("upload re-encrypted data: %w", err)
	}
	return newChunk, nil
}
// rotateSSEKMSChunk "rotates" a single SSE-KMS encrypted chunk by copying its
// bytes verbatim to a freshly assigned volume. This is a placeholder: the
// data is NOT decrypted or re-encrypted under dstKeyID, and the
// encryptionContext/bucketKeyEnabled parameters are accepted but unused (see
// inline notes).
func (s3a *S3ApiServer) rotateSSEKMSChunk(chunk *filer_pb.FileChunk, srcKeyID, dstKeyID string, encryptionContext map[string]string, bucketKeyEnabled bool) (*filer_pb.FileChunk, error) {
	// Create new chunk with same properties as the source chunk.
	newChunk := &filer_pb.FileChunk{
		Offset:       chunk.Offset,
		Size:         chunk.Size,
		ModifiedTsNs: chunk.ModifiedTsNs,
		ETag:         chunk.ETag,
	}
	// Assign new volume for the rotated chunk.
	assignResult, err := s3a.assignNewVolume("")
	if err != nil {
		return nil, fmt.Errorf("assign new volume: %w", err)
	}
	// Set file ID on new chunk.
	if err := s3a.setChunkFileId(newChunk, assignResult); err != nil {
		return nil, err
	}
	// Locate the source chunk's volume.
	fileId := chunk.GetFileIdString()
	srcUrl, err := s3a.lookupVolumeUrl(fileId)
	if err != nil {
		return nil, fmt.Errorf("lookup source volume: %w", err)
	}
	// Download data (this would be encrypted with the old KMS key).
	chunkData, err := s3a.downloadChunkData(srcUrl, fileId, 0, int64(chunk.Size), chunk.CipherKey)
	if err != nil {
		return nil, fmt.Errorf("download chunk data: %w", err)
	}
	// For now, we'll just re-upload the data as-is.
	// In a full implementation, this would:
	// 1. Decrypt with old KMS key
	// 2. Re-encrypt with new KMS key
	// 3. Update metadata accordingly
	// Upload data with new key (placeholder implementation).
	if err := s3a.uploadChunkData(chunkData, assignResult, false); err != nil {
		return nil, fmt.Errorf("upload rotated data: %w", err)
	}
	return newChunk, nil
}
// IsSameObjectCopy determines if this is a same-object copy operation
func IsSameObjectCopy(r *http.Request, srcBucket, srcObject, dstBucket, dstObject string) bool {
return srcBucket == dstBucket && srcObject == dstObject

5
weed/s3api/s3api_object_handlers.go

@ -244,11 +244,6 @@ func (e *StreamError) Unwrap() error {
return e.Err
}
// newStreamError wraps err in a StreamError for the case where no response
// has been written to the client yet.
func newStreamError(err error) *StreamError {
	return &StreamError{
		Err:             err,
		ResponseWritten: false,
	}
}
// newStreamErrorWithResponse creates a StreamError for cases where response was already written
func newStreamErrorWithResponse(err error) *StreamError {
return &StreamError{Err: err, ResponseWritten: true}

12
weed/s3api/s3api_object_handlers_copy.go

@ -2099,18 +2099,6 @@ func (s3a *S3ApiServer) copyCrossEncryptionChunk(chunk *filer_pb.FileChunk, sour
return dstChunk, nil
}
// getEncryptionTypeString names the active encryption mode for log output.
// Precedence when several flags are set: SSE-C, then SSE-KMS, then SSE-S3;
// "Plain" when none apply.
func (s3a *S3ApiServer) getEncryptionTypeString(isSSEC, isSSEKMS, isSSES3 bool) string {
	switch {
	case isSSEC:
		return s3_constants.SSETypeC
	case isSSEKMS:
		return s3_constants.SSETypeKMS
	case isSSES3:
		return s3_constants.SSETypeS3
	default:
		return "Plain"
	}
}
// copyChunksWithSSEC handles SSE-C aware copying with smart fast/slow path selection
// Returns chunks and destination metadata that should be applied to the destination entry
func (s3a *S3ApiServer) copyChunksWithSSEC(entry *filer_pb.Entry, r *http.Request) ([]*filer_pb.FileChunk, map[string][]byte, error) {

54
weed/s3api/s3api_object_handlers_list.go

@ -762,60 +762,6 @@ func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string,
return
}
// ensureDirectoryAllEmpty recursively checks whether parentDir/name contains
// no files, directly or in any subdirectory, and deletes the directory when
// it is empty. It returns isEmpty=true only after the directory has been
// deleted; any contained file short-circuits with (false, nil).
func (s3a *S3ApiServer) ensureDirectoryAllEmpty(filerClient filer_pb.SeaweedFilerClient, parentDir, name string) (isEmpty bool, err error) {
	// println("+ ensureDirectoryAllEmpty", dir, name)
	glog.V(4).Infof("+ isEmpty %s/%s", parentDir, name)
	defer glog.V(4).Infof("- isEmpty %s/%s %v", parentDir, name, isEmpty)
	var fileCounter int
	var subDirs []string
	currentDir := parentDir + "/" + name
	var startFrom string
	var isExhausted bool
	var foundEntry bool
	// Page through the listing 8 entries at a time, resuming after the last
	// entry seen, until a file is found, the listing is exhausted, or an
	// error occurs.
	for fileCounter == 0 && !isExhausted && err == nil {
		err = filer_pb.SeaweedList(context.Background(), filerClient, currentDir, "", func(entry *filer_pb.Entry, isLast bool) error {
			foundEntry = true
			// NOTE(review): IsOlderDir reads like it should be a directory
			// check — confirm against the filer_pb.Entry API.
			if entry.IsOlderDir() {
				subDirs = append(subDirs, entry.Name)
			} else {
				fileCounter++
			}
			// Remember where to resume the next page.
			startFrom = entry.Name
			isExhausted = isExhausted || isLast
			glog.V(4).Infof(" * %s/%s isLast: %t", currentDir, startFrom, isLast)
			return nil
		}, startFrom, false, 8)
		// NOTE(review): foundEntry is never reset between pages, so this
		// break can only trigger when the very first page is empty.
		if !foundEntry {
			break
		}
	}
	if err != nil {
		return false, err
	}
	if fileCounter > 0 {
		// At least one file directly inside: not empty, nothing deleted.
		return false, nil
	}
	// No direct files: recurse into each subdirectory; any non-empty child
	// makes this directory non-empty.
	for _, subDir := range subDirs {
		isSubEmpty, subErr := s3a.ensureDirectoryAllEmpty(filerClient, currentDir, subDir)
		if subErr != nil {
			return false, subErr
		}
		if !isSubEmpty {
			return false, nil
		}
	}
	glog.V(1).Infof("deleting empty folder %s", currentDir)
	if err = doDeleteEntry(filerClient, parentDir, name, true, false); err != nil {
		return
	}
	return true, nil
}
// compareWithDelimiter compares two strings for sorting, treating the delimiter character
// as having lower precedence than other characters to match AWS S3 behavior.
// For example, with delimiter '/', 'foo/' should come before 'foo+1/' even though '+' < '/' in ASCII.

18
weed/s3api/s3api_object_retention.go

@ -463,24 +463,6 @@ func (s3a *S3ApiServer) setObjectLegalHold(bucket, object, versionId string, leg
// PROTECTION ENFORCEMENT
// ====================================================================
// isObjectRetentionActive reports whether the object (or the given version)
// is currently protected by an unexpired retention period. An object with no
// retention configuration at all is simply not protected.
func (s3a *S3ApiServer) isObjectRetentionActive(bucket, object, versionId string) (bool, error) {
	retention, err := s3a.getObjectRetention(bucket, object, versionId)
	if err != nil {
		if errors.Is(err, ErrNoRetentionConfiguration) {
			// No retention configured: not under retention.
			return false, nil
		}
		return false, err
	}
	until := retention.RetainUntilDate
	return until != nil && until.After(time.Now()), nil
}
// getRetentionFromEntry extracts retention configuration from filer entry
func (s3a *S3ApiServer) getRetentionFromEntry(entry *filer_pb.Entry) (*ObjectRetention, bool, error) {
if entry.Extended == nil {

10
weed/s3api/s3api_object_versioning.go

@ -72,16 +72,6 @@ func setCachedListMetadata(versionsEntry, versionEntry *filer_pb.Entry) {
}
}
// clearCachedListMetadata drops every cached list-metadata key from the
// .versions directory entry's extended attributes, including the cached
// version metadata. A nil map is a no-op.
func clearCachedListMetadata(extended map[string][]byte) {
	if extended == nil {
		return
	}
	for _, key := range []string{
		s3_constants.ExtLatestVersionIdKey,
		s3_constants.ExtLatestVersionFileNameKey,
	} {
		delete(extended, key)
	}
	clearCachedVersionMetadata(extended)
}
// S3ListObjectVersionsResult - Custom struct for S3 list-object-versions response.
// This avoids conflicts with the XSD generated ListVersionsResult struct.
//

14
weed/s3api/s3api_put_handlers.go

@ -23,20 +23,6 @@ type PutToFilerEncryptionResult struct {
SSES3Metadata []byte
}
// calculatePartOffset maps a 1-based S3 part number to a unique base offset
// so that parts of a multipart upload never reuse an IV. Invalid part
// numbers (< 1 — AWS part numbers start at 1) are logged and mapped to 0.
func calculatePartOffset(partNumber int) int64 {
	if partNumber < 1 {
		glog.Errorf("Invalid partNumber: %d. Must be >= 1.", partNumber)
		return 0
	}
	// PartOffsetMultiplier comfortably exceeds the 5GB S3 part-size limit,
	// so the offset ranges of different parts can never overlap.
	return int64(partNumber-1) * s3_constants.PartOffsetMultiplier
}
// handleSSECEncryption processes SSE-C encryption for the data reader
func (s3a *S3ApiServer) handleSSECEncryption(r *http.Request, dataReader io.Reader) (io.Reader, *SSECustomerKey, []byte, s3err.ErrorCode) {
// Handle SSE-C encryption if requested

Loading…
Cancel
Save