obsolete code for s3 on filer read/write handlers

pull/7481/head
chrislu, 2 months ago
commit 235054db0b
Changed files:
  1. weed/server/filer_server_handlers_read.go (26 deletions)
  2. weed/server/filer_server_handlers_write_autochunk.go (50 deletions)
  3. weed/server/filer_server_handlers_write_upload.go (68 deletions)

weed/server/filer_server_handlers_read.go (26 deletions)

@@ -221,32 +221,6 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
 		w.Header().Set(s3_constants.AmzTagCount, strconv.Itoa(tagCount))
 	}

-	// Set SSE metadata headers for S3 API consumption
-	if sseIV, exists := entry.Extended[s3_constants.SeaweedFSSSEIV]; exists {
-		// Convert binary IV to base64 for HTTP header
-		ivBase64 := base64.StdEncoding.EncodeToString(sseIV)
-		w.Header().Set(s3_constants.SeaweedFSSSEIVHeader, ivBase64)
-	}
-
-	// Set SSE-C algorithm and key MD5 headers for S3 API response
-	if sseAlgorithm, exists := entry.Extended[s3_constants.AmzServerSideEncryptionCustomerAlgorithm]; exists {
-		w.Header().Set(s3_constants.AmzServerSideEncryptionCustomerAlgorithm, string(sseAlgorithm))
-	}
-	if sseKeyMD5, exists := entry.Extended[s3_constants.AmzServerSideEncryptionCustomerKeyMD5]; exists {
-		w.Header().Set(s3_constants.AmzServerSideEncryptionCustomerKeyMD5, string(sseKeyMD5))
-	}
-
-	if sseKMSKey, exists := entry.Extended[s3_constants.SeaweedFSSSEKMSKey]; exists {
-		// Convert binary KMS metadata to base64 for HTTP header
-		kmsBase64 := base64.StdEncoding.EncodeToString(sseKMSKey)
-		w.Header().Set(s3_constants.SeaweedFSSSEKMSKeyHeader, kmsBase64)
-	}
-
-	if _, exists := entry.Extended[s3_constants.SeaweedFSSSES3Key]; exists {
-		// Set standard S3 SSE-S3 response header (not the internal SeaweedFS header)
-		w.Header().Set(s3_constants.AmzServerSideEncryption, s3_constants.SSEAlgorithmAES256)
-	}
-
 	SetEtag(w, etag)

 	filename := entry.Name()
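Everything deleted here is one half of a header round-trip: binary SSE metadata held in entry.Extended (an AES IV, wrapped KMS key data) cannot travel in an HTTP header as raw bytes, so the handler base64-encoded it on the way out. A minimal sketch of that encode step, assuming stand-in header and key names for the s3_constants values:

package main

import (
	"encoding/base64"
	"fmt"
	"net/http"
	"net/http/httptest"
)

// Illustrative only: mirrors the removed read-handler logic, which exposed
// binary metadata (here, an AES IV) through an HTTP response header by
// base64-encoding it. The header and map key names are stand-ins for the
// s3_constants values used in the diff above.
func setSSEHeaders(w http.ResponseWriter, extended map[string][]byte) {
	if iv, ok := extended["SeaweedFS-SSE-IV"]; ok {
		w.Header().Set("X-SeaweedFS-SSE-IV", base64.StdEncoding.EncodeToString(iv))
	}
}

func main() {
	rec := httptest.NewRecorder()
	setSSEHeaders(rec, map[string][]byte{"SeaweedFS-SSE-IV": {0, 1, 2, 3}})
	fmt.Println(rec.Header().Get("X-SeaweedFS-SSE-IV")) // AAECAw==
}

The write path below performed the inverse decode, which is why both sides are removed together.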

weed/server/filer_server_handlers_write_autochunk.go (50 deletions)

@@ -3,7 +3,6 @@ package weed_server
 import (
 	"bytes"
 	"context"
-	"encoding/base64"
 	"errors"
 	"fmt"
 	"io"
@@ -174,10 +173,6 @@ func skipCheckParentDirEntry(r *http.Request) bool {
 	return r.URL.Query().Get("skipCheckParentDir") == "true"
 }

-func isS3Request(r *http.Request) bool {
-	return r.Header.Get(s3_constants.AmzAuthType) != "" || r.Header.Get("X-Amz-Date") != ""
-}
-
 func (fs *FilerServer) checkPermissions(ctx context.Context, r *http.Request, fileName string) error {
 	fullPath := fs.fixFilePath(ctx, r, fileName)
 	enforced, err := fs.wormEnforcedForEntry(ctx, fullPath)
@@ -357,52 +352,7 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
 		}
 	}

-	// Process SSE metadata headers sent by S3 API and store in entry extended metadata
-	if sseIVHeader := r.Header.Get(s3_constants.SeaweedFSSSEIVHeader); sseIVHeader != "" {
-		// Decode base64-encoded IV and store in metadata
-		if ivData, err := base64.StdEncoding.DecodeString(sseIVHeader); err == nil {
-			entry.Extended[s3_constants.SeaweedFSSSEIV] = ivData
-			glog.V(4).Infof("Stored SSE-C IV metadata for %s", entry.FullPath)
-		} else {
-			glog.Errorf("Failed to decode SSE-C IV header for %s: %v", entry.FullPath, err)
-		}
-	}
-
-	// Store SSE-C algorithm and key MD5 for proper S3 API response headers
-	if sseAlgorithm := r.Header.Get(s3_constants.AmzServerSideEncryptionCustomerAlgorithm); sseAlgorithm != "" {
-		entry.Extended[s3_constants.AmzServerSideEncryptionCustomerAlgorithm] = []byte(sseAlgorithm)
-		glog.V(4).Infof("Stored SSE-C algorithm metadata for %s", entry.FullPath)
-	}
-	if sseKeyMD5 := r.Header.Get(s3_constants.AmzServerSideEncryptionCustomerKeyMD5); sseKeyMD5 != "" {
-		entry.Extended[s3_constants.AmzServerSideEncryptionCustomerKeyMD5] = []byte(sseKeyMD5)
-		glog.V(4).Infof("Stored SSE-C key MD5 metadata for %s", entry.FullPath)
-	}
-
-	if sseKMSHeader := r.Header.Get(s3_constants.SeaweedFSSSEKMSKeyHeader); sseKMSHeader != "" {
-		// Decode base64-encoded KMS metadata and store
-		if kmsData, err := base64.StdEncoding.DecodeString(sseKMSHeader); err == nil {
-			entry.Extended[s3_constants.SeaweedFSSSEKMSKey] = kmsData
-			glog.V(4).Infof("Stored SSE-KMS metadata for %s", entry.FullPath)
-		} else {
-			glog.Errorf("Failed to decode SSE-KMS metadata header for %s: %v", entry.FullPath, err)
-		}
-	}
-
-	if sseS3Header := r.Header.Get(s3_constants.SeaweedFSSSES3Key); sseS3Header != "" {
-		// Decode base64-encoded S3 metadata and store
-		if s3Data, err := base64.StdEncoding.DecodeString(sseS3Header); err == nil {
-			entry.Extended[s3_constants.SeaweedFSSSES3Key] = s3Data
-			glog.V(4).Infof("Stored SSE-S3 metadata for %s", entry.FullPath)
-		} else {
-			glog.Errorf("Failed to decode SSE-S3 metadata header for %s: %v", entry.FullPath, err)
-		}
-	}
-
 	dbErr := fs.filer.CreateEntry(ctx, entry, false, false, nil, skipCheckParentDirEntry(r), so.MaxFileNameLength)
-	// In test_bucket_listv2_delimiter_basic, the valid object key is the parent folder
-	if dbErr != nil && strings.HasSuffix(dbErr.Error(), " is a file") && isS3Request(r) {
-		dbErr = fs.filer.CreateEntry(ctx, entry, false, false, nil, true, so.MaxFileNameLength)
-	}
 	if dbErr != nil {
 		replyerr = dbErr
 		filerResult.Error = dbErr.Error()
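These two hunks remove the filer's write-side S3 awareness. The SSE block mirrored the read path, base64-decoding the gateway's headers back into entry.Extended. The retry block worked around S3 semantics where a parent "directory" can itself be an object key (the test_bucket_listv2_delimiter_basic case), retrying CreateEntry with the parent-directory check skipped, but only for requests that looked like S3 traffic. A minimal sketch of that heuristic and retry, with createEntry as a hypothetical stand-in for fs.filer.CreateEntry and "X-Amz-Auth-Type" standing in for s3_constants.AmzAuthType:

package main

import (
	"errors"
	"fmt"
	"net/http"
	"strings"
)

// Illustrative only: reproduces the removed heuristic that classified a
// request as coming from the S3 gateway by sniffing auth-related headers.
func isS3Request(r *http.Request) bool {
	return r.Header.Get("X-Amz-Auth-Type") != "" || r.Header.Get("X-Amz-Date") != ""
}

// createEntry is a hypothetical stand-in for fs.filer.CreateEntry; only the
// skipCheckParentDir flag matters for this sketch.
func createEntry(skipCheckParentDir bool) error {
	if !skipCheckParentDir {
		return errors.New("/bucket/key is a file")
	}
	return nil
}

func main() {
	r, _ := http.NewRequest(http.MethodPut, "/bucket/key/obj", nil)
	r.Header.Set("X-Amz-Date", "20240101T000000Z")

	// The removed fallback: if the first attempt fails because the parent
	// path is actually an object, retry with the parent check skipped, but
	// only for S3-originated requests.
	err := createEntry(false)
	if err != nil && strings.HasSuffix(err.Error(), " is a file") && isS3Request(r) {
		err = createEntry(true)
	}
	fmt.Println("final error:", err) // final error: <nil>
}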

weed/server/filer_server_handlers_write_upload.go (68 deletions)

@@ -4,7 +4,6 @@ import (
 	"bytes"
 	"context"
 	"crypto/md5"
-	"encoding/base64"
 	"fmt"
 	"hash"
 	"io"
@@ -15,12 +14,9 @@ import (
 	"slices"

-	"encoding/json"
-
 	"github.com/seaweedfs/seaweedfs/weed/glog"
 	"github.com/seaweedfs/seaweedfs/weed/operation"
 	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
-	"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
 	"github.com/seaweedfs/seaweedfs/weed/security"
 	"github.com/seaweedfs/seaweedfs/weed/stats"
 	"github.com/seaweedfs/seaweedfs/weed/util"
@@ -248,70 +244,6 @@ func (fs *FilerServer) dataToChunkWithSSE(ctx context.Context, r *http.Request,
 	var sseType filer_pb.SSEType = filer_pb.SSEType_NONE
 	var sseMetadata []byte

-	if r != nil {
-		// Check for SSE-KMS
-		sseKMSHeaderValue := r.Header.Get(s3_constants.SeaweedFSSSEKMSKeyHeader)
-		if sseKMSHeaderValue != "" {
-			sseType = filer_pb.SSEType_SSE_KMS
-			if kmsData, err := base64.StdEncoding.DecodeString(sseKMSHeaderValue); err == nil {
-				sseMetadata = kmsData
-				glog.V(4).InfofCtx(ctx, "Storing SSE-KMS metadata for chunk %s at offset %d", fileId, chunkOffset)
-			} else {
-				glog.V(1).InfofCtx(ctx, "Failed to decode SSE-KMS metadata for chunk %s: %v", fileId, err)
-			}
-		} else if r.Header.Get(s3_constants.AmzServerSideEncryptionCustomerAlgorithm) != "" {
-			// SSE-C: Create per-chunk metadata for unified handling
-			sseType = filer_pb.SSEType_SSE_C
-			// Get SSE-C metadata from headers to create unified per-chunk metadata
-			sseIVHeader := r.Header.Get(s3_constants.SeaweedFSSSEIVHeader)
-			keyMD5Header := r.Header.Get(s3_constants.AmzServerSideEncryptionCustomerKeyMD5)
-			if sseIVHeader != "" && keyMD5Header != "" {
-				// Decode IV from header
-				if ivData, err := base64.StdEncoding.DecodeString(sseIVHeader); err == nil {
-					// Create SSE-C metadata with chunk offset = chunkOffset for proper IV calculation
-					ssecMetadataStruct := struct {
-						Algorithm  string `json:"algorithm"`
-						IV         string `json:"iv"`
-						KeyMD5     string `json:"keyMD5"`
-						PartOffset int64  `json:"partOffset"`
-					}{
-						Algorithm:  "AES256",
-						IV:         base64.StdEncoding.EncodeToString(ivData),
-						KeyMD5:     keyMD5Header,
-						PartOffset: chunkOffset,
-					}
-					if ssecMetadata, serErr := json.Marshal(ssecMetadataStruct); serErr == nil {
-						sseMetadata = ssecMetadata
-					} else {
-						glog.V(1).InfofCtx(ctx, "Failed to serialize SSE-C metadata for chunk %s: %v", fileId, serErr)
-					}
-				} else {
-					glog.V(1).InfofCtx(ctx, "Failed to decode SSE-C IV for chunk %s: %v", fileId, err)
-				}
-			} else {
-				glog.V(4).InfofCtx(ctx, "SSE-C chunk %s missing IV or KeyMD5 header", fileId)
-			}
-		} else if r.Header.Get(s3_constants.SeaweedFSSSES3Key) != "" {
-			// SSE-S3: Server-side encryption with server-managed keys
-			// Set the correct SSE type for SSE-S3 chunks to maintain proper tracking
-			sseType = filer_pb.SSEType_SSE_S3
-			// Get SSE-S3 metadata from headers
-			sseS3Header := r.Header.Get(s3_constants.SeaweedFSSSES3Key)
-			if sseS3Header != "" {
-				if s3Data, err := base64.StdEncoding.DecodeString(sseS3Header); err == nil {
-					// For SSE-S3, store metadata at chunk level for consistency with SSE-KMS/SSE-C
-					glog.V(4).InfofCtx(ctx, "Storing SSE-S3 metadata for chunk %s at offset %d", fileId, chunkOffset)
-					sseMetadata = s3Data
-				} else {
-					glog.V(1).InfofCtx(ctx, "Failed to decode SSE-S3 metadata for chunk %s: %v", fileId, err)
-				}
-			}
-		}
-	}
-
 	// Create chunk with SSE metadata if available
 	var chunk *filer_pb.FileChunk
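The bulk of this hunk is the per-chunk SSE-C metadata. Each chunk carried its algorithm, base64-encoded IV, the customer key's MD5, and PartOffset, the chunk's byte offset within the uploaded part, so a reader decrypting from any chunk boundary could re-derive the correct IV position. A minimal sketch of that JSON shape, with the field names taken from the diff and a hypothetical key MD5 value:

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// Illustrative only: the shape of the per-chunk SSE-C metadata that the
// removed code serialized for each chunk; JSON tags match the diff above.
type ssecChunkMetadata struct {
	Algorithm  string `json:"algorithm"`
	IV         string `json:"iv"`
	KeyMD5     string `json:"keyMD5"`
	PartOffset int64  `json:"partOffset"`
}

func main() {
	iv := make([]byte, 16) // AES block-sized IV; zeroed here for the demo

	// PartOffset records where this chunk starts in the uploaded part so the
	// decryptor can position the IV correctly when reading mid-object.
	meta := ssecChunkMetadata{
		Algorithm:  "AES256",
		IV:         base64.StdEncoding.EncodeToString(iv),
		KeyMD5:     "q1B8mUT3vX9gk0Jc8dAtkQ==", // hypothetical customer key MD5
		PartOffset: 4 << 20,                    // e.g. second 4 MiB chunk of a part
	}

	b, err := json.Marshal(meta)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}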
