
remove obsolete

pull/7523/head
chrislu 2 weeks ago
commit e10cd469d9
  1. weed/s3api/s3_constants/header.go (1 line changed)
  2. weed/server/filer_server_handlers_read.go (20 lines changed)
  3. weed/server/filer_server_handlers_write_autochunk.go (14 lines changed)

weed/s3api/s3_constants/header.go (1 line changed)

@@ -45,7 +45,6 @@ const (
     AmzObjectTaggingDirective        = "X-Amz-Tagging-Directive"
     AmzTagCount                      = "x-amz-tagging-count"
-    SeaweedFSPartNumber              = "X-Seaweedfs-Part-Number"
     SeaweedFSUploadId                = "X-Seaweedfs-Upload-Id"
     SeaweedFSMultipartPartsCount     = "X-Seaweedfs-Multipart-Parts-Count"
     SeaweedFSMultipartPartBoundaries = "X-Seaweedfs-Multipart-Part-Boundaries" // JSON: [{part:1,start:0,end:2,etag:"abc"},{part:2,start:2,end:3,etag:"def"}]
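
The comment on SeaweedFSMultipartPartBoundaries documents a JSON list that maps each part number to its boundaries and per-part ETag. A minimal sketch of decoding such a header value, assuming the entries unmarshal into a struct with fields named after the keys in that comment (the struct type, and whether start/end are chunk indices or byte offsets, are assumptions, not taken from the SeaweedFS source; keys are quoted here to make the example valid JSON):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // partBoundary is a hypothetical shape for one entry of the
    // X-Seaweedfs-Multipart-Part-Boundaries value; field names follow
    // the keys shown in the constant's comment above.
    type partBoundary struct {
        Part  int    `json:"part"`
        Start int    `json:"start"` // start boundary of the part (chunk index or offset, not specified here)
        End   int    `json:"end"`   // end boundary of the part
        ETag  string `json:"etag"`
    }

    func main() {
        headerValue := `[{"part":1,"start":0,"end":2,"etag":"abc"},{"part":2,"start":2,"end":3,"etag":"def"}]`

        var boundaries []partBoundary
        if err := json.Unmarshal([]byte(headerValue), &boundaries); err != nil {
            fmt.Println("decode error:", err)
            return
        }
        for _, b := range boundaries {
            fmt.Printf("part %d: [%d, %d) etag=%s\n", b.Part, b.Start, b.End, b.ETag)
        }
    }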

weed/server/filer_server_handlers_read.go (20 lines changed)

@@ -2,8 +2,6 @@ package weed_server
 import (
     "context"
-    "encoding/base64"
-    "encoding/hex"
     "errors"
     "fmt"
     "io"
@@ -160,22 +158,8 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
         return
     }
-    var etag string
-    if partNumber, errNum := strconv.Atoi(r.Header.Get(s3_constants.SeaweedFSPartNumber)); errNum == nil {
-        if len(entry.Chunks) < partNumber {
-            stats.FilerHandlerCounter.WithLabelValues(stats.ErrorReadChunk).Inc()
-            w.WriteHeader(http.StatusBadRequest)
-            w.Write([]byte("InvalidPart"))
-            return
-        }
-        w.Header().Set(s3_constants.AmzMpPartsCount, strconv.Itoa(len(entry.Chunks)))
-        partChunk := entry.GetChunks()[partNumber-1]
-        md5, _ := base64.StdEncoding.DecodeString(partChunk.ETag)
-        etag = hex.EncodeToString(md5)
-        r.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", partChunk.Offset, uint64(partChunk.Offset)+partChunk.Size-1))
-    } else {
-        etag = filer.ETagEntry(entry)
-    }
+    // Generate ETag for response
+    etag := filer.ETagEntry(entry)
     w.Header().Set("Accept-Ranges", "bytes")
     // mime type
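
For reference, the branch removed above mapped an S3 part number onto exactly one chunk of the entry: it validated the part number, exposed the chunk count via AmzMpPartsCount, converted the chunk's base64-encoded MD5 into a hex ETag, and rewrote the request into a byte Range covering that chunk. A standalone restatement of that mapping, using a hypothetical chunk type rather than the actual SeaweedFS protobuf struct:

    package main

    import (
        "encoding/base64"
        "encoding/hex"
        "fmt"
    )

    // chunk stands in for the filer's per-chunk metadata (hypothetical fields).
    type chunk struct {
        Offset int64
        Size   uint64
        ETag   string // base64-encoded MD5, as the removed code assumed
    }

    // partRange returns the hex ETag and Range header value for a 1-based part
    // number, assuming one chunk per part as the removed branch did.
    func partRange(chunks []chunk, partNumber int) (etag, rangeHeader string, err error) {
        if partNumber < 1 || partNumber > len(chunks) {
            return "", "", fmt.Errorf("InvalidPart")
        }
        c := chunks[partNumber-1]
        md5Sum, err := base64.StdEncoding.DecodeString(c.ETag)
        if err != nil {
            return "", "", err
        }
        etag = hex.EncodeToString(md5Sum)
        rangeHeader = fmt.Sprintf("bytes=%d-%d", c.Offset, uint64(c.Offset)+c.Size-1)
        return etag, rangeHeader, nil
    }

    func main() {
        chunks := []chunk{{Offset: 0, Size: 4, ETag: base64.StdEncoding.EncodeToString([]byte{0xde, 0xad, 0xbe, 0xef})}}
        etag, rng, _ := partRange(chunks, 1)
        fmt.Println(etag, rng) // deadbeef bytes=0-3
    }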

weed/server/filer_server_handlers_write_autochunk.go (14 lines changed)

@@ -135,15 +135,9 @@ func (fs *FilerServer) doPutAutoChunk(ctx context.Context, w http.ResponseWriter
     if err := fs.checkPermissions(ctx, r, fileName); err != nil {
         return nil, nil, err
     }
-    // Disable TTL-based (creation time) deletion when S3 expiry (modification time) is enabled
+    // Note: S3 API now sets SeaweedFSExpiresS3 directly in metadata, not via header
+    // TTL handling is done based on metadata, not request headers
     soMaybeWithOutTTL := so
-    if so.TtlSeconds > 0 {
-        if s3ExpiresValue := r.Header.Get(s3_constants.SeaweedFSExpiresS3); s3ExpiresValue == "true" {
-            clone := *so
-            clone.TtlSeconds = 0
-            soMaybeWithOutTTL = &clone
-        }
-    }
     fileChunks, md5Hash, chunkOffset, err, smallContent := fs.uploadRequestToChunks(ctx, w, r, r.Body, chunkSize, fileName, contentType, contentLength, soMaybeWithOutTTL)
@@ -334,9 +328,7 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
     }
     entry.Extended = SaveAmzMetaData(r, entry.Extended, false)
-    if entry.TtlSec > 0 && r.Header.Get(s3_constants.SeaweedFSExpiresS3) == "true" {
-        entry.Extended[s3_constants.SeaweedFSExpiresS3] = []byte("true")
-    }
+    // Note: S3 API now sets SeaweedFSExpiresS3 directly in metadata via gRPC, not HTTP headers
     for k, v := range r.Header {
         if len(v) > 0 && len(v[0]) > 0 {
             if strings.HasPrefix(k, needle.PairNamePrefix) || k == "Cache-Control" || k == "Expires" || k == "Content-Disposition" {
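
Both removed blocks keyed S3 expiry off the SeaweedFSExpiresS3 request header; the new comments state that the flag now arrives directly in the entry's metadata via gRPC. A minimal sketch of a metadata-based check, assuming the flag is stored as the string "true" under that key in the entry's Extended map (the entry type, helper name, and placeholder key value below are illustrative, not the actual SeaweedFS implementation):

    package main

    import "fmt"

    // Placeholder for s3_constants.SeaweedFSExpiresS3; the real string value
    // is defined in weed/s3api/s3_constants and is not shown in this diff.
    const seaweedFSExpiresS3 = "Seaweed-Expires-S3"

    // entry is a stand-in for a filer entry with TTL and extended attributes.
    type entry struct {
        TtlSec   int32
        Extended map[string][]byte
    }

    // expiresViaS3 reports whether modification-time S3 expiry governs the entry,
    // in which case creation-time TTL deletion would be skipped.
    func expiresViaS3(e *entry) bool {
        return e.TtlSec > 0 && string(e.Extended[seaweedFSExpiresS3]) == "true"
    }

    func main() {
        e := &entry{TtlSec: 3600, Extended: map[string][]byte{seaweedFSExpiresS3: []byte("true")}}
        fmt.Println(expiresViaS3(e)) // true
    }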
