
implement s3 streaming-unsigned-payload-trailer (#6539)

* implement s3 streaming-unsigned-payload-trailer

* chore: remove print
Tom Crasset committed 2 weeks ago (committed by GitHub), commit 9604db2c93
Changed files:
  1. weed/s3api/chunked_reader_v4.go (288 changed lines)
  2. weed/s3api/chunked_reader_v4_test.go (139 changed lines)
  3. weed/s3api/s3api_object_handlers_multipart.go (2 changed lines)
  4. weed/s3api/s3api_object_handlers_put.go (2 changed lines)
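
For orientation, the wire format this PR adds support for looks roughly as follows. This is a minimal Go sketch, not code from the PR: it assembles an aws-chunked body the way the new test fixtures do, assuming a single data chunk and a CRC32 trailer; the helper name buildUnsignedTrailerBody is illustrative only.

package main

import (
    "encoding/base64"
    "fmt"
    "hash/crc32"
    "strings"
)

// buildUnsignedTrailerBody assembles an aws-chunked request body for a
// STREAMING-UNSIGNED-PAYLOAD-TRAILER upload: hex-sized chunks without
// chunk signatures, a zero-length final chunk, and a trailing
// x-amz-checksum-crc32 line carrying base64(CRC32(payload)).
// Illustrative helper only; it is not part of the PR.
func buildUnsignedTrailerBody(payload string) string {
    var b strings.Builder

    // One data chunk: "<size-in-hex>\r\n<data>\r\n".
    fmt.Fprintf(&b, "%x\r\n%s\r\n", len(payload), payload)

    // Zero-length chunk terminates the data section (some clients also send a CRLF here).
    b.WriteString("0\r\n")

    // Trailer line: "<checksum-name>:<base64 digest>\n", followed by the closing CRLFs.
    h := crc32.NewIEEE()
    h.Write([]byte(payload))
    checksum := base64.StdEncoding.EncodeToString(h.Sum(nil))
    b.WriteString("x-amz-checksum-crc32:" + checksum + "\n\r\n\r\n")

    return b.String()
}

func main() {
    // The request itself would also carry:
    //   x-amz-content-sha256: STREAMING-UNSIGNED-PAYLOAD-TRAILER
    //   x-amz-trailer: x-amz-checksum-crc32
    //   content-encoding: aws-chunked
    //   x-amz-decoded-content-length: <payload length>
    fmt.Printf("%q\n", buildUnsignedTrailerBody(strings.Repeat("a", 32)))
}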

weed/s3api/chunked_reader_v4.go (288 changed lines)

@@ -21,10 +21,15 @@ package s3api
 import (
     "bufio"
     "bytes"
+    "crypto/sha1"
     "crypto/sha256"
+    "encoding/base64"
     "encoding/hex"
     "errors"
+    "fmt"
     "hash"
+    "hash/crc32"
+    "hash/crc64"
     "io"
     "net/http"
     "time"
@@ -139,14 +144,51 @@ var errLineTooLong = errors.New("header line too long")
 // Malformed encoding is generated when chunk header is wrongly formed.
 var errMalformedEncoding = errors.New("malformed chunked encoding")

-// newSignV4ChunkedReader returns a new s3ChunkedReader that translates the data read from r
+// newChunkedReader returns a new s3ChunkedReader that translates the data read from r
 // out of HTTP "chunked" format before returning it.
 // The s3ChunkedReader returns io.EOF when the final 0-length chunk is read.
-func (iam *IdentityAccessManagement) newSignV4ChunkedReader(req *http.Request) (io.ReadCloser, s3err.ErrorCode) {
-    ident, seedSignature, region, seedDate, errCode := iam.calculateSeedSignature(req)
+func (iam *IdentityAccessManagement) newChunkedReader(req *http.Request) (io.ReadCloser, s3err.ErrorCode) {
+    glog.V(3).Infof("creating a new newSignV4ChunkedReader")
+
+    contentSha256Header := req.Header.Get("X-Amz-Content-Sha256")
+    authorizationHeader := req.Header.Get("Authorization")
+
+    var ident *Credential
+    var seedSignature, region string
+    var seedDate time.Time
+    var errCode s3err.ErrorCode
+    switch contentSha256Header {
+    // Payload for STREAMING signature should be 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD'
+    case streamingContentSHA256:
+        glog.V(3).Infof("streaming content sha256")
+        ident, seedSignature, region, seedDate, errCode = iam.calculateSeedSignature(req)
+        if errCode != s3err.ErrNone {
+            return nil, errCode
+        }
+    case streamingUnsignedPayload:
+        glog.V(3).Infof("streaming unsigned payload")
+        if authorizationHeader != "" {
+            // We do not need to pass the seed signature to the Reader as each chunk is not signed,
+            // but we do compute it to verify the caller has the correct permissions.
+            _, _, _, _, errCode = iam.calculateSeedSignature(req)
             if errCode != s3err.ErrNone {
                 return nil, errCode
             }
+        }
+    }
+
+    // Get the checksum algorithm from the x-amz-trailer Header.
+    amzTrailerHeader := req.Header.Get("x-amz-trailer")
+    checksumAlgorithm, err := extractChecksumAlgorithm(amzTrailerHeader)
+    if err != nil {
+        glog.V(3).Infof("error extracting checksum algorithm: %v", err)
+        return nil, s3err.ErrInvalidRequest
+    }
+
+    checkSumWriter := getCheckSumWriter(checksumAlgorithm)
+
     return &s3ChunkedReader{
         cred:              ident,
         reader:            bufio.NewReader(req.Body),
@@ -154,11 +196,33 @@ func (iam *IdentityAccessManagement) newSignV4ChunkedReader(req *http.Request) (
         seedDate:          seedDate,
         region:            region,
         chunkSHA256Writer: sha256.New(),
+        checkSumAlgorithm: checksumAlgorithm.String(),
+        checkSumWriter:    checkSumWriter,
         state:             readChunkHeader,
         iam:               iam,
     }, s3err.ErrNone
 }

+func extractChecksumAlgorithm(amzTrailerHeader string) (ChecksumAlgorithm, error) {
+    // Extract checksum algorithm from the x-amz-trailer header.
+    switch amzTrailerHeader {
+    case "x-amz-checksum-crc32":
+        return ChecksumAlgorithmCRC32, nil
+    case "x-amz-checksum-crc32c":
+        return ChecksumAlgorithmCRC32C, nil
+    case "x-amz-checksum-crc64nvme":
+        return ChecksumAlgorithmCRC64NVMe, nil
+    case "x-amz-checksum-sha1":
+        return ChecksumAlgorithmSHA1, nil
+    case "x-amz-checksum-sha256":
+        return ChecksumAlgorithmSHA256, nil
+    case "":
+        return ChecksumAlgorithmNone, nil
+    default:
+        return ChecksumAlgorithmNone, errors.New("unsupported checksum algorithm '" + amzTrailerHeader + "'")
+    }
+}
+
 // Represents the overall state that is required for decoding a
 // AWS Signature V4 chunked reader.
 type s3ChunkedReader struct {
@@ -169,7 +233,9 @@ type s3ChunkedReader struct {
     region            string
     state             chunkState
     lastChunk         bool
-    chunkSignature    string
+    chunkSignature    string // Empty string if unsigned streaming upload.
+    checkSumAlgorithm string // Empty string if no checksum algorithm is specified.
+    checkSumWriter    hash.Hash
     chunkSHA256Writer hash.Hash // Calculates sha256 of chunk data.
     n                 uint64    // Unread bytes in chunk
     err               error
@@ -179,8 +245,11 @@ type s3ChunkedReader struct {
 // Read chunk reads the chunk token signature portion.
 func (cr *s3ChunkedReader) readS3ChunkHeader() {
     // Read the first chunk line until CRLF.
-    var hexChunkSize, hexChunkSignature []byte
-    hexChunkSize, hexChunkSignature, cr.err = readChunkLine(cr.reader)
+    var bytesRead, hexChunkSize, hexChunkSignature []byte
+    bytesRead, cr.err = readChunkLine(cr.reader)
+    // Parse s3 specific chunk extension and fetch the values.
+    hexChunkSize, hexChunkSignature = parseS3ChunkExtension(bytesRead)
     if cr.err != nil {
         return
     }
@@ -192,8 +261,14 @@ func (cr *s3ChunkedReader) readS3ChunkHeader() {
     if cr.n == 0 {
         cr.err = io.EOF
     }

     // Save the incoming chunk signature.
+    if hexChunkSignature == nil {
+        // We are using unsigned streaming upload.
+        cr.chunkSignature = ""
+    } else {
         cr.chunkSignature = string(hexChunkSignature)
+    }
 }

 type chunkState int
@@ -202,7 +277,9 @@
     readChunkHeader chunkState = iota
     readChunkTrailer
     readChunk
+    readTrailerChunk
     verifyChunk
+    verifyChecksum
     eofChunk
 )
@@ -215,8 +292,12 @@ func (cs chunkState) String() string {
         stateString = "readChunkTrailer"
     case readChunk:
         stateString = "readChunk"
+    case readTrailerChunk:
+        stateString = "readTrailerChunk"
     case verifyChunk:
         stateString = "verifyChunk"
+    case verifyChecksum:
+        stateString = "verifyChecksum"
     case eofChunk:
         stateString = "eofChunk"
@@ -246,11 +327,81 @@ func (cr *s3ChunkedReader) Read(buf []byte) (n int, err error) {
         }
         cr.state = readChunk
     case readChunkTrailer:
-        cr.err = readCRLF(cr.reader)
-        if cr.err != nil {
+        err = peekCRLF(cr.reader)
+        isTrailingChunk := cr.n == 0 && cr.lastChunk
+
+        if !isTrailingChunk {
+            // If we're not in the trailing chunk, we should consume the bytes no matter what.
+            // The error returned by peekCRLF is the same as the one by readCRLF.
+            readCRLF(cr.reader)
+            cr.err = err
+        } else if err != nil && err != errMalformedEncoding {
+            cr.err = err
             return 0, errMalformedEncoding
+        } else { // equivalent to isTrailingChunk && err == errMalformedEncoding
+            // FIXME: The "right" structure of the last chunk as provided by the examples in the
+            // AWS documentation is "0\r\n\r\n" instead of "0\r\n", but some s3 clients when calling with
+            // streaming-unsigned-payload-trailer omit the last CRLF. To avoid returning an error, we need to accept both.
+            // We arrive here when we're at the end of the 0-byte chunk; depending on the client implementation,
+            // the client may or may not send the optional CRLF after the 0-byte chunk.
+            // If the client sends the optional CRLF, we should consume it.
+            if err == nil {
+                readCRLF(cr.reader)
+            }
         }
+
+        // If we're using unsigned streaming upload, there is no signature to verify at each chunk.
+        if cr.chunkSignature != "" {
             cr.state = verifyChunk
+        } else if cr.lastChunk {
+            cr.state = readTrailerChunk
+        } else {
+            cr.state = readChunkHeader
+        }
+
+    case readTrailerChunk:
+        // When using unsigned upload, this would be the raw contents of the trailer chunk:
+        //
+        // x-amz-checksum-crc32:YABb/g==\n\r\n\r\n  // Trailer chunk (note optional \n character)
+        // \r\n                                     // CRLF
+        //
+        // When using signed upload with an additional checksum algorithm, this would be the raw contents of the trailer chunk:
+        //
+        // x-amz-checksum-crc32:YABb/g==\n\r\n      // Trailer chunk (note optional \n character)
+        // trailer-signature\r\n
+        // \r\n                                     // CRLF
+        //
+        // This implementation currently only supports the first case.
+        // TODO: Implement the second case (signed upload with additional checksum computation for each chunk)
+
+        extractedCheckSumAlgorithm, extractedChecksum := parseChunkChecksum(cr.reader)
+        if extractedCheckSumAlgorithm.String() != cr.checkSumAlgorithm {
+            errorMessage := fmt.Sprintf("checksum algorithm in trailer '%s' does not match the one advertised in the header '%s'", extractedCheckSumAlgorithm.String(), cr.checkSumAlgorithm)
+            glog.V(3).Infof(errorMessage)
+            cr.err = errors.New(errorMessage)
+            return 0, cr.err
+        }
+
+        computedChecksum := cr.checkSumWriter.Sum(nil)
+        base64Checksum := base64.StdEncoding.EncodeToString(computedChecksum)
+        if string(extractedChecksum) != base64Checksum {
+            // TODO: Return BadDigest
+            glog.V(3).Infof("payload checksum '%s' does not match provided checksum '%s'", base64Checksum, string(extractedChecksum))
+            cr.err = errors.New("payload checksum does not match")
+            return 0, cr.err
+        }
+
+        // TODO: Extract signature from trailer chunk and verify it.
+        // For now, we just read the trailer chunk and discard it.
+
+        // Reading remaining CRLF.
+        for i := 0; i < 2; i++ {
+            cr.err = readCRLF(cr.reader)
+        }
+
+        cr.state = eofChunk
     case readChunk:
         // There is no more space left in the request buffer.
         if len(buf) == 0 {
@@ -275,6 +426,11 @@ func (cr *s3ChunkedReader) Read(buf []byte) (n int, err error) {
         // Calculate sha256.
         cr.chunkSHA256Writer.Write(rbuf[:n0])
+        // Compute checksum
+        if cr.checkSumWriter != nil {
+            cr.checkSumWriter.Write(rbuf[:n0])
+        }
+
         // Update the bytes read into request buffer so far.
         n += n0
         buf = buf[n0:]
@@ -333,12 +489,29 @@ func (cr *s3ChunkedReader) getChunkSignature(hashedChunk string) string {
 // readCRLF - check if reader only has '\r\n' CRLF character.
 // returns malformed encoding if it doesn't.
-func readCRLF(reader io.Reader) error {
+func readCRLF(reader *bufio.Reader) error {
     buf := make([]byte, 2)
-    _, err := io.ReadFull(reader, buf[:2])
+    _, err := reader.Read(buf)
     if err != nil {
         return err
     }
+    return checkCRLF(buf)
+}
+
+// peekCRLF - peeks at the next two bytes to check for CRLF without consuming them.
+func peekCRLF(reader *bufio.Reader) error {
+    peeked, err := reader.Peek(2)
+    if err != nil {
+        return err
+    }
+    if err := checkCRLF(peeked); err != nil {
+        return err
+    }
+    return nil
+}
+
+// checkCRLF - checks if the buffer contains '\r\n' CRLF character.
+func checkCRLF(buf []byte) error {
     if buf[0] != '\r' || buf[1] != '\n' {
         return errMalformedEncoding
     }
@@ -349,7 +522,7 @@ func readCRLF(reader io.Reader) error {
 // Give up if the line exceeds maxLineLength.
 // The returned bytes are owned by the bufio.Reader
 // so they are only valid until the next bufio read.
-func readChunkLine(b *bufio.Reader) ([]byte, []byte, error) {
+func readChunkLine(b *bufio.Reader) ([]byte, error) {
     buf, err := b.ReadSlice('\n')
     if err != nil {
         // We always know when EOF is coming.
@@ -359,14 +532,13 @@ func readChunkLine(b *bufio.Reader) ([]byte, []byte, error) {
         } else if err == bufio.ErrBufferFull {
             err = errLineTooLong
         }
-        return nil, nil, err
+        return nil, err
     }
     if len(buf) >= maxLineLength {
-        return nil, nil, errLineTooLong
+        return nil, errLineTooLong
     }
-    // Parse s3 specific chunk extension and fetch the values.
-    hexChunkSize, hexChunkSignature := parseS3ChunkExtension(buf)
-    return hexChunkSize, hexChunkSignature, nil
+    return buf, nil
 }

 // trimTrailingWhitespace - trim trailing white space.
@@ -393,12 +565,50 @@ func parseS3ChunkExtension(buf []byte) ([]byte, []byte) {
     buf = trimTrailingWhitespace(buf)
     semi := bytes.Index(buf, []byte(s3ChunkSignatureStr))
     // Chunk signature not found, return the whole buffer.
+    // This means we're using unsigned streaming upload.
     if semi == -1 {
         return buf, nil
     }
     return buf[:semi], parseChunkSignature(buf[semi:])
 }

+func parseChunkChecksum(b *bufio.Reader) (ChecksumAlgorithm, []byte) {
+    // When using unsigned upload, this would be the raw contents of the trailer chunk:
+    //
+    // x-amz-checksum-crc32:YABb/g==\n\r\n\r\n  // Trailer chunk (note optional \n character)
+    // \r\n                                     // CRLF
+    //
+    // When using signed upload with an additional checksum algorithm, this would be the raw contents of the trailer chunk:
+    //
+    // x-amz-checksum-crc32:YABb/g==\n\r\n      // Trailer chunk (note optional \n character)
+    // trailer-signature\r\n
+    // \r\n                                     // CRLF
+    //
+    // x-amz-checksum-crc32:YABb/g==\n
+    bytesRead, err := readChunkLine(b)
+    if err != nil {
+        return ChecksumAlgorithmNone, nil
+    }
+
+    // Split on ':'
+    parts := bytes.SplitN(bytesRead, []byte(":"), 2)
+    checksumKey := string(parts[0])
+    checksumValue := parts[1]
+
+    // Discard all trailing whitespace characters
+    checksumValue = trimTrailingWhitespace(checksumValue)
+
+    // If the checksum key is not a supported checksum algorithm, return an error.
+    // TODO: Bubble that error up to the caller
+    extractedAlgorithm, err := extractChecksumAlgorithm(checksumKey)
+    if err != nil {
+        return ChecksumAlgorithmNone, nil
+    }
+
+    return extractedAlgorithm, checksumValue
+}
+
 // parseChunkSignature - parse chunk signature.
 func parseChunkSignature(chunk []byte) []byte {
     chunkSplits := bytes.SplitN(chunk, []byte(s3ChunkSignatureStr), 2)
@@ -426,3 +636,49 @@ func parseHexUint(v []byte) (n uint64, err error) {
     }
     return
 }
+
+type ChecksumAlgorithm int
+
+const (
+    ChecksumAlgorithmNone ChecksumAlgorithm = iota
+    ChecksumAlgorithmCRC32
+    ChecksumAlgorithmCRC32C
+    ChecksumAlgorithmCRC64NVMe
+    ChecksumAlgorithmSHA1
+    ChecksumAlgorithmSHA256
+)
+
+func (ca ChecksumAlgorithm) String() string {
+    switch ca {
+    case ChecksumAlgorithmCRC32:
+        return "CRC32"
+    case ChecksumAlgorithmCRC32C:
+        return "CRC32C"
+    case ChecksumAlgorithmCRC64NVMe:
+        return "CRC64NVMe"
+    case ChecksumAlgorithmSHA1:
+        return "SHA1"
+    case ChecksumAlgorithmSHA256:
+        return "SHA256"
+    case ChecksumAlgorithmNone:
+        return ""
+    }
+    return ""
+}
+
+// getCheckSumWriter - get checksum writer.
+func getCheckSumWriter(checksumAlgorithm ChecksumAlgorithm) hash.Hash {
+    switch checksumAlgorithm {
+    case ChecksumAlgorithmCRC32:
+        return crc32.NewIEEE()
+    case ChecksumAlgorithmCRC32C:
+        return crc32.New(crc32.MakeTable(crc32.Castagnoli))
+    case ChecksumAlgorithmCRC64NVMe:
+        return crc64.New(crc64.MakeTable(crc64.ISO))
+    case ChecksumAlgorithmSHA1:
+        return sha1.New()
+    case ChecksumAlgorithmSHA256:
+        return sha256.New()
+    }
+    return nil
+}

weed/s3api/chunked_reader_v4_test.go (139 changed lines)

@@ -2,19 +2,20 @@ package s3api
 import (
     "bytes"
+    "encoding/base64"
+    "fmt"
     "io"
     "net/http"
     "strings"
     "sync"
     "testing"

+    "hash/crc32"
+
     "github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
     "github.com/stretchr/testify/assert"
 )

-// This test will implement the following scenario:
-// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#example-signature-calculations-streaming
 const (
     defaultTimestamp  = "20130524T000000Z"
     defaultBucketName = "examplebucket"
@@ -23,19 +24,26 @@
     defaultRegion     = "us-east-1"
 )

-func generatePayload() string {
+func generatestreamingAws4HmacSha256Payload() string {
+    // This test will implement the following scenario:
+    // https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#example-signature-calculations-streaming
     chunk1 := "10000;chunk-signature=ad80c730a21e5b8d04586a2213dd63b9a0e99e0e2307b0ade35a65485a288648\r\n" +
         strings.Repeat("a", 65536) + "\r\n"
     chunk2 := "400;chunk-signature=0055627c9e194cb4542bae2aa5492e3c1575bbb81b612b7d234b86a503ef5497\r\n" +
         strings.Repeat("a", 1024) + "\r\n"
-    chunk3 := "0;chunk-signature=b6c6ea8a5354eaf15b3cb7646744f4275b71ea724fed81ceb9323e279d449df9\r\n\r\n"
+    chunk3 := "0;chunk-signature=b6c6ea8a5354eaf15b3cb7646744f4275b71ea724fed81ceb9323e279d449df9\r\n" +
+        "\r\n" // The last chunk is empty
     payload := chunk1 + chunk2 + chunk3
     return payload
 }

-func NewRequest() (*http.Request, error) {
-    payload := generatePayload()
+func NewRequeststreamingAws4HmacSha256Payload() (*http.Request, error) {
+    // This test will implement the following scenario:
+    // https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#example-signature-calculations-streaming
+    payload := generatestreamingAws4HmacSha256Payload()
     req, err := http.NewRequest("PUT", "http://s3.amazonaws.com/examplebucket/chunkObject.txt", bytes.NewReader([]byte(payload)))
     if err != nil {
         return nil, err
@@ -53,13 +61,109 @@
     return req, nil
 }

-func TestNewSignV4ChunkedReader(t *testing.T) {
-    req, err := NewRequest()
+func TestNewSignV4ChunkedReaderstreamingAws4HmacSha256Payload(t *testing.T) {
+    // This test will implement the following scenario:
+    // https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#example-signature-calculations-streaming
+    req, err := NewRequeststreamingAws4HmacSha256Payload()
     if err != nil {
         t.Fatalf("Failed to create request: %v", err)
     }

+    iam := setupIam()
+
+    // The expected payload is a long string of 'a's
+    expectedPayload := strings.Repeat("a", 66560)
+    runWithRequest(iam, req, t, expectedPayload)
+}
+
+func generateStreamingUnsignedPayloadTrailerPayload(includeFinalCRLF bool) string {
+    // This test will implement the following scenario:
+    // https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+    chunk1 := "2000\r\n" + strings.Repeat("a", 8192) + "\r\n"
+    chunk2 := "2000\r\n" + strings.Repeat("a", 8192) + "\r\n"
+    chunk3 := "400\r\n" + strings.Repeat("a", 1024) + "\r\n"
+    chunk4 := "0\r\n" /* the last chunk is empty */
+    if includeFinalCRLF {
+        // Some clients omit the final CRLF, so we need to test that case as well
+        chunk4 += "\r\n"
+    }
+
+    data := strings.Repeat("a", 17408)
+    writer := crc32.NewIEEE()
+    _, err := writer.Write([]byte(data))
+    if err != nil {
+        fmt.Println("Error:", err)
+    }
+    checksum := writer.Sum(nil)
+    base64EncodedChecksum := base64.StdEncoding.EncodeToString(checksum)
+    trailer := "x-amz-checksum-crc32:" + base64EncodedChecksum + "\n\r\n\r\n\r\n"
+
+    payload := chunk1 + chunk2 + chunk3 + chunk4 + trailer
+    return payload
+}
+
+func NewRequestStreamingUnsignedPayloadTrailer(includeFinalCRLF bool) (*http.Request, error) {
+    // This test will implement the following scenario:
+    // https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+    payload := generateStreamingUnsignedPayloadTrailerPayload(includeFinalCRLF)
+    req, err := http.NewRequest("PUT", "http://amzn-s3-demo-bucket/Key+", bytes.NewReader([]byte(payload)))
+    if err != nil {
+        return nil, err
+    }
+
+    req.Header.Set("Host", "amzn-s3-demo-bucket")
+    req.Header.Set("x-amz-date", defaultTimestamp)
+    req.Header.Set("Content-Encoding", "aws-chunked")
+    req.Header.Set("x-amz-decoded-content-length", "17408")
+    req.Header.Set("x-amz-content-sha256", "STREAMING-UNSIGNED-PAYLOAD-TRAILER")
+    req.Header.Set("x-amz-trailer", "x-amz-checksum-crc32")
+
+    return req, nil
+}
+
+func TestNewSignV4ChunkedReaderStreamingUnsignedPayloadTrailer(t *testing.T) {
+    // This test will implement the following scenario:
+    // https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+    iam := setupIam()
+
+    req, err := NewRequestStreamingUnsignedPayloadTrailer(true)
+    if err != nil {
+        t.Fatalf("Failed to create request: %v", err)
+    }
+
+    // The expected payload is a long string of 'a's
+    expectedPayload := strings.Repeat("a", 17408)
+    runWithRequest(iam, req, t, expectedPayload)
+
+    req, err = NewRequestStreamingUnsignedPayloadTrailer(false)
+    if err != nil {
+        t.Fatalf("Failed to create request: %v", err)
+    }
+    runWithRequest(iam, req, t, expectedPayload)
+}
+
+func runWithRequest(iam IdentityAccessManagement, req *http.Request, t *testing.T, expectedPayload string) {
+    reader, errCode := iam.newChunkedReader(req)
+    assert.NotNil(t, reader)
+    assert.Equal(t, s3err.ErrNone, errCode)
+
+    data, err := io.ReadAll(reader)
+    if err != nil {
+        t.Fatalf("Failed to read data: %v", err)
+    }
+    assert.Equal(t, expectedPayload, string(data))
+}
+
+func setupIam() IdentityAccessManagement {
     // Create an IdentityAccessManagement instance
+    // Add default access keys and secrets
     iam := IdentityAccessManagement{
         identities:     []*Identity{},
         accessKeyIdent: map[string]*Identity{},
@@ -72,7 +176,6 @@ func TestNewSignV4ChunkedReader(t *testing.T) {
         isAuthEnabled: false,
     }

-    // Add default access keys and secrets
     iam.identities = append(iam.identities, &Identity{
         Name: "default",
         Credentials: []*Credential{
@@ -89,19 +192,5 @@
     })
     iam.accessKeyIdent[defaultAccessKeyId] = iam.identities[0]

-    // Call newSignV4ChunkedReader
-    reader, errCode := iam.newSignV4ChunkedReader(req)
-    assert.NotNil(t, reader)
-    assert.Equal(t, s3err.ErrNone, errCode)
-
-    data, err := io.ReadAll(reader)
-    if err != nil {
-        t.Fatalf("Failed to read data: %v", err)
-    }
-
-    // The expected payload a long string of 'a's
-    expectedPayload := strings.Repeat("a", 66560)
-    assert.Equal(t, expectedPayload, string(data))
+    return iam
 }

weed/s3api/s3api_object_handlers_multipart.go (2 changed lines)

@@ -235,7 +235,7 @@ func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Requ
     var s3ErrCode s3err.ErrorCode
     switch rAuthType {
     case authTypeStreamingSigned:
-        dataReader, s3ErrCode = s3a.iam.newSignV4ChunkedReader(r)
+        dataReader, s3ErrCode = s3a.iam.newChunkedReader(r)
     case authTypeSignedV2, authTypePresignedV2:
         _, s3ErrCode = s3a.iam.isReqAuthenticatedV2(r)
     case authTypePresigned, authTypeSigned:

weed/s3api/s3api_object_handlers_put.go (2 changed lines)

@@ -53,7 +53,7 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request)
     var s3ErrCode s3err.ErrorCode
     switch rAuthType {
     case authTypeStreamingSigned, authTypeStreamingUnsigned:
-        dataReader, s3ErrCode = s3a.iam.newSignV4ChunkedReader(r)
+        dataReader, s3ErrCode = s3a.iam.newChunkedReader(r)
     case authTypeSignedV2, authTypePresignedV2:
         _, s3ErrCode = s3a.iam.isReqAuthenticatedV2(r)
     case authTypePresigned, authTypeSigned:
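
Both handlers now route authTypeStreamingSigned and authTypeStreamingUnsigned through newChunkedReader, which picks its mode from the X-Amz-Content-Sha256 header. The sketch below shows that routing decision in isolation; classifyStreamingAuth and its constants are hypothetical stand-ins, not the actual SeaweedFS getRequestAuthType code.

package main

import (
    "fmt"
    "net/http"
)

type streamingAuth int

const (
    streamingAuthNone streamingAuth = iota
    streamingAuthSigned
    streamingAuthUnsigned
)

// classifyStreamingAuth mirrors the distinction the handlers rely on: both
// streaming values of X-Amz-Content-Sha256 end up in newChunkedReader, which
// then decides whether per-chunk signatures must be verified.
func classifyStreamingAuth(r *http.Request) streamingAuth {
    switch r.Header.Get("X-Amz-Content-Sha256") {
    case "STREAMING-AWS4-HMAC-SHA256-PAYLOAD":
        return streamingAuthSigned
    case "STREAMING-UNSIGNED-PAYLOAD-TRAILER":
        return streamingAuthUnsigned
    default:
        return streamingAuthNone
    }
}

func main() {
    r, _ := http.NewRequest(http.MethodPut, "http://localhost:8333/bucket/key", nil)
    r.Header.Set("X-Amz-Content-Sha256", "STREAMING-UNSIGNED-PAYLOAD-TRAILER")
    fmt.Println(classifyStreamingAuth(r) == streamingAuthUnsigned) // true
}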
