diff --git a/.github/workflows/binaries_release4.yml b/.github/workflows/binaries_release4.yml new file mode 100644 index 000000000..b246f1f98 --- /dev/null +++ b/.github/workflows/binaries_release4.yml @@ -0,0 +1,60 @@ +# This is a basic workflow to help you get started with Actions + +name: "go: build versioned binaries for linux with all tags" + +on: + push: + tags: + - '*' + + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + +# A workflow run is made up of one or more jobs that can run sequentially or in parallel +permissions: + contents: read + +jobs: + + build-release-binaries_linux: + permissions: + contents: write # for wangyoucao577/go-release-action to upload release assets + runs-on: ubuntu-latest + strategy: + matrix: + goos: [linux] + goarch: [amd64] + + # Steps represent a sequence of tasks that will be executed as part of the job + steps: + # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it + - uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2 + - name: Go Release Binaries Normal Volume Size + uses: wangyoucao577/go-release-action@16624612d4e2b73de613857a362d294700207fff # v1.22 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + goos: ${{ matrix.goos }} + goarch: ${{ matrix.goarch }} + overwrite: true + build_flags: -tags elastic,ydb,gocdk,hdfs,rocksdb + pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0 + # build_flags: -tags 5BytesOffset # optional, default is + ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}} + # Where to run `go build .` + project_path: weed + binary_name: weed + asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}_full" + - name: Go Release Large Disk Binaries + uses: wangyoucao577/go-release-action@16624612d4e2b73de613857a362d294700207fff # v1.22 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + goos: ${{ matrix.goos }} + goarch: ${{ matrix.goarch }} + overwrite: true + 
pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0 + build_flags: -tags 5BytesOffset,elastic,ydb,gocdk,hdfs,rocksdb + ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}} + # Where to run `go build .` + project_path: weed + binary_name: weed + asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}_full_large_disk" diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml new file mode 100644 index 000000000..142e4e963 --- /dev/null +++ b/.github/workflows/codeql.yml @@ -0,0 +1,43 @@ +name: "Code Scanning - Action" + +on: + pull_request: + +jobs: + CodeQL-Build: + # CodeQL runs on ubuntu-latest, windows-latest, and macos-latest + runs-on: ubuntu-latest + + permissions: + # required for all workflows + security-events: write + + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v2 + # Override language selection by uncommenting this and choosing your languages + with: + languages: go + + # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). + # If this step fails, then you should remove it and run the build manually (see below). + - name: Autobuild + uses: github/codeql-action/autobuild@v2 + + # ℹ️ Command-line programs to run using the OS shell. 
+ # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun + + # ✏️ If the Autobuild fails above, remove it and uncomment the following + # three lines and modify them (or add more) to build your code if your + # project uses a compiled language + + #- run: | + # make bootstrap + # make release + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v2 diff --git a/.github/workflows/container_release2.yml b/.github/workflows/container_release2.yml index 6da882e38..a02ab4f87 100644 --- a/.github/workflows/container_release2.yml +++ b/.github/workflows/container_release2.yml @@ -52,7 +52,8 @@ jobs: with: context: ./docker push: ${{ github.event_name != 'pull_request' }} - file: ./docker/Dockerfile.go_build_large + file: ./docker/Dockerfile.go_build + build-args: TAGS=5BytesOffset platforms: linux/amd64, linux/arm, linux/arm64, linux/386 tags: ${{ steps.docker_meta.outputs.tags }} labels: ${{ steps.docker_meta.outputs.labels }} diff --git a/.github/workflows/container_release4.yml b/.github/workflows/container_release4.yml new file mode 100644 index 000000000..92d776f79 --- /dev/null +++ b/.github/workflows/container_release4.yml @@ -0,0 +1,58 @@ +name: "docker: build release containers for all tags" + +on: + push: + tags: + - '*' + workflow_dispatch: [] + +permissions: + contents: read + +jobs: + build-default-release-container: + runs-on: [ubuntu-latest] + + steps: + - + name: Checkout + uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2 + - + name: Docker meta + id: docker_meta + uses: docker/metadata-action@69f6fc9d46f2f8bf0d5491e4aabe0bb8c6a4678a # v3 + with: + images: | + chrislusf/seaweedfs + tags: | + type=ref,event=tag,suffix=_full + flavor: | + latest=false + labels: | + org.opencontainers.image.title=seaweedfs + org.opencontainers.image.description=SeaweedFS is a distributed storage system for blobs, objects, files, and data lake, to store and serve billions of 
files fast! + org.opencontainers.image.vendor=Chris Lu + - + name: Set up QEMU + uses: docker/setup-qemu-action@8b122486cedac8393e77aa9734c3528886e4a1a8 # v1 + - + name: Set up Docker Buildx + uses: docker/setup-buildx-action@dc7b9719a96d48369863986a06765841d7ea23f6 # v1 + - + name: Login to Docker Hub + if: github.event_name != 'pull_request' + uses: docker/login-action@49ed152c8eca782a232dede0303416e8f356c37b # v1 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + - + name: Build + uses: docker/build-push-action@e551b19e49efd4e98792db7592c17c09b89db8d8 # v2 + with: + context: ./docker + push: ${{ github.event_name != 'pull_request' }} + file: ./docker/Dockerfile.go_build + build-args: TAGS=elastic,ydb,gocdk,hdfs + platforms: linux/amd64 + tags: ${{ steps.docker_meta.outputs.tags }} + labels: ${{ steps.docker_meta.outputs.labels }} diff --git a/docker/Dockerfile.go_build b/docker/Dockerfile.go_build index b90c65069..c917ec556 100644 --- a/docker/Dockerfile.go_build +++ b/docker/Dockerfile.go_build @@ -3,10 +3,11 @@ RUN apk add git g++ fuse RUN mkdir -p /go/src/github.com/chrislusf/ RUN git clone https://github.com/chrislusf/seaweedfs /go/src/github.com/chrislusf/seaweedfs ARG BRANCH=${BRANCH:-master} +ARG TAGS RUN cd /go/src/github.com/chrislusf/seaweedfs && git checkout $BRANCH RUN cd /go/src/github.com/chrislusf/seaweedfs/weed \ && export LDFLAGS="-X github.com/chrislusf/seaweedfs/weed/util.COMMIT=$(git rev-parse --short HEAD)" \ - && CGO_ENABLED=0 go install -ldflags "-extldflags -static ${LDFLAGS}" + && CGO_ENABLED=0 go install -tags "$TAGS" -ldflags "-extldflags -static ${LDFLAGS}" FROM alpine AS final LABEL author="Chris Lu" diff --git a/docker/Dockerfile.go_build_large b/docker/Dockerfile.go_build_large deleted file mode 100644 index 5c5e84233..000000000 --- a/docker/Dockerfile.go_build_large +++ /dev/null @@ -1,43 +0,0 @@ -FROM golang:1.18-alpine as builder -RUN apk add git g++ fuse -RUN mkdir -p 
/go/src/github.com/chrislusf/ -RUN git clone https://github.com/chrislusf/seaweedfs /go/src/github.com/chrislusf/seaweedfs -ARG BRANCH=${BRANCH:-master} -RUN cd /go/src/github.com/chrislusf/seaweedfs && git checkout $BRANCH -RUN cd /go/src/github.com/chrislusf/seaweedfs/weed \ - && export LDFLAGS="-X github.com/chrislusf/seaweedfs/weed/util.COMMIT=$(git rev-parse --short HEAD)" \ - && CGO_ENABLED=0 go install -tags 5BytesOffset -ldflags "-extldflags -static ${LDFLAGS}" - -FROM alpine AS final -LABEL author="Chris Lu" -COPY --from=builder /go/bin/weed /usr/bin/ -RUN mkdir -p /etc/seaweedfs -COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/filer.toml /etc/seaweedfs/filer.toml -COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/entrypoint.sh /entrypoint.sh -RUN apk add fuse # for weed mount - -# volume server gprc port -EXPOSE 18080 -# volume server http port -EXPOSE 8080 -# filer server gprc port -EXPOSE 18888 -# filer server http port -EXPOSE 8888 -# master server shared gprc port -EXPOSE 19333 -# master server shared http port -EXPOSE 9333 -# s3 server http port -EXPOSE 8333 -# webdav server http port -EXPOSE 7333 - -RUN mkdir -p /data/filerldb2 - -VOLUME /data -WORKDIR /data - -RUN chmod +x /entrypoint.sh - -ENTRYPOINT ["/entrypoint.sh"] diff --git a/docker/Dockerfile.rocksdb_large b/docker/Dockerfile.rocksdb_large index a1a84f884..0025eb116 100644 --- a/docker/Dockerfile.rocksdb_large +++ b/docker/Dockerfile.rocksdb_large @@ -3,7 +3,7 @@ FROM golang:1.18-buster as builder RUN apt-get update RUN apt-get install -y build-essential libsnappy-dev zlib1g-dev libbz2-dev libgflags-dev liblz4-dev libzstd-dev -ENV ROCKSDB_VERSION v7.0.4 +ENV ROCKSDB_VERSION v7.2.2 # build RocksDB RUN cd /tmp && \ diff --git a/docker/Makefile b/docker/Makefile index c8603309d..c023fc1ae 100644 --- a/docker/Makefile +++ b/docker/Makefile @@ -7,14 +7,17 @@ gen: dev binary: export SWCOMMIT=$(shell git rev-parse --short HEAD) export SWLDFLAGS="-X 
github.com/chrislusf/seaweedfs/weed/util.COMMIT=$(SWCOMMIT)" - cd ../weed; CGO_ENABLED=0 GOOS=linux go build --tags "$(tags)" -ldflags "-extldflags -static $(SWLDFLAGS)"; mv weed ../docker/ + cd ../weed; CGO_ENABLED=0 GOOS=linux go build -tags "$(tags)" -ldflags "-extldflags -static $(SWLDFLAGS)"; mv weed ../docker/ build: binary docker build --no-cache -t chrislusf/seaweedfs:local -f Dockerfile.local . rm ./weed -build_gorocksdb: - docker build --no-cache -t chrislusf/gorocksdb -f Dockerfile.go_rocksdb . +go_build: # make go_build tags=elastic,ydb,gocdk,hdfs,rocksdb,5BytesOffset + docker build --build-arg TAGS=$(tags) --no-cache -t chrislusf/seaweedfs:go_build -f Dockerfile.go_build . + +go_build_large_disk: + docker build --build-arg TAGS=large_disk --no-cache -t chrislusf/seaweedfs:large_disk -f Dockerfile.go_build . build_rocksdb: docker build --no-cache -t chrislusf/seaweedfs:rocksdb -f Dockerfile.rocksdb_large . diff --git a/weed/s3api/http/header.go b/weed/s3api/http/header.go index d63d50443..30fc8eefa 100644 --- a/weed/s3api/http/header.go +++ b/weed/s3api/http/header.go @@ -28,11 +28,14 @@ const ( AmzStorageClass = "x-amz-storage-class" // S3 user-defined metadata - AmzUserMetaPrefix = "X-Amz-Meta-" + AmzUserMetaPrefix = "X-Amz-Meta-" + AmzUserMetaDirective = "X-Amz-Metadata-Directive" // S3 object tagging - AmzObjectTagging = "X-Amz-Tagging" - AmzTagCount = "x-amz-tagging-count" + AmzObjectTagging = "X-Amz-Tagging" + AmzObjectTaggingPrefix = "X-Amz-Tagging-" + AmzObjectTaggingDirective = "X-Amz-Tagging-Directive" + AmzTagCount = "x-amz-tagging-count" ) // Non-Standard S3 HTTP request constants diff --git a/weed/s3api/s3api_object_copy_handlers.go b/weed/s3api/s3api_object_copy_handlers.go index f62db9c31..c44ca7ddf 100644 --- a/weed/s3api/s3api_object_copy_handlers.go +++ b/weed/s3api/s3api_object_copy_handlers.go @@ -3,9 +3,10 @@ package s3api import ( "fmt" "github.com/chrislusf/seaweedfs/weed/glog" + headers 
"github.com/chrislusf/seaweedfs/weed/s3api/http" xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http" "github.com/chrislusf/seaweedfs/weed/s3api/s3err" - weed_server "github.com/chrislusf/seaweedfs/weed/server" + "modernc.org/strutil" "net/http" "net/url" "strconv" @@ -15,6 +16,11 @@ import ( "github.com/chrislusf/seaweedfs/weed/util" ) +const ( + DirectiveCopy = "COPY" + DirectiveReplace = "REPLACE" +) + func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request) { dstBucket, dstObject := xhttp.GetBucketAndObject(r) @@ -30,7 +36,9 @@ func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request glog.V(3).Infof("CopyObjectHandler %s %s => %s %s", srcBucket, srcObject, dstBucket, dstObject) - if (srcBucket == dstBucket && srcObject == dstObject || cpSrcPath == "") && isReplace(r) { + replaceMeta, replaceTagging := replaceDirective(r.Header) + + if (srcBucket == dstBucket && srcObject == dstObject || cpSrcPath == "") && (replaceMeta || replaceTagging) { fullPath := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, dstBucket, dstObject)) dir, name := fullPath.DirAndName() entry, err := s3a.getEntry(dir, name) @@ -38,7 +46,7 @@ func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource) return } - entry.Extended = weed_server.SaveAmzMetaData(r, entry.Extended, isReplace(r)) + entry.Extended = processMetadataBytes(r.Header, entry.Extended, replaceMeta, replaceTagging) err = s3a.touch(dir, name, entry) if err != nil { s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource) @@ -80,6 +88,11 @@ func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request } defer util.CloseResponse(resp) + tagErr := processMetadata(r.Header, resp.Header, replaceMeta, replaceTagging, s3a.getTags, dir, name) + if tagErr != nil { + s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource) + return + } glog.V(2).Infof("copy from %s to %s", 
srcUrl, dstUrl) etag, errCode := s3a.putToFiler(r, dstUrl, resp.Body) @@ -182,6 +195,107 @@ func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Req } -func isReplace(r *http.Request) bool { - return r.Header.Get("X-Amz-Metadata-Directive") == "REPLACE" +func replaceDirective(reqHeader http.Header) (replaceMeta, replaceTagging bool) { + return reqHeader.Get(headers.AmzUserMetaDirective) == DirectiveReplace, reqHeader.Get(headers.AmzObjectTaggingDirective) == DirectiveReplace +} + +func processMetadata(reqHeader, existing http.Header, replaceMeta, replaceTagging bool, getTags func(parentDirectoryPath string, entryName string) (tags map[string]string, err error), dir, name string) (err error) { + if sc := reqHeader.Get(xhttp.AmzStorageClass); len(sc) == 0 { + if sc := existing[xhttp.AmzStorageClass]; len(sc) > 0 { + reqHeader[xhttp.AmzStorageClass] = sc + } + } + + if !replaceMeta { + for header, _ := range reqHeader { + if strings.HasPrefix(header, xhttp.AmzUserMetaPrefix) { + delete(reqHeader, header) + } + } + for k, v := range existing { + if strings.HasPrefix(k, xhttp.AmzUserMetaPrefix) { + reqHeader[k] = v + } + } + } + + if !replaceTagging { + for header, _ := range reqHeader { + if strings.HasPrefix(header, xhttp.AmzObjectTagging) { + delete(reqHeader, header) + } + } + + found := false + for k, _ := range existing { + if strings.HasPrefix(k, xhttp.AmzObjectTaggingPrefix) { + found = true + break + } + } + + if found { + tags, err := getTags(dir, name) + if err != nil { + return err + } + + var tagArr []string + for k, v := range tags { + tagArr = append(tagArr, fmt.Sprintf("%s=%s", k, v)) + } + tagStr := strutil.JoinFields(tagArr, "&") + reqHeader.Set(xhttp.AmzObjectTagging, tagStr) + } + } + return +} + +func processMetadataBytes(reqHeader http.Header, existing map[string][]byte, replaceMeta, replaceTagging bool) (metadata map[string][]byte) { + metadata = make(map[string][]byte) + + if sc := existing[xhttp.AmzStorageClass]; len(sc) 
> 0 { + metadata[xhttp.AmzStorageClass] = sc + } + if sc := reqHeader.Get(xhttp.AmzStorageClass); len(sc) > 0 { + metadata[xhttp.AmzStorageClass] = []byte(sc) + } + + if replaceMeta { + for header, values := range reqHeader { + if strings.HasPrefix(header, xhttp.AmzUserMetaPrefix) { + for _, value := range values { + metadata[header] = []byte(value) + } + } + } + } else { + for k, v := range existing { + if strings.HasPrefix(k, xhttp.AmzUserMetaPrefix) { + metadata[k] = v + } + } + } + + if replaceTagging { + if tags := reqHeader.Get(xhttp.AmzObjectTagging); tags != "" { + for _, v := range strings.Split(tags, "&") { + tag := strings.Split(v, "=") + if len(tag) == 2 { + metadata[xhttp.AmzObjectTagging+"-"+tag[0]] = []byte(tag[1]) + } else if len(tag) == 1 { + metadata[xhttp.AmzObjectTagging+"-"+tag[0]] = nil + } + } + } + } else { + for k, v := range existing { + if strings.HasPrefix(k, xhttp.AmzObjectTagging) { + metadata[k] = v + } + } + delete(metadata, xhttp.AmzTagCount) + } + + return } diff --git a/weed/s3api/s3api_object_copy_handlers_test.go b/weed/s3api/s3api_object_copy_handlers_test.go new file mode 100644 index 000000000..d2c8e488b --- /dev/null +++ b/weed/s3api/s3api_object_copy_handlers_test.go @@ -0,0 +1,426 @@ +package s3api + +import ( + "fmt" + headers "github.com/chrislusf/seaweedfs/weed/s3api/http" + "net/http" + "reflect" + "sort" + "strings" + "testing" +) + +type H map[string]string + +func (h H) String() string { + pairs := make([]string, 0, len(h)) + for k, v := range h { + pairs = append(pairs, fmt.Sprintf("%s : %s", k, v)) + } + sort.Strings(pairs) + join := strings.Join(pairs, "\n") + return "\n" + join + "\n" +} + +var processMetadataTestCases = []struct { + caseId int + request H + existing H + getTags H + want H +}{ + { + 201, + H{ + "User-Agent": "firefox", + "X-Amz-Meta-My-Meta": "request", + "X-Amz-Tagging": "A=B&a=b&type=request", + }, + H{ + "X-Amz-Meta-My-Meta": "existing", + "X-Amz-Tagging-A": "B", + "X-Amz-Tagging-Type": 
"existing", + }, + H{ + "A": "B", + "a": "b", + "type": "existing", + }, + H{ + "User-Agent": "firefox", + "X-Amz-Meta-My-Meta": "existing", + "X-Amz-Tagging": "A=B&a=b&type=existing", + }, + }, + { + 202, + H{ + "User-Agent": "firefox", + "X-Amz-Meta-My-Meta": "request", + "X-Amz-Tagging": "A=B&a=b&type=request", + headers.AmzUserMetaDirective: DirectiveReplace, + }, + H{ + "X-Amz-Meta-My-Meta": "existing", + "X-Amz-Tagging-A": "B", + "X-Amz-Tagging-Type": "existing", + }, + H{ + "A": "B", + "a": "b", + "type": "existing", + }, + H{ + "User-Agent": "firefox", + "X-Amz-Meta-My-Meta": "request", + "X-Amz-Tagging": "A=B&a=b&type=existing", + headers.AmzUserMetaDirective: DirectiveReplace, + }, + }, + + { + 203, + H{ + "User-Agent": "firefox", + "X-Amz-Meta-My-Meta": "request", + "X-Amz-Tagging": "A=B&a=b&type=request", + headers.AmzObjectTaggingDirective: DirectiveReplace, + }, + H{ + "X-Amz-Meta-My-Meta": "existing", + "X-Amz-Tagging-A": "B", + "X-Amz-Tagging-Type": "existing", + }, + H{ + "A": "B", + "a": "b", + "type": "existing", + }, + H{ + "User-Agent": "firefox", + "X-Amz-Meta-My-Meta": "existing", + "X-Amz-Tagging": "A=B&a=b&type=request", + headers.AmzObjectTaggingDirective: DirectiveReplace, + }, + }, + + { + 204, + H{ + "User-Agent": "firefox", + "X-Amz-Meta-My-Meta": "request", + "X-Amz-Tagging": "A=B&a=b&type=request", + headers.AmzUserMetaDirective: DirectiveReplace, + headers.AmzObjectTaggingDirective: DirectiveReplace, + }, + H{ + "X-Amz-Meta-My-Meta": "existing", + "X-Amz-Tagging-A": "B", + "X-Amz-Tagging-a": "b", + "X-Amz-Tagging-Type": "existing", + }, + H{ + "A": "B", + "a": "b", + "type": "existing", + }, + H{ + "User-Agent": "firefox", + "X-Amz-Meta-My-Meta": "request", + "X-Amz-Tagging": "A=B&a=b&type=request", + headers.AmzUserMetaDirective: DirectiveReplace, + headers.AmzObjectTaggingDirective: DirectiveReplace, + }, + }, + + { + 205, + H{ + "User-Agent": "firefox", + "X-Amz-Meta-My-Meta": "request", + "X-Amz-Tagging": "A=B&a=b&type=request", 
+ headers.AmzUserMetaDirective: DirectiveReplace, + headers.AmzObjectTaggingDirective: DirectiveReplace, + }, + H{}, + H{}, + H{ + "User-Agent": "firefox", + "X-Amz-Meta-My-Meta": "request", + "X-Amz-Tagging": "A=B&a=b&type=request", + headers.AmzUserMetaDirective: DirectiveReplace, + headers.AmzObjectTaggingDirective: DirectiveReplace, + }, + }, + + { + 206, + H{ + "User-Agent": "firefox", + headers.AmzUserMetaDirective: DirectiveReplace, + headers.AmzObjectTaggingDirective: DirectiveReplace, + }, + H{ + "X-Amz-Meta-My-Meta": "existing", + "X-Amz-Tagging-A": "B", + "X-Amz-Tagging-a": "b", + "X-Amz-Tagging-Type": "existing", + }, + H{ + "A": "B", + "a": "b", + "type": "existing", + }, + H{ + "User-Agent": "firefox", + headers.AmzUserMetaDirective: DirectiveReplace, + headers.AmzObjectTaggingDirective: DirectiveReplace, + }, + }, + + { + 207, + H{ + "User-Agent": "firefox", + "X-Amz-Meta-My-Meta": "request", + headers.AmzUserMetaDirective: DirectiveReplace, + headers.AmzObjectTaggingDirective: DirectiveReplace, + }, + H{ + "X-Amz-Meta-My-Meta": "existing", + "X-Amz-Tagging-A": "B", + "X-Amz-Tagging-a": "b", + "X-Amz-Tagging-Type": "existing", + }, + H{ + "A": "B", + "a": "b", + "type": "existing", + }, + H{ + "User-Agent": "firefox", + "X-Amz-Meta-My-Meta": "request", + headers.AmzUserMetaDirective: DirectiveReplace, + headers.AmzObjectTaggingDirective: DirectiveReplace, + }, + }, +} +var processMetadataBytesTestCases = []struct { + caseId int + request H + existing H + want H +}{ + { + 101, + H{ + "User-Agent": "firefox", + "X-Amz-Meta-My-Meta": "request", + "X-Amz-Tagging": "A=B&a=b&type=request", + }, + H{ + "X-Amz-Meta-My-Meta": "existing", + "X-Amz-Tagging-A": "B", + "X-Amz-Tagging-a": "b", + "X-Amz-Tagging-type": "existing", + }, + H{ + "X-Amz-Meta-My-Meta": "existing", + "X-Amz-Tagging-A": "B", + "X-Amz-Tagging-a": "b", + "X-Amz-Tagging-type": "existing", + }, + }, + + { + 102, + H{ + "User-Agent": "firefox", + "X-Amz-Meta-My-Meta": "request", + 
"X-Amz-Tagging": "A=B&a=b&type=request", + headers.AmzUserMetaDirective: DirectiveReplace, + }, + H{ + "X-Amz-Meta-My-Meta": "existing", + "X-Amz-Tagging-A": "B", + "X-Amz-Tagging-a": "b", + "X-Amz-Tagging-type": "existing", + }, + H{ + "X-Amz-Meta-My-Meta": "request", + "X-Amz-Tagging-A": "B", + "X-Amz-Tagging-a": "b", + "X-Amz-Tagging-type": "existing", + }, + }, + + { + 103, + H{ + "User-Agent": "firefox", + "X-Amz-Meta-My-Meta": "request", + "X-Amz-Tagging": "A=B&a=b&type=request", + headers.AmzObjectTaggingDirective: DirectiveReplace, + }, + H{ + "X-Amz-Meta-My-Meta": "existing", + "X-Amz-Tagging-A": "B", + "X-Amz-Tagging-a": "b", + "X-Amz-Tagging-type": "existing", + }, + H{ + "X-Amz-Meta-My-Meta": "existing", + "X-Amz-Tagging-A": "B", + "X-Amz-Tagging-a": "b", + "X-Amz-Tagging-type": "request", + }, + }, + + { + 104, + H{ + "User-Agent": "firefox", + "X-Amz-Meta-My-Meta": "request", + "X-Amz-Tagging": "A=B&a=b&type=request", + headers.AmzUserMetaDirective: DirectiveReplace, + headers.AmzObjectTaggingDirective: DirectiveReplace, + }, + H{ + "X-Amz-Meta-My-Meta": "existing", + "X-Amz-Tagging-A": "B", + "X-Amz-Tagging-a": "b", + "X-Amz-Tagging-type": "existing", + }, + H{ + "X-Amz-Meta-My-Meta": "request", + "X-Amz-Tagging-A": "B", + "X-Amz-Tagging-a": "b", + "X-Amz-Tagging-type": "request", + }, + }, + + { + 105, + H{ + "User-Agent": "firefox", + headers.AmzUserMetaDirective: DirectiveReplace, + headers.AmzObjectTaggingDirective: DirectiveReplace, + }, + H{ + "X-Amz-Meta-My-Meta": "existing", + "X-Amz-Tagging-A": "B", + "X-Amz-Tagging-a": "b", + "X-Amz-Tagging-type": "existing", + }, + H{}, + }, + + { + 107, + H{ + "User-Agent": "firefox", + "X-Amz-Meta-My-Meta": "request", + "X-Amz-Tagging": "A=B&a=b&type=request", + headers.AmzUserMetaDirective: DirectiveReplace, + headers.AmzObjectTaggingDirective: DirectiveReplace, + }, + H{}, + H{ + "X-Amz-Meta-My-Meta": "request", + "X-Amz-Tagging-A": "B", + "X-Amz-Tagging-a": "b", + "X-Amz-Tagging-type": "request", + }, 
+ }, +} + +func TestProcessMetadata(t *testing.T) { + for _, tc := range processMetadataTestCases { + reqHeader := transferHToHeader(tc.request) + existing := transferHToHeader(tc.existing) + replaceMeta, replaceTagging := replaceDirective(reqHeader) + + err := processMetadata(reqHeader, existing, replaceMeta, replaceTagging, func(_ string, _ string) (tags map[string]string, err error) { + return tc.getTags, nil + }, "", "") + if err != nil { + t.Error(err) + } + + result := transferHeaderToH(reqHeader) + fmtTagging(result, tc.want) + + if !reflect.DeepEqual(result, tc.want) { + t.Error(fmt.Errorf("\n### CaseID: %d ###"+ + "\nRequest:%v"+ + "\nExisting:%v"+ + "\nGetTags:%v"+ + "\nWant:%v"+ + "\nActual:%v", + tc.caseId, tc.request, tc.existing, tc.getTags, tc.want, result)) + } + } +} + +func TestProcessMetadataBytes(t *testing.T) { + for _, tc := range processMetadataBytesTestCases { + reqHeader := transferHToHeader(tc.request) + existing := transferHToBytesArr(tc.existing) + replaceMeta, replaceTagging := replaceDirective(reqHeader) + extends := processMetadataBytes(reqHeader, existing, replaceMeta, replaceTagging) + + result := transferBytesArrToH(extends) + fmtTagging(result, tc.want) + + if !reflect.DeepEqual(result, tc.want) { + t.Error(fmt.Errorf("\n### CaseID: %d ###"+ + "\nRequest:%v"+ + "\nExisting:%v"+ + "\nWant:%v"+ + "\nActual:%v", + tc.caseId, tc.request, tc.existing, tc.want, result)) + } + } +} + +func fmtTagging(maps ...map[string]string) { + for _, m := range maps { + if tagging := m[headers.AmzObjectTagging]; len(tagging) > 0 { + split := strings.Split(tagging, "&") + sort.Strings(split) + m[headers.AmzObjectTagging] = strings.Join(split, "&") + } + } +} + +func transferHToHeader(data map[string]string) http.Header { + header := http.Header{} + for k, v := range data { + header.Add(k, v) + } + return header +} + +func transferHToBytesArr(data map[string]string) map[string][]byte { + m := make(map[string][]byte, len(data)) + for k, v := range data 
{ + m[k] = []byte(v) + } + return m +} + +func transferBytesArrToH(data map[string][]byte) H { + m := make(map[string]string, len(data)) + for k, v := range data { + m[k] = string(v) + } + return m +} + +func transferHeaderToH(data map[string][]string) H { + m := make(map[string]string, len(data)) + for k, v := range data { + m[k] = v[len(v)-1] + } + return m +}